text
stringlengths 4
1.02M
| meta
dict |
|---|---|
from .column import Column
from typing import List, Union
class Numeric(Column):
    """Column definition for MySQL numeric types (ints, decimals, floats, BIT).

    Adds numeric-specific schema validation on top of the base Column checks:
    numeric columns cannot carry a character set or collation, floating types
    cannot have a length or AUTO_INCREMENT, and defaults must be numeric
    values compatible with the column type.
    """
    _allowed_column_types = [
        'INTEGER',
        'INT',
        'SMALLINT',
        'TINYINT',
        'MEDIUMINT',
        'BIGINT',
        'DECIMAL',
        'NUMERIC',
        'FLOAT',
        'DOUBLE',
        'BIT',
    ]
    def __init__(
        self,
        name: str = '',
        column_type: str = '',
        length: Union[str, int] = None,
        null: bool = True,
        has_default: bool = False,
        default: Union[str, int] = None,
        unsigned: bool = None,
        character_set: str = None,
        collate: str = None,
        auto_increment: bool = False,
        enum_values: List[str] = None,
        parsing_errors: List[str] = None,
        parsing_warnings: List[str] = None,
    ):
        # it would be nice to just do `def __init__(**kwargs)` and then `super().__init__(**kwargs)`
        # but then we would lose our type hints. :shrug:
        super().__init__(
            name=name,
            column_type=column_type,
            length=length,
            null=null,
            has_default=has_default,
            default=default,
            unsigned=unsigned,
            character_set=character_set,
            collate=collate,
            auto_increment=auto_increment,
            enum_values=enum_values,
            parsing_errors=parsing_errors,
            parsing_warnings=parsing_warnings,
        )
    def _default_value_type(self):
        """Classify self.default as 'int', 'float', 'str', or None (no default).

        String defaults that parse as numbers are classified by their numeric
        type, because the parser does not currently convert quoted numbers.
        """
        # deliberately type() rather than isinstance() to mirror the parser's
        # output exactly (and avoid surprises with bool, a subclass of int)
        if type(self.default) == int:
            return 'int'
        if type(self.default) == float:
            return 'float'
        if type(self.default) == str:
            # catch only ValueError: the old bare `except:` here could hide
            # unrelated bugs, and int()/float() on a str can only raise ValueError
            try:
                int(self.default)
                return 'int'
            except ValueError:
                try:
                    float(self.default)
                    return 'float'
                except ValueError:
                    return 'str'
        return None
    def _check_for_schema_errors_and_warnings(self):
        """Record numeric-specific schema errors on top of the base checks."""
        super()._check_for_schema_errors_and_warnings()
        allow_float = ['DECIMAL', 'NUMERIC', 'FLOAT', 'DOUBLE']
        no_length = ['FLOAT', 'DOUBLE', 'BIT']
        no_auto_increment = ['FLOAT', 'DOUBLE', 'BIT']
        # I should probably have the parser auto-convert the type based on quotes/whatever, but
        # currently it doesn't and I'm apparently being lazy. I'll probably regret this.
        default_type = self._default_value_type()
        if default_type == 'str':
            self._schema_errors.append(
                f"Column '{self.name}' of type '{self.column_type}' cannot have a string value as a default"
            )
        else:
            if default_type == 'float' and self.column_type not in allow_float:
                self._schema_errors.append(
                    f"Column '{self.name}' of type '{self.column_type}' must have an integer value as a default"
                )
            # only check integer defaults: a None default used to crash here with
            # a TypeError, and a float-string default with a ValueError (float
            # defaults for BIT are already flagged by the check just above)
            if self.column_type == 'BIT' and default_type == 'int' and int(self.default) not in (0, 1):
                self._schema_errors.append(f"Column '{self.name}' of type 'BIT' must have a default of 1 or 0")
        if self.length:
            if self.column_type in no_length:
                self._schema_errors.append(f"Column '{self.name}' of type '{self.column_type}' cannot have a length")
            elif type(self.length) == str and ',' in self.length and self.column_type not in allow_float:
                self._schema_errors.append(
                    f"Column '{self.name}' of type '{self.column_type}' must have an integer value as its length"
                )
        if self.character_set is not None:
            self._schema_errors.append(f"Column '{self.name}' of type '{self.column_type}' cannot have a character set")
        if self.collate is not None:
            self._schema_errors.append(f"Column '{self.name}' of type '{self.column_type}' cannot have a collate")
        if self.auto_increment and self.column_type in no_auto_increment:
            self._schema_errors.append(f"Column '{self.name}' of type '{self.column_type}' cannot be an AUTO_INCREMENT")
        if self.enum_values:
            self._schema_errors.append(
                "Column '%s' of type %s is not allowed to have a list of values for its length" %
                (self.name, self.column_type)
            )
    def _is_really_the_same_default(self, column: Column) -> bool:
        """Return True when self.default and column.default are numerically equal.

        For DECIMAL columns the comparison is rounded to the column's declared
        number of decimal places before comparing.
        """
        if self.column_type != 'DECIMAL':
            return float(self.default) == float(column.default)
        # Default equality is mildly tricky for decimals because 0 and 0.000 are the same,
        # and if there are 4 digits after the decimal than 0.0000 and 0.00001 are the same too
        # This will come up if someone sets a default in an SQL file with too many (or too few) decimals,
        # while MySQL will report it properly rounded to the exact number of decimal places
        # guard: length may be unset or an int, in which case there is no decimal
        # count to round to (the old `self.length.split(...)` crashed on those)
        split = str(self.length).split(',') if self.length else []
        if len(split) == 2:
            ndecimals = int(split[1])
            if round(float(self.default), ndecimals) == round(float(column.default), ndecimals):
                return True
        return self.default == column.default
|
{
"content_hash": "60604f3860915ac84286ccb018ae9551",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 120,
"avg_line_length": 40.888888888888886,
"alnum_prop": 0.5557065217391305,
"repo_name": "cmancone/mygrations",
"id": "d8f8d24a21f949cf9a897c590b0f18eb8995102e",
"size": "5152",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mygrations/core/definitions/columns/numeric.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "416430"
},
{
"name": "Shell",
"bytes": "331"
}
],
"symlink_target": ""
}
|
from django.contrib.auth.models import User
from django.db import models
class Nonce(models.Model):
    """A single-use OpenID nonce, recorded so that an authentication
    response from the given server cannot be replayed."""
    server_url = models.CharField(max_length=2047)
    # presumably a unix timestamp (integer seconds) — confirm against the store
    timestamp = models.IntegerField()
    salt = models.CharField(max_length=40)
    def __unicode__(self):
        # Python 2-style display method (there is no __str__ for Python 3 here)
        return u"Nonce: %s, %s" % (self.server_url, self.salt)
class Association(models.Model):
    """A shared-secret association with an OpenID server, letting signed
    responses be verified without a round trip per request."""
    # NOTE(review): max_length on a TextField affects only form widgets, not
    # DB-level enforcement — confirm whether CharField was intended here.
    server_url = models.TextField(max_length=2047)
    handle = models.CharField(max_length=255)
    secret = models.TextField(max_length=255)  # Stored base64 encoded
    issued = models.IntegerField()
    lifetime = models.IntegerField()
    assoc_type = models.TextField(max_length=64)
    def __unicode__(self):
        # Python 2-style display method (there is no __str__ for Python 3 here)
        return u"Association: %s, %s" % (self.server_url, self.handle)
class UserOpenID(models.Model):
    """Maps a Django auth User to one OpenID claimed identifier."""
    # on_delete=CASCADE matches the implicit default of Django < 2.0 and is
    # mandatory to spell out on Django >= 2.0, where ForeignKey(User) alone
    # raises TypeError.
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    claimed_id = models.CharField(max_length=255, unique=True)
    display_id = models.CharField(max_length=255)
|
{
"content_hash": "dafd8c539c97c8ac64882ddb107988a4",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 70,
"avg_line_length": 32.03448275862069,
"alnum_prop": 0.697524219590958,
"repo_name": "vtemian/django-openid-auth",
"id": "3c3764a5c42762b98e3a84c389e4957ae54fc4f0",
"size": "2354",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_openid_auth/models.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "157274"
}
],
"symlink_target": ""
}
|
__all__ = ['pretty_print_model_fields']
def _get_class_full_name(cls_):
return cls_.__module__ + '.' + cls_.__name__
class _ModelFieldRow(object):
def __init__(self, field):
self.field = field
self.name = field.name
self.type_ = _get_class_full_name(type(field))
if self.field.many_to_many\
or self.field.many_to_one\
or self.field.one_to_many\
or self.field.one_to_one:
self.related_model = _get_class_full_name(self.field.remote_field.model)
else:
self.related_model = 'N/A'
def pretty_print(self, max_name_len, max_type_len, max_rel_model_len):
row = []
row.append(self.name)
row.append(' ' * (max_name_len - len(self.name)))
row.append('|')
row.append(self.type_)
row.append(' ' * (max_type_len - len(self.type_)))
row.append('|')
row.append(self.related_model)
row.append(' ' * (max_rel_model_len - len(self.related_model)))
print(''.join(row))
def pretty_print_model_fields(model):
    """Print a table of *model*'s fields: name | type | related model."""
    rows = [_ModelFieldRow(field) for field in model._meta.get_fields()]
    # One column width per cell, sized to the widest entry in that column.
    widths = [
        max((len(r.name) for r in rows), default=0),
        max((len(r.type_) for r in rows), default=0),
        max((len(r.related_model) for r in rows), default=0),
    ]
    separator = '=' * (sum(widths) + len(widths) - 1)
    print(separator)
    for row in rows:
        row.pretty_print(*widths)
    print(separator)
|
{
"content_hash": "aacbf60fc2011e76a392b3613d584091",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 84,
"avg_line_length": 35.76086956521739,
"alnum_prop": 0.5580547112462007,
"repo_name": "wwitzel3/awx",
"id": "1e637e9c1b924e183f188272ee322ced8e6ee576",
"size": "1645",
"binary": false,
"copies": "4",
"ref": "refs/heads/devel",
"path": "tools/scripts/list_fields.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "303046"
},
{
"name": "Dockerfile",
"bytes": "5713"
},
{
"name": "HTML",
"bytes": "496559"
},
{
"name": "JavaScript",
"bytes": "3513112"
},
{
"name": "Makefile",
"bytes": "21133"
},
{
"name": "PowerShell",
"bytes": "10176"
},
{
"name": "Python",
"bytes": "3904288"
},
{
"name": "Shell",
"bytes": "13833"
}
],
"symlink_target": ""
}
|
"""
Flask-WhooshAlchemy-Redux
-------------
Whoosh extension to Flask/SQLAlchemy
"""
from setuptools import setup
import os
def _read(relative_path):
    """Read a file sitting next to this setup script, closing it promptly.

    The original inline open(...).read() calls leaked the file handles.
    """
    with open(os.path.join(os.path.dirname(__file__), relative_path)) as handle:
        return handle.read()

setup(
    name='Flask-WhooshAlchemy-Redux',
    version='0.7.1',
    url='https://github.com/dhamaniasad/Flask-WhooshAlchemy',
    license='BSD',
    author='Asad Dhamani',
    author_email='dhamaniasad+code@gmail.com',
    description='Whoosh extension to Flask/SQLAlchemy',
    long_description=_read('README.md'),
    py_modules=['flask_whooshalchemy'],
    zip_safe=False,
    include_package_data=True,
    platforms='any',
    # Skip blank lines so install_requires contains no empty-string entries
    # (the old list comprehension kept stripped blank lines as '').
    install_requires=[line.strip()
                      for line in _read('requirements.txt').splitlines()
                      if line.strip()],
    tests_require=['Flask-Testing'],
    classifiers=[
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Topic :: Software Development :: Libraries :: Python Modules'
    ],
    test_suite='test.test_all',
)
|
{
"content_hash": "c45f0a02910a50099e3c8218d2a5e02b",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 87,
"avg_line_length": 28.926829268292682,
"alnum_prop": 0.633220910623946,
"repo_name": "dhamaniasad/Flask-WhooshAlchemy",
"id": "37ab5d82adbc8db51949586afda375274a5a3cb8",
"size": "1186",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "21089"
}
],
"symlink_target": ""
}
|
import mock
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
from nova import context
from nova import exception
from nova import objects
from nova.objects import base
from nova.objects import request_spec
from nova.tests.unit import fake_flavor
from nova.tests.unit import fake_instance
from nova.tests.unit import fake_request_spec
from nova.tests.unit.objects import test_objects
class _TestRequestSpecObject(object):
    """Shared test mixin for objects.RequestSpec.

    Exercises the private conversion helpers (to/from legacy dicts, images,
    instances, flavors, retries, limits, hints, filter properties) and the
    DB-backed create/save/get_by_instance_uuid paths.  The concrete
    TestRequestSpecObject / TestRemoteRequestSpecObject subclasses below run
    these same tests locally and over the remote object layer.
    """
    # --- image conversion helpers -----------------------------------------
    def test_image_meta_from_image_as_object(self):
        # Just isolating the test for the from_dict() method
        image_meta = objects.ImageMeta(name='foo')
        spec = objects.RequestSpec()
        spec._image_meta_from_image(image_meta)
        self.assertEqual(image_meta, spec.image)
    @mock.patch.object(objects.ImageMeta, 'from_dict')
    def test_image_meta_from_image_as_dict(self, from_dict):
        # Just isolating the test for the from_dict() method
        image_meta = objects.ImageMeta(name='foo')
        from_dict.return_value = image_meta
        spec = objects.RequestSpec()
        spec._image_meta_from_image({'name': 'foo'})
        self.assertEqual(image_meta, spec.image)
    def test_image_meta_from_image_as_none(self):
        # just add a dumb check to have a full coverage
        spec = objects.RequestSpec()
        spec._image_meta_from_image(None)
        self.assertIsNone(spec.image)
    @mock.patch.object(base, 'obj_to_primitive')
    def test_to_legacy_image(self, obj_to_primitive):
        spec = objects.RequestSpec(image=objects.ImageMeta())
        fake_dict = mock.Mock()
        obj_to_primitive.return_value = fake_dict
        self.assertEqual(fake_dict, spec._to_legacy_image())
        obj_to_primitive.assert_called_once_with(spec.image)
    @mock.patch.object(base, 'obj_to_primitive')
    def test_to_legacy_image_with_none(self, obj_to_primitive):
        spec = objects.RequestSpec(image=None)
        self.assertEqual({}, spec._to_legacy_image())
        self.assertFalse(obj_to_primitive.called)
    # --- instance conversion helpers --------------------------------------
    def test_from_instance_as_object(self):
        instance = objects.Instance()
        instance.uuid = uuidutils.generate_uuid()
        instance.numa_topology = None
        instance.pci_requests = None
        instance.project_id = '1'
        instance.availability_zone = 'nova'
        spec = objects.RequestSpec()
        spec._from_instance(instance)
        instance_fields = ['numa_topology', 'pci_requests', 'uuid',
                           'project_id', 'availability_zone']
        for field in instance_fields:
            # the instance's uuid lands in the spec's instance_uuid field
            if field == 'uuid':
                self.assertEqual(getattr(instance, field),
                                 getattr(spec, 'instance_uuid'))
            else:
                self.assertEqual(getattr(instance, field),
                                 getattr(spec, field))
    def test_from_instance_as_dict(self):
        instance = dict(uuid=uuidutils.generate_uuid(),
                        numa_topology=None,
                        pci_requests=None,
                        project_id='1',
                        availability_zone='nova')
        spec = objects.RequestSpec()
        spec._from_instance(instance)
        instance_fields = ['numa_topology', 'pci_requests', 'uuid',
                           'project_id', 'availability_zone']
        for field in instance_fields:
            if field == 'uuid':
                self.assertEqual(instance.get(field),
                                 getattr(spec, 'instance_uuid'))
            else:
                self.assertEqual(instance.get(field), getattr(spec, field))
    @mock.patch.object(objects.InstancePCIRequests,
                       'from_request_spec_instance_props')
    def test_from_instance_with_pci_requests(self, pci_from_spec):
        fake_pci_requests = objects.InstancePCIRequests()
        pci_from_spec.return_value = fake_pci_requests
        instance = dict(
            uuid=uuidutils.generate_uuid(),
            root_gb=10,
            ephemeral_gb=0,
            memory_mb=10,
            vcpus=1,
            numa_topology=None,
            project_id='1',
            availability_zone='nova',
            pci_requests={
                'instance_uuid': 'fakeid',
                'requests': [{'count': 1, 'spec': [{'vendor_id': '8086'}]}]})
        spec = objects.RequestSpec()
        spec._from_instance(instance)
        pci_from_spec.assert_called_once_with(instance['pci_requests'])
        self.assertEqual(fake_pci_requests, spec.pci_requests)
    def test_from_instance_with_numa_stuff(self):
        instance = dict(
            uuid=uuidutils.generate_uuid(),
            root_gb=10,
            ephemeral_gb=0,
            memory_mb=10,
            vcpus=1,
            project_id='1',
            availability_zone='nova',
            pci_requests=None,
            numa_topology={'cells': [{'id': 1, 'cpuset': ['1'], 'memory': 8192,
                                      'pagesize': None, 'cpu_topology': None,
                                      'cpu_pinning_raw': None}]})
        spec = objects.RequestSpec()
        spec._from_instance(instance)
        self.assertIsInstance(spec.numa_topology, objects.InstanceNUMATopology)
        cells = spec.numa_topology.cells
        self.assertEqual(1, len(cells))
        self.assertIsInstance(cells[0], objects.InstanceNUMACell)
    # --- flavor conversion helpers ----------------------------------------
    def test_from_flavor_as_object(self):
        flavor = objects.Flavor()
        spec = objects.RequestSpec()
        spec._from_flavor(flavor)
        self.assertEqual(flavor, spec.flavor)
    def test_from_flavor_as_dict(self):
        flavor_dict = dict(id=1)
        ctxt = context.RequestContext('fake', 'fake')
        spec = objects.RequestSpec(ctxt)
        spec._from_flavor(flavor_dict)
        self.assertIsInstance(spec.flavor, objects.Flavor)
        self.assertEqual({'id': 1}, spec.flavor.obj_get_changes())
    def test_to_legacy_instance(self):
        spec = objects.RequestSpec()
        spec.flavor = objects.Flavor(root_gb=10,
                                     ephemeral_gb=0,
                                     memory_mb=10,
                                     vcpus=1)
        spec.numa_topology = None
        spec.pci_requests = None
        spec.project_id = '1'
        spec.availability_zone = 'nova'
        instance = spec._to_legacy_instance()
        self.assertEqual({'root_gb': 10,
                          'ephemeral_gb': 0,
                          'memory_mb': 10,
                          'vcpus': 1,
                          'numa_topology': None,
                          'pci_requests': None,
                          'project_id': '1',
                          'availability_zone': 'nova'}, instance)
    def test_to_legacy_instance_with_unset_values(self):
        spec = objects.RequestSpec()
        self.assertEqual({}, spec._to_legacy_instance())
    # --- retry / group / limits / hints helpers ---------------------------
    def test_from_retry(self):
        retry_dict = {'num_attempts': 1,
                      'hosts': [['fake1', 'node1']]}
        ctxt = context.RequestContext('fake', 'fake')
        spec = objects.RequestSpec(ctxt)
        spec._from_retry(retry_dict)
        self.assertIsInstance(spec.retry, objects.SchedulerRetries)
        self.assertEqual(1, spec.retry.num_attempts)
        self.assertIsInstance(spec.retry.hosts, objects.ComputeNodeList)
        self.assertEqual(1, len(spec.retry.hosts))
        self.assertEqual('fake1', spec.retry.hosts[0].host)
        self.assertEqual('node1', spec.retry.hosts[0].hypervisor_hostname)
    def test_from_retry_missing_values(self):
        retry_dict = {}
        ctxt = context.RequestContext('fake', 'fake')
        spec = objects.RequestSpec(ctxt)
        spec._from_retry(retry_dict)
        self.assertIsNone(spec.retry)
    def test_populate_group_info(self):
        filt_props = {}
        filt_props['group_updated'] = True
        filt_props['group_policies'] = set(['affinity'])
        filt_props['group_hosts'] = set(['fake1'])
        filt_props['group_members'] = set(['fake-instance1'])
        spec = objects.RequestSpec()
        spec._populate_group_info(filt_props)
        self.assertIsInstance(spec.instance_group, objects.InstanceGroup)
        self.assertEqual(['affinity'], spec.instance_group.policies)
        self.assertEqual(['fake1'], spec.instance_group.hosts)
        self.assertEqual(['fake-instance1'], spec.instance_group.members)
    def test_populate_group_info_missing_values(self):
        filt_props = {}
        spec = objects.RequestSpec()
        spec._populate_group_info(filt_props)
        self.assertIsNone(spec.instance_group)
    def test_from_limits(self):
        limits_dict = {'numa_topology': None,
                       'vcpu': 1.0,
                       'disk_gb': 1.0,
                       'memory_mb': 1.0}
        spec = objects.RequestSpec()
        spec._from_limits(limits_dict)
        self.assertIsInstance(spec.limits, objects.SchedulerLimits)
        self.assertIsNone(spec.limits.numa_topology)
        self.assertEqual(1, spec.limits.vcpu)
        self.assertEqual(1, spec.limits.disk_gb)
        self.assertEqual(1, spec.limits.memory_mb)
    def test_from_limits_missing_values(self):
        limits_dict = {}
        spec = objects.RequestSpec()
        spec._from_limits(limits_dict)
        self.assertIsInstance(spec.limits, objects.SchedulerLimits)
        self.assertIsNone(spec.limits.numa_topology)
        self.assertIsNone(spec.limits.vcpu)
        self.assertIsNone(spec.limits.disk_gb)
        self.assertIsNone(spec.limits.memory_mb)
    def test_from_hints(self):
        hints_dict = {'foo_str': '1',
                      'bar_list': ['2']}
        spec = objects.RequestSpec()
        spec._from_hints(hints_dict)
        # scalar hint values are normalized into single-element lists
        expected = {'foo_str': ['1'],
                    'bar_list': ['2']}
        self.assertEqual(expected, spec.scheduler_hints)
    def test_from_hints_with_no_hints(self):
        spec = objects.RequestSpec()
        spec._from_hints(None)
        self.assertIsNone(spec.scheduler_hints)
    # --- whole-spec construction ------------------------------------------
    @mock.patch.object(objects.SchedulerLimits, 'from_dict')
    def test_from_primitives(self, mock_limits):
        spec_dict = {'instance_type': objects.Flavor(),
                     'instance_properties': objects.Instance(
                         uuid=uuidutils.generate_uuid(),
                         numa_topology=None,
                         pci_requests=None,
                         project_id=1,
                         availability_zone='nova')}
        filt_props = {}
        # We seriously don't care about the return values, we just want to make
        # sure that all the fields are set
        mock_limits.return_value = None
        ctxt = context.RequestContext('fake', 'fake')
        spec = objects.RequestSpec.from_primitives(ctxt, spec_dict, filt_props)
        mock_limits.assert_called_once_with({})
        # Make sure that all fields are set using that helper method
        for field in [f for f in spec.obj_fields if f != 'id']:
            self.assertTrue(spec.obj_attr_is_set(field),
                            'Field: %s is not set' % field)
        # just making sure that the context is set by the method
        self.assertEqual(ctxt, spec._context)
    def test_from_components(self):
        ctxt = context.RequestContext('fake-user', 'fake-project')
        instance = fake_instance.fake_instance_obj(ctxt)
        image = {'id': 'fake-image-id', 'properties': {'mappings': []},
                 'status': 'fake-status', 'location': 'far-away'}
        flavor = fake_flavor.fake_flavor_obj(ctxt)
        filter_properties = {}
        instance_group = None
        spec = objects.RequestSpec.from_components(ctxt, instance, image,
                flavor, instance.numa_topology, instance.pci_requests,
                filter_properties, instance_group, instance.availability_zone)
        # Make sure that all fields are set using that helper method
        for field in [f for f in spec.obj_fields if f != 'id']:
            self.assertEqual(True, spec.obj_attr_is_set(field),
                             'Field: %s is not set' % field)
        # just making sure that the context is set by the method
        self.assertEqual(ctxt, spec._context)
    def test_get_scheduler_hint(self):
        spec_obj = objects.RequestSpec(scheduler_hints={'foo_single': ['1'],
                                                        'foo_mul': ['1', '2']})
        self.assertEqual('1', spec_obj.get_scheduler_hint('foo_single'))
        self.assertEqual(['1', '2'], spec_obj.get_scheduler_hint('foo_mul'))
        self.assertIsNone(spec_obj.get_scheduler_hint('oops'))
        self.assertEqual('bar', spec_obj.get_scheduler_hint('oops',
                                                            default='bar'))
    def test_get_scheduler_hint_with_no_hints(self):
        spec_obj = objects.RequestSpec()
        self.assertEqual('bar', spec_obj.get_scheduler_hint('oops',
                                                            default='bar'))
    # --- legacy dict round-trips ------------------------------------------
    @mock.patch.object(objects.RequestSpec, '_to_legacy_instance')
    @mock.patch.object(base, 'obj_to_primitive')
    def test_to_legacy_request_spec_dict(self, image_to_primitive,
                                         spec_to_legacy_instance):
        fake_image_dict = mock.Mock()
        image_to_primitive.return_value = fake_image_dict
        fake_instance = {'root_gb': 1.0,
                         'ephemeral_gb': 1.0,
                         'memory_mb': 1.0,
                         'vcpus': 1,
                         'numa_topology': None,
                         'pci_requests': None,
                         'project_id': '1',
                         'availability_zone': 'nova',
                         'uuid': '1'}
        spec_to_legacy_instance.return_value = fake_instance
        fake_flavor = objects.Flavor(root_gb=10,
                                     ephemeral_gb=0,
                                     memory_mb=512,
                                     vcpus=1)
        spec = objects.RequestSpec(num_instances=1,
                                   image=objects.ImageMeta(),
                                   # instance properties
                                   numa_topology=None,
                                   pci_requests=None,
                                   project_id=1,
                                   availability_zone='nova',
                                   instance_uuid='1',
                                   flavor=fake_flavor)
        spec_dict = spec.to_legacy_request_spec_dict()
        expected = {'num_instances': 1,
                    'image': fake_image_dict,
                    'instance_properties': fake_instance,
                    'instance_type': fake_flavor}
        self.assertEqual(expected, spec_dict)
    def test_to_legacy_request_spec_dict_with_unset_values(self):
        spec = objects.RequestSpec()
        self.assertEqual({'num_instances': 1,
                          'image': {},
                          'instance_properties': {},
                          'instance_type': {}},
                         spec.to_legacy_request_spec_dict())
    def test_to_legacy_filter_properties_dict(self):
        fake_numa_limits = objects.NUMATopologyLimits()
        fake_computes_obj = objects.ComputeNodeList(
            objects=[objects.ComputeNode(host='fake1',
                                         hypervisor_hostname='node1')])
        spec = objects.RequestSpec(
            ignore_hosts=['ignoredhost'],
            force_hosts=['fakehost'],
            force_nodes=['fakenode'],
            retry=objects.SchedulerRetries(num_attempts=1,
                                           hosts=fake_computes_obj),
            limits=objects.SchedulerLimits(numa_topology=fake_numa_limits,
                                           vcpu=1.0,
                                           disk_gb=10.0,
                                           memory_mb=8192.0),
            instance_group=objects.InstanceGroup(hosts=['fake1'],
                                                 policies=['affinity']),
            scheduler_hints={'foo': ['bar']})
        expected = {'ignore_hosts': ['ignoredhost'],
                    'force_hosts': ['fakehost'],
                    'force_nodes': ['fakenode'],
                    'retry': {'num_attempts': 1,
                              'hosts': [['fake1', 'node1']]},
                    'limits': {'numa_topology': fake_numa_limits,
                               'vcpu': 1.0,
                               'disk_gb': 10.0,
                               'memory_mb': 8192.0},
                    'group_updated': True,
                    'group_hosts': set(['fake1']),
                    'group_policies': set(['affinity']),
                    'scheduler_hints': {'foo': 'bar'}}
        self.assertEqual(expected, spec.to_legacy_filter_properties_dict())
    def test_to_legacy_filter_properties_dict_with_nullable_values(self):
        spec = objects.RequestSpec(force_hosts=None,
                                   force_nodes=None,
                                   retry=None,
                                   limits=None,
                                   instance_group=None,
                                   scheduler_hints=None)
        self.assertEqual({}, spec.to_legacy_filter_properties_dict())
    def test_to_legacy_filter_properties_dict_with_unset_values(self):
        spec = objects.RequestSpec()
        self.assertEqual({}, spec.to_legacy_filter_properties_dict())
    # --- DB-backed paths ---------------------------------------------------
    @mock.patch.object(request_spec.RequestSpec,
                       '_get_by_instance_uuid_from_db')
    def test_get_by_instance_uuid(self, get_by_uuid):
        fake_spec = fake_request_spec.fake_db_spec()
        get_by_uuid.return_value = fake_spec
        req_obj = request_spec.RequestSpec.get_by_instance_uuid(self.context,
                fake_spec['instance_uuid'])
        self.assertEqual(1, req_obj.num_instances)
        self.assertEqual(['host2', 'host4'], req_obj.ignore_hosts)
        self.assertEqual('fake', req_obj.project_id)
        self.assertEqual({'hint': ['over-there']}, req_obj.scheduler_hints)
        self.assertEqual(['host1', 'host3'], req_obj.force_hosts)
        self.assertIsNone(req_obj.availability_zone)
        self.assertEqual(['node1', 'node2'], req_obj.force_nodes)
        self.assertIsInstance(req_obj.image, objects.ImageMeta)
        self.assertIsInstance(req_obj.numa_topology,
                              objects.InstanceNUMATopology)
        self.assertIsInstance(req_obj.pci_requests,
                              objects.InstancePCIRequests)
        self.assertIsInstance(req_obj.flavor, objects.Flavor)
        self.assertIsInstance(req_obj.retry, objects.SchedulerRetries)
        self.assertIsInstance(req_obj.limits, objects.SchedulerLimits)
        self.assertIsInstance(req_obj.instance_group, objects.InstanceGroup)
    def _check_update_primitive(self, req_obj, changes):
        # shared helper: verify the serialized 'spec' blob written to the DB
        # round-trips back to an object equal to req_obj, field by field
        self.assertEqual(req_obj.instance_uuid, changes['instance_uuid'])
        serialized_obj = objects.RequestSpec.obj_from_primitive(
            jsonutils.loads(changes['spec']))
        # primitive fields
        for field in ['instance_uuid', 'num_instances', 'ignore_hosts',
                      'project_id', 'scheduler_hints', 'force_hosts',
                      'availability_zone', 'force_nodes']:
            self.assertEqual(getattr(req_obj, field),
                             getattr(serialized_obj, field))
        # object fields
        for field in ['image', 'numa_topology', 'pci_requests', 'flavor',
                      'retry', 'limits', 'instance_group']:
            self.assertDictEqual(
                getattr(req_obj, field).obj_to_primitive(),
                getattr(serialized_obj, field).obj_to_primitive())
    def test_create(self):
        req_obj = fake_request_spec.fake_spec_obj(remove_id=True)
        def _test_create_args(self2, context, changes):
            self._check_update_primitive(req_obj, changes)
            # DB creation would have set an id
            changes['id'] = 42
            return changes
        with mock.patch.object(request_spec.RequestSpec, '_create_in_db',
                               _test_create_args):
            req_obj.create()
    def test_create_id_set(self):
        req_obj = request_spec.RequestSpec(self.context)
        req_obj.id = 3
        self.assertRaises(exception.ObjectActionError, req_obj.create)
    def test_save(self):
        req_obj = fake_request_spec.fake_spec_obj()
        def _test_save_args(self2, context, instance_uuid, changes):
            self._check_update_primitive(req_obj, changes)
            # DB creation would have set an id
            changes['id'] = 42
            return changes
        with mock.patch.object(request_spec.RequestSpec, '_save_in_db',
                               _test_save_args):
            req_obj.save()
class TestRequestSpecObject(test_objects._LocalTest,
                            _TestRequestSpecObject):
    """Runs the shared RequestSpec tests against the local object layer."""
    pass
class TestRemoteRequestSpecObject(test_objects._RemoteTest,
                                  _TestRequestSpecObject):
    """Runs the shared RequestSpec tests over the remote object layer."""
    pass
|
{
"content_hash": "a14f23870a9b7bd8f33e55dd5e7e77bd",
"timestamp": "",
"source": "github",
"line_count": 493,
"max_line_length": 79,
"avg_line_length": 42.92292089249493,
"alnum_prop": 0.5557865885355134,
"repo_name": "dims/nova",
"id": "d68df0171f30f5d7ec00f721b3c258c2d4d32625",
"size": "21770",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nova/tests/unit/objects/test_request_spec.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16952469"
},
{
"name": "Shell",
"bytes": "36658"
},
{
"name": "Smarty",
"bytes": "317320"
}
],
"symlink_target": ""
}
|
import numpy as np
from simplenet.util import orthogonalize_vectors
def test_does_not_mutate_first_selected_vector():
    """
    Orthogonalizing the remaining vectors against an already-selected
    vector must leave that selected vector untouched.  Two vectors is
    the simplest scenario to test.
    """
    source = np.array([[2., 5.], [8., 4.]])
    result = np.copy(source)
    # Vector 0 was "selected"; orthogonalize only vector 1 against it.
    orthogonalize_vectors(source, result, np.array([1]), 0)
    # The selected (first) vector is unchanged.
    assert np.allclose(result[0], source[0])
def test_resulting_vectors_are_orthogonal():
    """
    A pair of vectors requires the calculation of a single
    projection coefficient of the first vector onto the
    second vector. This is the simplest scenario to test.
    """
    original_vectors = np.array([[2.,5.],[8.,4.]])
    orthogonalized_vectors = np.copy(original_vectors)
    # Imagine that we selected the first vector, and want to orthogonalize
    # the other one.
    orthogonalize_vectors(original_vectors, orthogonalized_vectors, np.array([1]), 0)
    # Dotted orthogonal vectors only have a diagonal component.  Compare the
    # off-diagonal magnitudes: the old signed check
    # `np.sum(dotted) - np.trace(dotted) < 0.0001` passed trivially whenever
    # the off-diagonal terms were negative, even if they were large.
    dotted = np.dot(orthogonalized_vectors, orthogonalized_vectors.T)
    off_diagonal = dotted - np.diag(np.diag(dotted))
    assert np.abs(off_diagonal).sum() < 0.0001
def test_larger_vector_collection_is_made_orthogonal():
    """
    Repeatedly orthogonalizing the remaining vectors after each selection
    should leave all four vectors mutually orthogonal.  (The old docstring
    was copy-pasted from the two-vector test.)
    """
    original_vectors = np.array([[2.,5.,3], [8.,4.,2], [-5,7,9], [-2,-2,-4]])
    orthogonalized_vectors = np.copy(original_vectors)
    # Select vectors 0, 1, 2 in turn, orthogonalizing the remainder each time.
    orthogonalize_vectors(original_vectors, orthogonalized_vectors, np.array([1,2,3]), 0)
    orthogonalize_vectors(original_vectors, orthogonalized_vectors, np.array([2,3]), 1)
    orthogonalize_vectors(original_vectors, orthogonalized_vectors, np.array([3]), 2)
    # Dotted orthogonal vectors only have a diagonal component.  Compare the
    # off-diagonal magnitudes: the old signed check
    # `np.sum(dotted) - np.trace(dotted) < 0.0001` passed trivially whenever
    # the off-diagonal terms were negative, even if they were large.
    dotted = np.dot(orthogonalized_vectors, orthogonalized_vectors.T)
    off_diagonal = dotted - np.diag(np.diag(dotted))
    assert np.abs(off_diagonal).sum() < 0.0001
|
{
"content_hash": "00bf771e7731e90b1b1bc281a873fee7",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 89,
"avg_line_length": 39.76271186440678,
"alnum_prop": 0.710997442455243,
"repo_name": "rileymcdowell/simplennet",
"id": "9a1235e5e038d4adbaa6c0959d9caa492cf4cd94",
"size": "2346",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/util/test_orthogonalizer.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
from flask import render_template, redirect, abort, url_for, request, \
make_response, send_from_directory, send_file
from flask_admin.contrib import sqla
from flask_security import current_user, login_required
from app import app
from app.models import Panther, Link, Location
@app.route('/')
@app.route('/index')
def index():
    """Render the landing page with link lists grouped by category."""
    def links_for(category):
        # Each category renders as its own ordered list of links.
        return Link.query.filter_by(category=category).order_by(Link.order)

    return render_template(
        'index.html',
        title='Lakenheath Ops Info',
        ops_resources=links_for('ops_resources'),
        personal=links_for('personal'),
        base_services=links_for('base_services'),
        queep=links_for('queep'),
    )
# @app.route('/contacts')
# def phone_numbers():
#
# contacts = [
# # V
# #
#
# ]
#
# return render_template('contacts.html', contacts=contacts)
@app.route('/pass')
@login_required
def pass_guidance():
    """Serve the pass guidance PDF inline (viewed in the browser, not downloaded)."""
    pdf_path = 'static/pass_guidance.pdf'
    return send_file(pdf_path, as_attachment=False)
@app.route('/pass-form')
@login_required
def pass_form():
    """Serve the Form 988 pass request as a download (browsers can't render .xfdl)."""
    form_path = 'static/form_988_pass.xfdl'
    return send_file(form_path, as_attachment=True)
@app.route('/map')
@login_required
def base_map():
    """Render the base map with every saved Location marker."""
    map_center = [52.397283, 0.551360]
    # Serialize DB rows into plain dicts; the template engine converts these
    # into JS objects, the same approach as the roster view.
    location_dicts = [
        {'label': loc.label, 'lat': loc.lat, 'lon': loc.lon}
        for loc in Location.query.all()
    ]
    return render_template('map.html',
                           title="Lakenheath Map",
                           center=map_center,
                           # Set map type in map.js due to limitations.
                           # map_type='SATELLITE',
                           zoom=16,
                           locations_js=location_dicts)
@app.route('/housing')
def housing():
    """Render the static housing-information page (no login required)."""
    page_title = "Housing near Lakenheath"
    return render_template('housing.html', title=page_title)
@app.route('/roster')
@login_required
def roster():
    """Render the squadron roster for confirmed, logged-in users."""
    if not current_user.confirmed_at:
        return render_template('not_confirmed.html')
    panthers = Panther.query.order_by(Panther.callsign)
    # Serialize active members to plain dicts instead of model objects; the
    # template engine converts the dicts into JS objects.  None values are
    # replaced with '' because nulls raise javascript errors and keep the
    # roster from displaying.
    panthers_js = []
    for panther in panthers:
        if not panther.active:
            continue
        entry = {'first_name': panther.first_name,
                 'last_name': panther.last_name,
                 'callsign': panther.callsign,
                 'email': panther.email,
                 'phone': panther.phone,
                 'flight': panther.flight,
                 'full_name': panther.full_name(),
                 'phone_formatted': panther.phone_formatted()}
        panthers_js.append({key: ('' if value is None else value)
                            for key, value in entry.items()})
    return render_template('roster.html',
                           title="Squadron roster",
                           panthers_js=panthers_js)
# Create customized model view class
class AdminModelView(sqla.ModelView):
    """Flask-Admin model view restricted to authenticated superusers."""

    def is_accessible(self):
        """Only active, authenticated users holding the 'superuser' role get in."""
        # NOTE(review): is_active / is_authenticated are invoked as methods
        # here; on newer Flask-Login they are properties — confirm the pinned
        # version before changing.
        if not current_user.is_active() or not current_user.is_authenticated():
            return False
        if current_user.has_role('superuser'):
            return True
        return False

    def _handle_view(self, name, **kwargs):
        """
        Override builtin _handle_view in order to redirect users when a view is
        not accessible.
        """
        if self.is_accessible():
            return
        if current_user.is_authenticated():
            # permission denied
            abort(403)
        else:
            # login
            return redirect(url_for('security.login', next=request.url))
|
{
"content_hash": "3eb98829624d444ac99a5f16016a998d",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 87,
"avg_line_length": 32.64661654135338,
"alnum_prop": 0.5746199907876555,
"repo_name": "David-OConnor/lakenheath",
"id": "180d1686baecd8d4c4818ca9fa3a474c7172c4d2",
"size": "4342",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "135467"
},
{
"name": "HTML",
"bytes": "19255"
},
{
"name": "JavaScript",
"bytes": "7283"
},
{
"name": "Python",
"bytes": "34076"
}
],
"symlink_target": ""
}
|
""" Utils to train DistilBERT
adapted in part from Facebook, Inc XLM model (https://github.com/facebookresearch/XLM)
"""
import json
import logging
import os
import socket
import git
import numpy as np
import torch
# Configure root logging once at import time; module loggers inherit this
# format (timestamp, level, logger name, PID — PID helps in multi-process
# distributed training).
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
    datefmt="%m/%d/%Y %H:%M:%S",
    level=logging.INFO,
)
logger = logging.getLogger(__name__)
def git_log(folder_path: str):
    """
    Log commit info.

    Writes the repo id, HEAD SHA and active branch of the enclosing git
    repository to ``<folder_path>/git_log.json`` for experiment tracking.
    """
    repo = git.Repo(search_parent_directories=True)
    commit_info = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
    }
    with open(os.path.join(folder_path, "git_log.json"), "w") as log_file:
        json.dump(commit_info, log_file, indent=4)
def init_gpu_params(params):
    """
    Handle single and multi-GPU / multi-node.

    Mutates ``params`` in place, filling in rank/topology fields
    (local_rank, global_rank, world_size, n_nodes, node_id, n_gpu_per_node,
    is_master, multi_gpu, multi_node) and initializing the NCCL process
    group when more than one GPU is requested.
    """
    # CPU-only run: fill in trivial single-process values and bail out.
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return
    assert torch.cuda.is_available()
    logger.info("Initializing GPUs")
    if params.n_gpu > 1:
        # Multi-GPU: topology comes from the launcher's environment
        # variables (torch.distributed.launch-style).
        assert params.local_rank != -1
        params.world_size = int(os.environ["WORLD_SIZE"])
        params.n_gpu_per_node = int(os.environ["N_GPU_NODE"])
        params.global_rank = int(os.environ["RANK"])
        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True
        # Cross-check derived topology against the launcher's env vars.
        assert params.n_nodes == int(os.environ["N_NODES"])
        assert params.node_id == int(os.environ["NODE_RANK"])
    # local job (single GPU)
    else:
        assert params.local_rank == -1
        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False
    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node
    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1
    # summary
    PREFIX = f"--- Global rank: {params.global_rank} - "
    logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes)
    logger.info(PREFIX + "Node ID        : %i" % params.node_id)
    logger.info(PREFIX + "Local rank     : %i" % params.local_rank)
    logger.info(PREFIX + "World size     : %i" % params.world_size)
    logger.info(PREFIX + "GPUs per node  : %i" % params.n_gpu_per_node)
    logger.info(PREFIX + "Master         : %s" % str(params.is_master))
    logger.info(PREFIX + "Multi-node     : %s" % str(params.multi_node))
    logger.info(PREFIX + "Multi-GPU      : %s" % str(params.multi_gpu))
    logger.info(PREFIX + "Hostname       : %s" % socket.gethostname())
    # set GPU device
    torch.cuda.set_device(params.local_rank)
    # initialize multi-GPU
    if params.multi_gpu:
        logger.info("Initializing PyTorch distributed")
        torch.distributed.init_process_group(
            init_method="env://",
            backend="nccl",
        )
def set_seed(args):
    """
    Set the random seed.

    Seeds numpy and torch (and all CUDA devices when GPUs are in use) from
    ``args.seed``. Note: Python's own ``random`` module is not seeded here.
    """
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
|
{
"content_hash": "a93de3158813aeed72039c1081cd3592",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 90,
"avg_line_length": 30.65546218487395,
"alnum_prop": 0.6055372807017544,
"repo_name": "huggingface/transformers",
"id": "6d439453fe08ded6735208035dc0ed647849e957",
"size": "4280",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "examples/research_projects/distillation/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "6021"
},
{
"name": "C++",
"bytes": "12959"
},
{
"name": "Cuda",
"bytes": "175419"
},
{
"name": "Dockerfile",
"bytes": "18218"
},
{
"name": "Jsonnet",
"bytes": "937"
},
{
"name": "Makefile",
"bytes": "3430"
},
{
"name": "Python",
"bytes": "35742012"
},
{
"name": "Shell",
"bytes": "30374"
}
],
"symlink_target": ""
}
|
import copy
class SigOptException(Exception):
    """Base class for all errors raised by the SigOpt client."""
    pass
class ConnectionException(SigOptException):
    """
    An exception that occurs when the SigOpt API was unavailable.
    """
    def __init__(self, message):
        super().__init__(message)
        self.message = message
    def __str__(self):
        detail = self.message if self.message is not None else ''
        return '{0}: {1}'.format('ConnectionException', detail)
class ApiException(SigOptException):
    """
    An exception that occurs when the SigOpt API was contacted successfully, but
    it responded with an error.
    """
    def __init__(self, body, status_code):
        # The API error message lives under 'message' in the response body,
        # when a body was returned at all.
        message = None if body is None else body.get('message', None)
        self.message = message
        self._body = body
        if message is None:
            super().__init__()
        else:
            super().__init__(message)
        self.status_code = status_code
    def __str__(self):
        detail = self.message if self.message is not None else ''
        return '{0} ({1}): {2}'.format('ApiException', self.status_code, detail)
    def to_json(self):
        # Return a defensive copy so callers cannot mutate the stored body.
        return copy.deepcopy(self._body)
class RunException(SigOptException):
    """Raised for errors specific to SigOpt runs."""
    pass
class ConflictingProjectException(SigOptException):
    def __init__(self, project_id):
        super().__init__(f"The project with id '{project_id}' already exists.")
class ProjectNotFoundException(SigOptException):
    def __init__(self, project_id):
        super().__init__(
            f"The project '{project_id}' does not exist.\n"
            "Try any of the following steps to resolve this:\n"
            f"  * create a project with the ID '{project_id}' with the command\n"
            f"    `sigopt create project --project '{project_id}'` or by visiting\n"
            "    https://app.sigopt.com/projects\n"
            "  * change the project ID by setting the SIGOPT_PROJECT environment variable or\n"
            "    by renaming the current directory\n"
            f"  * (advanced) if the project you want to use is in a different team,\n"
            "    change your API token by switching to that team and then going to\n"
            "    https://app.sigopt.com/tokens/info"
        )
|
{
"content_hash": "b3098e95bb440b4b64831b43f24fb6e1",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 89,
"avg_line_length": 31.045454545454547,
"alnum_prop": 0.6466569058077111,
"repo_name": "sigopt/sigopt-python",
"id": "cead4e2b73fd7b930c12e686ffdc632dab93283d",
"size": "2120",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sigopt/exception.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "2411"
},
{
"name": "Makefile",
"bytes": "545"
},
{
"name": "Python",
"bytes": "542280"
},
{
"name": "Shell",
"bytes": "868"
}
],
"symlink_target": ""
}
|
"""Tests for distutils.command.bdist_dumb."""
import os
import sys
import zipfile
import unittest
from test.support import run_unittest
from distutils.core import Distribution
from distutils.command.bdist_dumb import bdist_dumb
from distutils.tests import support
SETUP_PY = """\
from distutils.core import setup
import foo
setup(name='foo', version='0.1', py_modules=['foo'],
url='xxx', author='xxx', author_email='xxx')
"""
# Zip creation requires zlib; record its availability so the test below
# can be skipped on builds without it.
try:
    import zlib
    ZLIB_SUPPORT = True
except ImportError:
    ZLIB_SUPPORT = False
class BuildDumbTestCase(support.TempdirManager,
                        support.LoggingSilencer,
                        support.EnvironGuard,
                        unittest.TestCase):
    """Tests for the bdist_dumb command (zip-based 'dumb' built distribution)."""
    def setUp(self):
        super(BuildDumbTestCase, self).setUp()
        # Remember cwd and argv so tearDown can restore them: the test
        # chdirs into a temporary package dir and rewrites sys.argv.
        self.old_location = os.getcwd()
        self.old_sys_argv = sys.argv, sys.argv[:]
    def tearDown(self):
        os.chdir(self.old_location)
        sys.argv = self.old_sys_argv[0]
        sys.argv[:] = self.old_sys_argv[1]
        super(BuildDumbTestCase, self).tearDown()
    @unittest.skipUnless(ZLIB_SUPPORT, 'Need zlib support to run')
    def test_simple_built(self):
        # let's create a simple package
        tmp_dir = self.mkdtemp()
        pkg_dir = os.path.join(tmp_dir, 'foo')
        os.mkdir(pkg_dir)
        self.write_file((pkg_dir, 'setup.py'), SETUP_PY)
        self.write_file((pkg_dir, 'foo.py'), '#')
        self.write_file((pkg_dir, 'MANIFEST.in'), 'include foo.py')
        self.write_file((pkg_dir, 'README'), '')
        dist = Distribution({'name': 'foo', 'version': '0.1',
                             'py_modules': ['foo'],
                             'url': 'xxx', 'author': 'xxx',
                             'author_email': 'xxx'})
        dist.script_name = 'setup.py'
        os.chdir(pkg_dir)
        sys.argv = ['setup.py']
        cmd = bdist_dumb(dist)
        # so the output is the same no matter
        # what is the platform
        cmd.format = 'zip'
        cmd.ensure_finalized()
        cmd.run()
        # see what we have
        dist_created = os.listdir(os.path.join(pkg_dir, 'dist'))
        base = "%s.%s.zip" % (dist.get_fullname(), cmd.plat_name)
        self.assertEqual(dist_created, [base])
        # now let's check what we have in the zip file
        fp = zipfile.ZipFile(os.path.join('dist', base))
        try:
            contents = fp.namelist()
        finally:
            fp.close()
        contents = sorted(os.path.basename(fn) for fn in contents)
        wanted = ['foo-0.1-py%s.%s.egg-info' % sys.version_info[:2], 'foo.py']
        if not sys.dont_write_bytecode:
            # a .pyc is only present when bytecode writing is enabled
            wanted.append('foo.%s.pyc' % sys.implementation.cache_tag)
        self.assertEqual(contents, sorted(wanted))
def test_suite():
    """Return the bdist_dumb test suite.

    ``unittest.makeSuite`` is deprecated (and removed in Python 3.13);
    ``TestLoader.loadTestsFromTestCase`` builds an identical suite.
    """
    return unittest.TestLoader().loadTestsFromTestCase(BuildDumbTestCase)
if __name__ == '__main__':
    # Allow running this test module directly: python test_bdist_dumb.py
    run_unittest(test_suite())
|
{
"content_hash": "7d109e42398c238711694631b575c9cb",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 78,
"avg_line_length": 30.88659793814433,
"alnum_prop": 0.5607476635514018,
"repo_name": "Suwmlee/XX-Net",
"id": "9f3aaf1f655fe54c798e3b9c4cc45504272c6219",
"size": "2996",
"binary": false,
"copies": "3",
"ref": "refs/heads/python3",
"path": "Python3/lib/distutils/tests/test_bdist_dumb.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "200"
},
{
"name": "C",
"bytes": "33097"
},
{
"name": "CSS",
"bytes": "86345"
},
{
"name": "HTML",
"bytes": "141382"
},
{
"name": "JavaScript",
"bytes": "345991"
},
{
"name": "PHP",
"bytes": "10671"
},
{
"name": "Python",
"bytes": "17312939"
},
{
"name": "Shell",
"bytes": "4647"
},
{
"name": "Visual Basic",
"bytes": "382"
}
],
"symlink_target": ""
}
|
"""Counterfactual explanations using linear model."""
import copy
from typing import Any, List, Optional, Text, Dict, Sequence, Iterable
from absl import logging
from lit_nlp.api import components as lit_components
from lit_nlp.api import dataset as lit_dataset
from lit_nlp.api import dtypes
from lit_nlp.api import model as lit_model
from lit_nlp.api import types
from lit_nlp.components.citrus import lemon
from lit_nlp.components.citrus import utils as citrus_utils
from lit_nlp.lib import utils
import numpy as np
JsonDict = types.JsonDict
Spec = types.Spec
def new_example(original_example: JsonDict, field: Text, new_value: Any):
  """Deep copies the example and replaces `field` with `new_value`."""
  updated = copy.deepcopy(original_example)
  updated[field] = new_value
  return updated
# TODO(lit-dev): Change to calling the CachingModelWrapper for predictions
# instead of using a Dict with the prediction values.
def make_predict_fn(counterfactuals: Dict[str, Sequence[float]]):
  """Makes a predict function that returns pre-computed predictions.

  Since LIT already has cached predictions for the counterfactuals, this
  mapping can be used in place of a function that calls the model.

  Args:
    counterfactuals: a dict mapping counterfactual strings to prediction values.

  Returns:
    A predict function to be used in lemon.explain().
  """
  def _predict_fn(sentences: Iterable[str]):
    # Look up the cached prediction for each sentence; unknown sentences
    # yield None entries in the resulting array.
    cached = [counterfactuals.get(sentence) for sentence in sentences]
    return np.array(cached)
  return _predict_fn
class LEMON(lit_components.Interpreter):
  """LIME-like Explanation Magic Over Novels (LEMON).
  See citrus/lemon.py description for details.
  """
  def __init__(self):
    pass
  def run(self,
          inputs: List[JsonDict],
          model: lit_model.Model,
          dataset: lit_dataset.Dataset,
          model_outputs: Optional[List[JsonDict]] = None,
          config: Optional[JsonDict] = None) -> Optional[List[JsonDict]]:
    """Run this component, given a model and input(s)."""
    if not inputs: return
    # Find keys of input (text) segments to explain.
    # Search in the input spec, since it's only useful to look at ones that are
    # used by the model.
    text_keys = utils.find_spec_keys(model.input_spec(), types.TextSegment)
    if not text_keys:
      logging.warning('LEMON requires text inputs.')
      return None
    logging.info('Found text fields for LEMON attribution: %s', str(text_keys))
    # NOTE(review): assumes config is provided and contains 'pred_key',
    # 'class_to_explain' and 'lowercase_tokens' — confirm with callers.
    pred_key = config['pred_key']
    output_probs = np.array([output[pred_key] for output in model_outputs])
    # Explain the input given counterfactuals.
    # Dict[field name -> interpretations]
    result = {}
    # Explain each text segment in the input, keeping the others constant.
    for text_key in text_keys:
      sentences = [item[text_key] for item in inputs]
      input_to_prediction = dict(zip(sentences, output_probs))
      # The first input is the sentence being explained; the remaining
      # inputs are its counterfactual variants.
      input_string = sentences[0]
      counterfactuals = sentences[1:]
      # Remove duplicate counterfactuals.
      counterfactuals = list(set(counterfactuals))
      logging.info('Explaining: %s', input_string)
      predict_proba = make_predict_fn(input_to_prediction)
      # Perturbs the input string, gets model predictions, fits linear model.
      explanation = lemon.explain(
          input_string,
          counterfactuals,
          predict_proba,
          class_to_explain=config['class_to_explain'],
          lowercase_tokens=config['lowercase_tokens'])
      scores = np.array(explanation.feature_importance)
      # Normalize feature values.
      scores = citrus_utils.normalize_scores(scores)
      result[text_key] = dtypes.TokenSalience(input_string.split(), scores)
    return [result]
|
{
"content_hash": "a4996c5b090842d4f35eff015177f048",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 80,
"avg_line_length": 33.151785714285715,
"alnum_prop": 0.7045515755453811,
"repo_name": "pair-code/lit",
"id": "6aec6e308d881cee99e540dd0f33f3dbbbeacdb4",
"size": "4387",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "lit_nlp/components/lemon_explainer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "57958"
},
{
"name": "Dockerfile",
"bytes": "1819"
},
{
"name": "HTML",
"bytes": "1408"
},
{
"name": "JavaScript",
"bytes": "48969"
},
{
"name": "Liquid",
"bytes": "13294"
},
{
"name": "Python",
"bytes": "392936"
},
{
"name": "Shell",
"bytes": "1893"
},
{
"name": "TypeScript",
"bytes": "506076"
}
],
"symlink_target": ""
}
|
import lxml.html.soupparser
import pandas as pd
import requests
class StockUtils(object):
    """Helpers for scraping stock data from Sina Finance."""

    @staticmethod
    def new_stocks():
        """Fetch the new-stock (IPO) issue table from Sina Finance.

        Returns a DataFrame with columns
        ['code', 'xcode', 'name', 'ipo_date', 'price']; both code columns
        are zero-padded to six digits.
        """
        url = 'http://vip.stock.finance.sina.com.cn/corp/view/vRPD_NewStockIssue.php?page=1&cngem=0&orderBy=NetDate&orderType=desc'
        request = requests.get(url)
        doc = lxml.html.soupparser.fromstring(request.content, features='html.parser')
        table = doc.cssselect('table#NewStockTable')[0]
        # Drop the header block; read_html then sees only data rows.
        table.remove(table.cssselect('thead')[0])
        table_html = lxml.html.etree.tostring(table).decode('utf-8')
        df = pd.read_html(table_html, skiprows=[0, 1])[0]
        # DataFrame.select() was deprecated in pandas 0.21 and removed in
        # 1.0; select the wanted columns by label instead (read_html
        # produces integer column labels here).
        df = df.loc[:, [0, 1, 2, 3, 7]]
        df.columns = ['code', 'xcode', 'name', 'ipo_date', 'price']
        df['code'] = df['code'].map(lambda x: str(x).zfill(6))
        df['xcode'] = df['xcode'].map(lambda x: str(x).zfill(6))
        return df
|
{
"content_hash": "1fbc0008757b1b97906bee79307e2720",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 131,
"avg_line_length": 44.85,
"alnum_prop": 0.6265328874024526,
"repo_name": "Pyangs/ShiPanE-Python-SDK",
"id": "5fe16e73ff5d8928cb662267fcd091472b0a1339",
"size": "922",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "shipane_sdk/stock.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "185"
},
{
"name": "Jupyter Notebook",
"bytes": "1200"
},
{
"name": "Python",
"bytes": "118304"
},
{
"name": "Shell",
"bytes": "227"
}
],
"symlink_target": ""
}
|
import re
import web
from libweasyl.ratings import GENERAL, MODERATE, MATURE, EXPLICIT
from weasyl import character, journal, media, searchtag, submission
from weasyl import define as d
# Maps "#type" search modifiers to the internal find-type keys.
_query_find_modifiers = {
    "#submission": "submit",
    "#character": "char",
    "#journal": "journal",
    "#user": "user",
}
# Maps "#rating" search modifiers to libweasyl rating codes.
_query_rating_modifiers = {
    "#general": GENERAL.code,
    "#moderate": MODERATE.code,
    "#mature": MATURE.code,
    "#explicit": EXPLICIT.code,
}
# Splits a raw query string into individual criteria.
_query_delimiter = re.compile(r"[\s,;]+")
# find-type -> (contype code, favorite-type letter, table, id column, subtype).
_table_information = {
    "submit": (10, "s", "submission", "submitid", "subtype"),
    # The subtype values for characters and journals are fake
    # and set to permit us to reuse the same sql query.
    "char": (20, "f", "character", "charid", 3999),
    "journal": (30, "j", "journal", "journalid", 3999),
}
class Query:
    """A parsed search query: tag sets, user filters, ratings and find-type."""
    def __init__(self):
        self.possible_includes = set()       # |tag — at least one should match
        self.required_includes = set()       # +tag or bare tag — must match
        self.required_excludes = set()       # -tag — must not match
        self.required_user_includes = set()  # user:/+user: — limit to these users
        self.required_user_excludes = set()  # -user: — exclude these users
        self.ratings = set()                 # rating codes from #general etc.
        self.find = None                     # "submit", "char", "journal" or "user"
    def add_criterion(self, criterion):
        """Classify one delimited search term and record it on this query."""
        def add_nonempty(s, item):
            # Guards against empty tags/usernames from bare prefixes like "+".
            if item:
                s.add(item)
        find_modifier = _query_find_modifiers.get(criterion)
        if find_modifier:
            self.find = find_modifier
            return
        rating_modifier = _query_rating_modifiers.get(criterion)
        if rating_modifier:
            self.ratings.add(rating_modifier)
            return
        if criterion.startswith(("user:", "+user:")):
            user = d.get_sysname(criterion.split(":", 1)[1])
            add_nonempty(self.required_user_includes, user)
        elif criterion.startswith("-user:"):
            user = d.get_sysname(criterion.split(":", 1)[1])
            add_nonempty(self.required_user_excludes, user)
        elif criterion.startswith("+"):
            tag = d.get_search_tag(criterion[1:])
            add_nonempty(self.required_includes, tag)
        elif criterion.startswith("-"):
            tag = d.get_search_tag(criterion[1:])
            add_nonempty(self.required_excludes, tag)
        elif criterion.startswith("|"):
            tag = d.get_search_tag(criterion[1:])
            add_nonempty(self.possible_includes, tag)
        else:
            tag = d.get_search_tag(criterion)
            add_nonempty(self.required_includes, tag)
    def __nonzero__(self):
        # True when any filter at all was specified (Python 2 truth protocol).
        return bool(
            self.possible_includes or
            self.required_includes or
            self.required_excludes or
            self.required_user_includes or
            self.required_user_excludes or
            self.ratings)
    # Python 3 truth-testing calls __bool__, not __nonzero__; alias it so
    # `if query:` behaves identically on both versions.
    __bool__ = __nonzero__
    @classmethod
    def parse(cls, query_string, find_default):
        """
        Parses a search query string into collections of tags and users.
        """
        query = Query()
        for criterion in _query_delimiter.split(query_string.strip()):
            if criterion:
                query.add_criterion(criterion)
        # Required tags dominate: drop them from the optional and excluded
        # sets, and drop excluded tags from the optional set.
        query.possible_includes.difference_update(query.required_includes)
        query.required_excludes.difference_update(query.required_includes)
        query.possible_includes.difference_update(query.required_excludes)
        if query.find is None:
            query.find = find_default
        query.text = query_string
        return query
def select_users(q):
    """Search profiles whose username or full name contains any term of *q*.

    Returns up to 100 result dicts (contype 50) with user media populated.
    """
    terms = q.lower().split()
    # The terms are joined into a SIMILAR TO alternation; values are passed
    # as bound parameters, never interpolated into the SQL text.
    statement = """
        SELECT userid, full_name, unixtime, username FROM profile
        WHERE LOWER(username) SIMILAR TO ('%%(' || %(terms)s || ')%%') ESCAPE ''
            OR LOWER(full_name) SIMILAR TO ('%%(' || %(terms)s || ')%%') ESCAPE ''
        ORDER BY username
        LIMIT 100
    """
    query = d.engine.execute(statement, terms="|".join(terms))
    ret = [{
        "contype": 50,
        "userid": i.userid,
        "title": i.full_name,
        "rating": "",
        "unixtime": i.unixtime,
        "username": i.username,
    } for i in query]
    media.populate_with_user_media(ret)
    return ret
def select(userid, rating, limit,
           search, within, cat, subcat, backid, nextid):
    """Run a tag/user search and return (results, next_count, back_count).

    Builds one parameterized SQL statement from the parsed ``search`` Query,
    the viewer's rating limit, optional scoping (``within`` notifications /
    favorites / friends / follows, submission ``cat``/``subcat``) and
    keyset pagination via ``backid``/``nextid``.
    """
    type_code, type_letter, table, select, subtype = _table_information[search.find]
    # Begin statement
    statement_from = ["FROM {table} content INNER JOIN profile ON content.userid = profile.userid"]
    statement_where = ["WHERE content.rating <= %(rating)s AND content.settings !~ '[fhm]'"]
    statement_group = []
    if search.find == "submit":
        statement_from.append("INNER JOIN submission_tags ON content.submitid = submission_tags.submitid")
    if search.required_includes:
        if search.find == "submit":
            statement_from.append("AND submission_tags.tags @> %(required_includes)s")
        else:
            # Non-submission content joins the per-type tag map; the GROUP BY
            # with a COUNT check enforces that ALL required tags matched.
            statement_from.append("INNER JOIN searchmap{find} ON targetid = content.{select}")
            statement_where.append("AND searchmap{find}.tagid = ANY (%(required_includes)s)")
            statement_group.append(
                "GROUP BY content.{select}, profile.username HAVING COUNT(searchmap{find}.tagid) = %(required_include_count)s")
    # Submission category or subcategory
    if search.find == "submit":
        if subcat:
            statement_where.append("AND content.subtype = %(subcategory)s")
        elif cat:
            statement_where.append("AND content.subtype >= %(category)s AND content.subtype < %(category)s + 1000")
    if userid:
        if within == "notify":
            # Search within notifications
            statement_from.append("INNER JOIN welcome ON welcome.targetid = content.{select}")
            statement_where.append("AND welcome.userid = %(userid)s")
            statement_where.append({
                "submit": "AND welcome.type IN (2010, 2030, 2040)",
                "char": "AND welcome.type = 2050",
                "journal": "AND welcome.type IN (1010, 1020)",
            }[search.find])
        elif within == "fave":
            # Search within favorites
            statement_from.append("INNER JOIN favorite ON favorite.targetid = content.{select}")
            statement_where.append("AND favorite.userid = %(userid)s AND favorite.type = %(type)s")
        elif within == "friend":
            # Search within friends content
            statement_from.append(
                "INNER JOIN frienduser ON (frienduser.userid, frienduser.otherid) = (%(userid)s, content.userid)"
                " OR (frienduser.userid, frienduser.otherid) = (content.userid, %(userid)s)")
        elif within == "follow":
            # Search within following content
            statement_from.append(
                "INNER JOIN watchuser ON (watchuser.userid, watchuser.otherid) = (%(userid)s, content.userid)")
    # Search within rating
    if userid and search.ratings:
        statement_where.append("AND content.rating = ANY (%(ratings)s)")
    # Blocked tags and ignored users
    if userid:
        statement_where.append("""
            AND NOT EXISTS (
                SELECT 0 FROM ignoreuser
                WHERE userid = %(userid)s
                    AND otherid = content.userid)
            AND NOT EXISTS (
                SELECT 0 FROM searchmap{find}
                WHERE targetid = content.{select}
                    AND tagid IN (SELECT tagid FROM blocktag WHERE userid = %(userid)s AND rating <= content.rating))
        """)
    if search.possible_includes:
        if search.find == "submit":
            statement_where.append("AND submission_tags.tags && %(possible_includes)s")
        else:
            statement_where.append("""
                AND EXISTS (
                    SELECT 0 FROM searchmap{find}
                    WHERE targetid = content.{select}
                        AND tagid = ANY (%(possible_includes)s)
                )
            """)
    if search.required_excludes:
        if search.find == "submit":
            statement_where.append("AND NOT submission_tags.tags && %(required_excludes)s")
        else:
            statement_where.append("""
                AND NOT EXISTS (
                    SELECT 0 FROM searchmap{find}
                    WHERE targetid = content.{select}
                        AND tagid = ANY (%(required_excludes)s)
                )
            """)
    if search.required_user_includes:
        statement_from.append("INNER JOIN login login_include ON content.userid = login_include.userid")
        statement_where.append("AND login_include.login_name = ANY (%(required_user_includes)s)")
    if search.required_user_excludes:
        statement_from.append("INNER JOIN login login_exclude ON content.userid = login_exclude.userid")
        statement_where.append("AND login_exclude.login_name != ALL (%(required_user_excludes)s)")
    def make_statement(statement_select, statement_additional_where, statement_order):
        # Assemble the final SQL, substituting table/column placeholders.
        return " ".join([
            statement_select,
            " ".join(statement_from),
            " ".join(statement_where),
            statement_additional_where,
            " ".join(statement_group),
            statement_order,
        ]).format(
            table=table,
            find=search.find,
            select=select,
            subtype=subtype,
            title_field="char_name" if search.find == "char" else "title"
        )
    # Keyset pagination: paging backward selects ids above backid (ascending),
    # forward selects ids below nextid (descending).
    pagination_filter = (
        "AND content.{select} > %(backid)s" if backid else
        "AND content.{select} < %(nextid)s" if nextid else
        "")
    statement = make_statement(
        """
        SELECT
            content.{select}, content.{title_field} AS title, content.rating, content.unixtime, content.userid,
            content.settings, profile.username, {subtype} as subtype
        """,
        pagination_filter,
        "ORDER BY content.{{select}} {order} LIMIT %(limit)s".format(order="" if backid else "DESC"))
    all_names = (
        search.possible_includes |
        search.required_includes |
        search.required_excludes)
    tag_ids = searchtag.get_ids(all_names)
    def get_ids(names):
        # Unknown tags map to -1 so they can never match a real tagid.
        return [tag_ids.get(name, -1) for name in names]
    params = {
        "possible_includes": get_ids(search.possible_includes),
        "required_includes": get_ids(search.required_includes),
        "required_excludes": get_ids(search.required_excludes),
        "required_user_includes": list(search.required_user_includes),
        "required_user_excludes": list(search.required_user_excludes),
        "type": type_letter,
        "userid": userid,
        "rating": rating,
        "ratings": list(search.ratings),
        "category": cat,
        "subcategory": subcat,
        "limit": limit,
        "backid": backid,
        "nextid": nextid,
        "required_include_count": len(search.required_includes),
    }
    query = d.engine.execute(statement, **params)
    ret = [{
        "contype": type_code,
        select: i[select],
        "title": i.title,
        "subtype": i.subtype,
        "rating": i.rating,
        "unixtime": i.unixtime,
        "userid": i.userid,
        "username": i.username,
        "settings": i.settings,
    } for i in query]
    # Attach the media appropriate to the content type.
    if search.find == "submit":
        media.populate_with_submission_media(ret)
    elif search.find == "char":
        for r in ret:
            r["sub_media"] = character.fake_media_items(
                r["charid"], r["userid"], d.get_sysname(r["username"]), r["settings"])
    elif search.find == "journal":
        media.populate_with_user_media(ret)
    # Count how many results exist before/after this page for pagination links.
    if backid:
        back_count = d.engine.execute(
            make_statement("SELECT COUNT(*) FROM (SELECT 1", pagination_filter, ") _"), **params).scalar() - len(ret)
    elif nextid:
        back_count = (d.engine.execute(
            make_statement("SELECT COUNT(*) FROM (SELECT 1", "AND content.{select} >= %(nextid)s", ") _"),
            **params).scalar())
    else:
        back_count = 0
    if backid:
        next_count = (d.engine.execute(
            make_statement("SELECT COUNT(*) FROM (SELECT 1", "AND content.{select} <= %(backid)s", ") _"),
            **params).scalar())
        # Backward pages were selected ascending; restore display order.
        return list(reversed(ret)), next_count, back_count
    else:
        next_count = d.engine.execute(
            make_statement("SELECT COUNT(*) FROM (SELECT 1", pagination_filter, ") _"), **params).scalar() - len(ret)
        return ret, next_count, back_count
# form
# find backid
# cat nextid
def browse(userid, rating, limit, form, find=None, config=None):
    """Browse recent content of one type without a search query."""
    backid = d.get_int(form.backid)
    nextid = d.get_int(form.nextid)
    if find:
        form.find = find
    # Dispatch to the per-type listing query.
    if form.find == "char":
        results = character.select_list(userid, rating, limit, backid=backid, nextid=nextid, config=config)
    elif form.find == "journal":
        results = journal.select_user_list(userid, rating, limit, backid=backid, nextid=nextid, config=config)
    else:
        results = submission.select_list(userid, rating, limit, backid=backid, nextid=nextid,
                                         subcat=d.get_int(form.cat) if d.get_int(form.cat) in [1000, 2000, 3000] else None,
                                         config=config)
    # Derive pagination anchors from the first/last rows when not supplied.
    if results and not backid:
        backid = results[0][form.find + "id"]
    if results and not nextid:
        nextid = results[-1][form.find + "id"]
    return results
|
{
"content_hash": "221966a604d21c3721ce3b6b0f706bd6",
"timestamp": "",
"source": "github",
"line_count": 365,
"max_line_length": 127,
"avg_line_length": 36.53150684931507,
"alnum_prop": 0.5823458827058647,
"repo_name": "dzamie/weasyl",
"id": "cb503a1850865cb6ae7fa577af7ab12f81a56a76",
"size": "13334",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "weasyl/search.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "102056"
},
{
"name": "HTML",
"bytes": "464943"
},
{
"name": "Java",
"bytes": "304532"
},
{
"name": "JavaScript",
"bytes": "1323937"
},
{
"name": "Makefile",
"bytes": "11885"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Nginx",
"bytes": "1042"
},
{
"name": "Python",
"bytes": "934161"
},
{
"name": "Shell",
"bytes": "446"
}
],
"symlink_target": ""
}
|
"""
This file holds utility functions that have no dependencies on other console code.
Avoids import loops
"""
import time
import webcolors
import importlib, os
import config
def importmodules(dir: str):
    """Import every .py module found in *dir* (a relative path).

    Returns a dict mapping module stem -> imported module object.
    Files whose names contain '__' (e.g. __init__.py) are skipped.
    """
    importlist = {}
    parts = dir.split('/')
    if parts[0] == '':
        del parts[0]
    pypath = '.'.join(parts) + '.'
    impdir = '/'.join(parts)
    for filename in os.listdir(os.getcwd() + '/' + impdir):
        if '__' in filename:
            continue
        stem, ext = os.path.splitext(filename)
        if ext == '.py':
            importlist[stem] = importlib.import_module(pypath + stem)
    return importlist
def wc(clr, factor=0.0, layercolor=(255, 255, 255)):
    """Resolve *clr* to an RGB triple, optionally blended toward *layercolor*.

    Both colors may be given as webcolors names or RGB tuples; factor 0.0
    returns clr unchanged, 1.0 returns layercolor.
    """
    if isinstance(layercolor, str):
        lc = webcolors.name_to_rgb(layercolor.lower())
    else:
        lc = layercolor
    if isinstance(clr, str):
        try:
            v = webcolors.name_to_rgb(clr.lower())
        except ValueError:
            # Unknown color names silently fall back to black.
            # logsupport.Logs.Log('Bad color name: ' + str(clr), severity=ConsoleWarning)
            v = webcolors.name_to_rgb('black')
    else:
        v = clr
    try:
        return (v[0] + (lc[0] - v[0]) * factor,
                v[1] + (lc[1] - v[1]) * factor,
                v[2] + (lc[2] - v[2]) * factor)
    except Exception as E:
        # Malformed tuples are reported rather than raised (returns None).
        print('wc: {}'.format(E))
        print(v, lc, clr, layercolor)
def interval_str(sec_elapsed, shrt=False):
    """Format an elapsed-seconds count as a human-readable interval.

    With shrt=True the day form is compacted to "N dys HH:MM:SS".
    """
    day_secs = 60 * 60 * 24
    days = int(sec_elapsed / day_secs)
    hours = int((sec_elapsed % day_secs) / 3600)
    minutes = int((sec_elapsed % (60 * 60)) / 60)
    seconds = int(sec_elapsed % 60)
    if days != 0:
        if shrt:
            return "{} dys {:>02d}:{:>02d}:{:>02d}".format(days, hours, minutes, seconds)
        return "{} days {:>02d}hrs {:>02d}mn {:>02d}sec".format(days, hours, minutes, seconds)
    if hours != 0:
        return "{:>02d}hrs {:>02d}mn {:>02d}sec".format(hours, minutes, seconds)
    return "{:>02d}mn {:>02d}sec".format(minutes, seconds)
def BoolTrueWord(v):
    """Loosely interpret *v* as boolean true ('true'/'on'/'yes', any case).

    None is False; bools pass through; non-string oddities are reported
    and yield None.
    """
    if v is None:
        return False
    if isinstance(v, bool):
        return v
    try:
        return v.lower() in ('true', 'on', 'yes')
    except Exception:
        print("Error1: {}".format(v))
def BoolFalseWord(v):
    """Loosely interpret *v* as boolean false ('false'/'off'/'no', any case).

    None counts as false (returns True); bools are inverted; non-string
    oddities are reported and yield None.
    """
    if v is None:
        return True
    if isinstance(v, bool):
        return not v
    try:
        return v.lower() in ('false', 'off', 'no')
    except Exception:
        print("Error2: {}".format(v))
def TreeDict(d, args):
    """Access a nest of dicts via a sequence of keys.

    Leaf values that look numeric are coerced: digit strings become int,
    float-convertible values become float, everything else is returned
    unchanged.
    """
    if len(args) != 1:
        # Descend one level and recurse on the remaining keys.
        return TreeDict(d[args[0]], args[1:])
    leaf = d[args[0]]
    if isinstance(leaf, str) and leaf.isdigit():
        return int(leaf)
    try:
        return float(leaf)
    except (ValueError, TypeError):
        return leaf
import string
class PartialFormatter(string.Formatter):
    """Formatter that substitutes placeholders for missing fields/bad specs.

    Missing keys or attributes render as *missing*; values that reject
    their format spec render as *bad_fmt* (or re-raise when bad_fmt is None).
    """

    def __init__(self, missing='--', bad_fmt='--'):
        self.missing = missing
        self.bad_fmt = bad_fmt

    def get_field(self, field_name, args, kwargs):
        # Handle a key not found: map it to (None, name) so format_field
        # can substitute the placeholder.
        try:
            return super().get_field(field_name, args, kwargs)
        except (KeyError, AttributeError):
            return None, field_name

    def format_field(self, value, spec):
        # Handle an invalid format spec for the given value.
        if value is None:
            return self.missing
        try:
            return super().format_field(value, spec)
        except ValueError:
            if self.bad_fmt is None:
                raise
            return self.bad_fmt
# Shared module-level formatter instance used throughout the console code.
fmt = PartialFormatter()
# noinspection PyBroadException
# Flags gating safeprint output; presumably set elsewhere at startup to
# mark development / home systems — TODO confirm where they are assigned.
isdevsystem = False
ishomesystem = False
def safeprint(*args, **kwargs):
    """Timestamped print that only emits on dev/home systems.

    If stdout is gone (disconnected console raises OSError), append the
    message to a fallback log file instead.
    """
    if not (isdevsystem or ishomesystem):
        return
    try:
        print(time.strftime('%m-%d-%y %H:%M:%S'), *args, **kwargs)
    except OSError:
        with open('/home/pi/Console/disconnectederrors.log', 'a') as f:
            print(*args, **kwargs, file=f)
def RepresentsInt(s):
    """Return True when int(s) succeeds, False otherwise."""
    try:
        int(s)
    except (ValueError, TypeError):
        return False
    return True
# Dead code kept for reference: an earlier weather-value formatter that
# rendered None / -9999.0 sentinel values as 'n/a'.
'''
class WFormatter(string.Formatter):
	def format_field(self, value, format_spec):
		if format_spec.endswith(('f', 'd')) and value is None:
			return 'n/a'
		elif value is None:
			return 'n/a'
		elif value == -9999.0:
			return 'n/a'
		else:
			return super(WFormatter, self).format_field(value, format_spec)
'''
|
{
"content_hash": "c1665ac5b99fa1e3f308be34e3cfcaf4",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 103,
"avg_line_length": 26.801324503311257,
"alnum_prop": 0.6449221645663454,
"repo_name": "kevinkahn/softconsole",
"id": "4d7c082c3204e8770f750d60906fd29b04bdaf57",
"size": "4047",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils/utilfuncs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Euphoria",
"bytes": "267"
},
{
"name": "Python",
"bytes": "839903"
},
{
"name": "Shell",
"bytes": "101927"
}
],
"symlink_target": ""
}
|
import urllib
import jinja2
from django_jinja import library
from django.contrib.staticfiles.storage import staticfiles_storage
from django.utils.http import urlquote
from django.core.urlresolvers import reverse
from django.template.defaultfilters import linebreaksbr, urlize
from airmozilla.base.utils import get_abs_static
@library.global_function
def static(path):
    """Return the static-files storage URL for *path*."""
    storage_url = staticfiles_storage.url(path)
    return storage_url
@library.filter
def urlencode(txt):
    """Url encode a path."""
    # Encode unicode to UTF-8 bytes first; quote_plus expects a byte string.
    encoded = txt.encode('utf-8') if isinstance(txt, unicode) else txt
    return urllib.quote_plus(encoded)
@library.global_function
def url(viewname, *args, **kwargs):
    """Helper for Django's ``reverse`` in templates."""
    resolved = reverse(viewname, args=args, kwargs=kwargs)
    return resolved
@library.global_function
@jinja2.contextfunction
def abs_static(context, path):
    """Make sure we always return a FULL absolute URL that starts
    with 'http'.
    """
    request = context['request']
    return get_abs_static(path, request)
@library.global_function
def show_duration(duration, include_seconds=False):
    """Render a duration in seconds as a human-readable string.

    :param duration: seconds, as an int or a float (fractions are truncated)
    :param include_seconds: always append the seconds component; otherwise
        seconds are shown only when the duration is under one minute
    :returns: e.g. ``'1 hour 5 minutes'`` or ``'0 seconds'``
    """
    if isinstance(duration, float):
        # e.g. 16.61 means 16 seconds and 61 milliseconds; the millisecond
        # part is intentionally discarded.
        duration = int(duration)
    # Floor division keeps the components ints on both Python 2 and 3
    # (true division under Python 3 would produce floats here).
    hours = duration // 3600
    remainder = duration % 3600
    minutes = remainder // 60
    seconds = remainder % 60
    out = []
    if hours > 1:
        out.append('%d hours' % hours)
    elif hours:
        out.append('1 hour')
    if minutes > 1:
        out.append('%d minutes' % minutes)
    elif minutes:
        out.append('1 minute')
    # Seconds appear on request, or when they are the only component.
    if include_seconds or (not hours and not minutes):
        if seconds > 1:
            out.append('%d seconds' % seconds)
        elif seconds:
            out.append('1 second')
        else:
            out.append('0 seconds')
    return ' '.join(out)
@library.global_function
def show_duration_compact(duration):
    """Render a duration in seconds as a compact ``XhYmZs`` string.

    Leading zero components are suppressed (e.g. 61 -> ``'1m1s'``);
    zero returns an empty string.
    """
    # Floor division keeps the components ints on both Python 2 and 3
    # (true division under Python 3 would produce floats here).
    hours = duration // 3600
    remainder = duration % 3600
    minutes = remainder // 60
    seconds = remainder % 60
    parts = []
    if hours:
        parts.append('%dh' % hours)
    if hours or minutes:
        parts.append('%dm' % minutes)
    if hours or minutes or seconds:
        parts.append('%ds' % seconds)
    return ''.join(parts)
@library.global_function
def mozillians_permalink(username):
    """Return the mozillians.org profile URL for *username*."""
    quoted_username = urlquote(username)
    return 'https://mozillians.org/u/%s' % quoted_username
@library.filter
def urlize_and_linebreaksbr(text):
    """Apply Django's ``urlize`` then ``linebreaksbr`` filters to *text*."""
    return linebreaksbr(urlize(text))
|
{
"content_hash": "ec902d1529ccfe6109d65213aa1584e2",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 66,
"avg_line_length": 25.875,
"alnum_prop": 0.6586151368760065,
"repo_name": "mozilla/airmozilla",
"id": "b99c6c1c59b817003a3cd6d8f2c02996ef841426",
"size": "2484",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "airmozilla/base/templatetags/jinja_helpers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4527"
},
{
"name": "Brightscript",
"bytes": "67473"
},
{
"name": "CSS",
"bytes": "140471"
},
{
"name": "HTML",
"bytes": "346961"
},
{
"name": "JavaScript",
"bytes": "1277145"
},
{
"name": "Makefile",
"bytes": "12447"
},
{
"name": "Python",
"bytes": "2149189"
},
{
"name": "Shell",
"bytes": "3103"
},
{
"name": "Smarty",
"bytes": "3010"
}
],
"symlink_target": ""
}
|
""" A TVTK scene editor. """
# Author: Prabhu Ramachandran <prabhu_r@users.sf.net>
# Copyright (c) 2005, Enthought, Inc.
# License: BSD Style.
# Enthought library imports.
from apptools.preferences.api import get_default_preferences
from tvtk.pyface.tvtk_scene import TVTKScene
from tvtk.pyface.api import DecoratedScene
from pyface.workbench.api import Editor
from traits.api import Instance
#### Handy functions ##########################################################
def _id_generator():
""" Return an ever-increasing number useful for creating unique Ids. """
n = 1
while True:
yield(n)
n += 1
_id_generator = _id_generator()
class SceneEditor(Editor):
    """ A TVTK scene editor. """

    #### 'SceneEditor' interface ##############################################

    # The TVTK scene object.
    scene = Instance(TVTKScene)

    ###########################################################################
    # 'IWorkbenchPart' interface.
    ###########################################################################

    #### Trait initializers ###################################################

    def _id_default(self):
        """ Trait initializer. """
        return self.name

    def _name_default(self):
        """ Trait initializer. """
        return 'TVTK Scene %d' % next(_id_generator)

    #### Methods ##############################################################

    def create_control(self, parent):
        """ Create the toolkit-specific control that represents the editor. """
        # We hold a reference to the scene itself to make sure it does not get
        # garbage collected (because we only return the scene's 'control' not
        # the scene itself). The scene is also referenced by the scene manager.
        self.scene = self._create_decorated_scene(parent)
        self.scene.render()
        return self.scene.control

    def destroy_control(self):
        """ Destroy the toolkit-specific control that represents the
        editor.
        """
        scene = self.scene
        if scene is not None:
            # Close the scene to cleanly shut it down.
            scene.close()
        # Call the parent method.
        return super(SceneEditor, self).destroy_control()

    ###########################################################################
    # Private interface.
    ###########################################################################

    def _create_decorated_scene(self, parent):
        """ Create a new decorated scene configured from user preferences. """
        pref = get_default_preferences()

        def read_pref(key):
            # NOTE(review): preference values are stored as strings and
            # eval'd back into Python values; the input comes from the
            # application's own preference file, not from untrusted users.
            return eval(pref.get(key))

        scene = DecoratedScene(parent, stereo=read_pref('tvtk.scene.stereo'))
        # Set the scene's traits to preference values.
        scene.magnification = read_pref('tvtk.scene.magnification')
        scene.foreground = read_pref('tvtk.scene.foreground_color')
        scene.background = read_pref('tvtk.scene.background_color')
        # FIXME: This seems necessary for some strange reason, if not
        # the actual background of the renderer never gets set even
        # though the renderer and the scene's background are synced.
        scene.renderer.background = scene.background
        return scene
#### EOF ######################################################################
|
{
"content_hash": "9aa80ab18f061006a393f218e3944174",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 79,
"avg_line_length": 32.50961538461539,
"alnum_prop": 0.5226264418811003,
"repo_name": "dmsurti/mayavi",
"id": "a50e06e19b87d2fde92c0d7553051d4279c11209",
"size": "3381",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tvtk/plugins/scene/scene_editor.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1054"
},
{
"name": "GAP",
"bytes": "34817"
},
{
"name": "Python",
"bytes": "2494055"
},
{
"name": "Shell",
"bytes": "147"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds an integer 'withdrawn' counter (default 0) to the UserProfile model.
    dependencies = [
        ('frbb', '0006_poem_category'),
    ]
    operations = [
        migrations.AddField(
            model_name='userprofile',
            name='withdrawn',
            field=models.IntegerField(default=0),
        ),
    ]
|
{
"content_hash": "05d95235897380b53727c924f6cbc1b9",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 49,
"avg_line_length": 20.833333333333332,
"alnum_prop": 0.592,
"repo_name": "seriouscamp/frbb",
"id": "1873936a2f3d6b512ac40be5659d6409f38aca8e",
"size": "447",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web/frbb/migrations/0007_userprofile_withdrawn.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4171"
},
{
"name": "HTML",
"bytes": "29095"
},
{
"name": "JavaScript",
"bytes": "1152"
},
{
"name": "PowerShell",
"bytes": "468"
},
{
"name": "Python",
"bytes": "79551"
}
],
"symlink_target": ""
}
|
from invoke.tasks import task
# Task fixtures for Invoke's own test suite. The decorator arguments,
# signatures and docstrings below are exact test data asserted on elsewhere;
# do not edit their text casually.
@task(aliases=('bar', 'otherbar'))
def foo(ctx):
    """
    Foo the bar.
    """
    pass
@task
def foo2(ctx):
    """
    Foo the bar:
    example code
    Added in 1.0
    """
    pass
@task
def foo3(ctx):
    """Foo the other bar:
    example code
    Added in 1.1
    """
    pass
@task(default=True)
def biz(ctx):
    pass
@task(help={'why': 'Motive', 'who': 'Who to punch'})
def punch(ctx, who, why):
    pass
@task(positional=['pos'])
def one_positional(ctx, pos, nonpos):
    pass
@task(positional=['pos1', 'pos2'])
def two_positionals(ctx, pos1, pos2, nonpos):
    pass
@task
def implicit_positionals(ctx, pos1, pos2, nonpos=None):
    pass
# NOTE(review): optional=['myopt'] marks the flag as taking an optional value.
@task(optional=['myopt'])
def optional_values(ctx, myopt):
    pass
|
{
"content_hash": "80b6859a2da2ee3559fa102f0b717845",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 55,
"avg_line_length": 14.203703703703704,
"alnum_prop": 0.590612777053455,
"repo_name": "pfmoore/invoke",
"id": "0518c8bde5367009f38b3c7abc1195b3a482ddde",
"size": "767",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/_support/decorator.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "PowerShell",
"bytes": "876"
},
{
"name": "Python",
"bytes": "382856"
},
{
"name": "Shell",
"bytes": "2763"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated merge migration: reconciles the two divergent
    # 0037/0038 branches of the 'product' app. No schema changes needed.
    dependencies = [
        ('product', '0038_auto_20171129_0616'),
        ('product', '0037_auto_20171129_1004'),
    ]
    operations = [
    ]
|
{
"content_hash": "0fce4d441f46ad9d880b6193447a322c",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 47,
"avg_line_length": 18.928571428571427,
"alnum_prop": 0.6339622641509434,
"repo_name": "UITools/saleor",
"id": "7c0b8078c4ecdb87e982b70509ae95f620af754a",
"size": "338",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "saleor/product/migrations/0039_merge_20171130_0727.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "96006"
},
{
"name": "Dockerfile",
"bytes": "1859"
},
{
"name": "HTML",
"bytes": "556961"
},
{
"name": "JavaScript",
"bytes": "64679"
},
{
"name": "Python",
"bytes": "2316144"
},
{
"name": "Shell",
"bytes": "1265"
},
{
"name": "TypeScript",
"bytes": "2526265"
}
],
"symlink_target": ""
}
|
"""
Utility functions for bAbI example and demo.
"""
from neon.data import BABI
from neon.initializers import GlorotUniform, Uniform, Orthonormal
from neon.layers import Affine, GRU, LookupTable, MergeMultistream, LSTM
from neon.models import Model
from neon.transforms import Logistic, Softmax, Tanh
# list of bAbI tasks
subset = 'en'  # language subset of the bAbI corpus to load
# The 20 standard bAbI QA tasks; callers index this 1-based via
# task_list[task_number - 1] (see babi_handler below).
task_list = [
    'qa1_single-supporting-fact',
    'qa2_two-supporting-facts',
    'qa3_three-supporting-facts',
    'qa4_two-arg-relations',
    'qa5_three-arg-relations',
    'qa6_yes-no-questions',
    'qa7_counting',
    'qa8_lists-sets',
    'qa9_simple-negation',
    'qa10_indefinite-knowledge',
    'qa11_basic-coreference',
    'qa12_conjunction',
    'qa13_compound-coreference',
    'qa14_time-reasoning',
    'qa15_basic-deduction',
    'qa16_basic-induction',
    'qa17_positional-reasoning',
    'qa18_size-reasoning',
    'qa19_path-finding',
    'qa20_agents-motivations',
]
def babi_handler(data_dir, task_number):
    """
    Handle for bAbI task.

    Args:
        data_dir (string) : Path to bAbI data directory.
        task_number (int) : The task ID from the bAbI dataset (1-20).

    Returns:
        BABI : Handler for bAbI task.

    Raises:
        ValueError : If task_number is less than 1.
    """
    # Guard against task_number <= 0: without this check Python's negative
    # indexing would silently return the wrong task (e.g. 0 -> qa20).
    if task_number < 1:
        raise ValueError("task_number must be >= 1, got %r" % (task_number,))
    task = task_list[task_number - 1]
    return BABI(path=data_dir, task=task, subset=subset)
def create_model(vocab_size, rlayer_type):
    """
    Create LSTM/GRU model for bAbI dataset.

    Args:
        vocab_size (int) : Size of the vocabulary.
        rlayer_type (string) : Type of recurrent layer to use (gru or lstm).

    Returns:
        Model : Model of the created network
    """
    # recurrent layer parameters (default gru)
    recurrent_cls = GRU if rlayer_type == 'gru' else LSTM
    # if using lstm, swap the activation functions
    if rlayer_type == 'lstm':
        inner_act, gate_act = Logistic(), Tanh()
    else:
        inner_act, gate_act = Tanh(), Logistic()
    recurrent_kwargs = dict(
        output_size=100,
        reset_cells=True,
        init=GlorotUniform(),
        init_inner=Orthonormal(0.5),
        activation=inner_act,
        gate_activation=gate_act,
    )
    # lookup layer parameters
    embedding_kwargs = dict(vocab_size=vocab_size, embedding_dim=50, init=Uniform(-0.05, 0.05))
    # Model construction: story and question each get their own
    # embedding + recurrent path, merged before the classifier.
    story_path = [LookupTable(**embedding_kwargs), recurrent_cls(**recurrent_kwargs)]
    query_path = [LookupTable(**embedding_kwargs), recurrent_cls(**recurrent_kwargs)]
    layers = [
        MergeMultistream(layers=[story_path, query_path], merge="stack"),
        Affine(vocab_size, init=GlorotUniform(), activation=Softmax()),
    ]
    return Model(layers=layers)
|
{
"content_hash": "06737afa6f98d25c888caa29655496a7",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 92,
"avg_line_length": 30.96385542168675,
"alnum_prop": 0.6571984435797665,
"repo_name": "NervanaSystems/neon",
"id": "a9a30baf1e9396da45a7ce25608dc6c8a56e907b",
"size": "3318",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/babi/util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "267"
},
{
"name": "C",
"bytes": "125018"
},
{
"name": "CSS",
"bytes": "1484011"
},
{
"name": "Cuda",
"bytes": "14937"
},
{
"name": "Dockerfile",
"bytes": "1483"
},
{
"name": "Makefile",
"bytes": "12033"
},
{
"name": "Perl",
"bytes": "130967"
},
{
"name": "Python",
"bytes": "2113676"
},
{
"name": "Shell",
"bytes": "5711"
}
],
"symlink_target": ""
}
|
__author__ = 'tyler'
import os
import project
class Workspace(object):
    """A directory whose immediate subdirectories are git-tracked projects."""

    def __init__(self, path):
        self.path = path
        self.projects = self.set_projects(path)

    def find_projects(self, path):
        """Return paths of non-hidden subdirectories of *path* that contain a .git folder."""
        projects = []
        folders = [os.path.join(path, o) for o in os.listdir(path) if
                   (os.path.isdir(os.path.join(path, o)) and os.path.basename(os.path.join(path, o))[0] != '.')]
        for folder in folders:
            # os.path.join instead of `folder + '/.git'` so the check works
            # with any platform path separator.
            if os.path.isdir(os.path.join(folder, '.git')):
                projects.append(folder)
        return projects

    def set_projects(self, path):
        """Build a Project object for every git repository found under *path*."""
        project_paths = self.find_projects(path)
        projects = []
        for project_path in project_paths:
            projects.append(project.Project(project_path))
        return projects

    def find_workspaces(self):
        """Return the names of all 'workspace/' branches across all projects."""
        workspaces = {}
        # `proj` avoids shadowing the imported `project` module.
        for proj in self.projects:
            for branch in proj.branches:
                if ('workspace/' in branch):
                    workspaces[branch] = True
        return list(workspaces.keys())
|
{
"content_hash": "c8d91f01cafdf9d37b95ccf77ac6794d",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 110,
"avg_line_length": 32.06060606060606,
"alnum_prop": 0.5708884688090737,
"repo_name": "tylerhjones/workspacer",
"id": "7a123504b1bed0ec75f33a2272de7cd5ab65643d",
"size": "1058",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "workspace.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2862"
}
],
"symlink_target": ""
}
|
from blazeweb.views import asview
from blazeweb.wrappers import Response
# View fixtures for blazeweb's routing tests. The @asview registrations
# (URL rules, getargs tuples/strings, signatures) and the returned strings
# are exact test data asserted on elsewhere; do not edit them casually.
@asview()
def helloworld():
    return 'Hello World'
@asview('/mms')
def make_me_shorter():
    return 'make_me_shorter'
@asview('/hw/<tome>')
def helloto(tome='World'):
    return 'Hello %s' % tome
@asview('/hw2/<tome>')
def helloto2(nothere=None, tome='World'):
    return 'hw2 %s' % tome
@asview('/flexible/<SEOonly>')
def flexible():
    return 'thats cool'
@asview('/cooler/<SEOonly>', getargs=('foo', 'bar'))
def cooler(SEOonly=None, foo=None, bar=None, willstaynone=None):
    return '%s, %s, %s, %s' % (foo, bar, SEOonly, willstaynone)
# NOTE(review): getargs=('foo') is a plain string, not a 1-tuple; kept as-is
# because the tests exercise this exact spelling.
@asview('/ap/<foo>', getargs=('foo'))
def argprecedence(foo=None):
    return str(foo)
@asview('/tolist', getargs=('foo'))
def tolist(foo=None):
    return ', '.join(foo)
# Deliberately broken fixture: required positional arg with no value source.
@asview('/wontwork')
def wontwork(foo):
    return 'foo'
@asview('/positional', getargs=('foo'))
def positional(foo):
    return foo
@asview('/positional/<foo>')
def positionalurl(foo):
    return foo
@asview('/positional3/<foo>', getargs=('baz'))
def positionalurl3(foo, baz):
    return foo
@asview()
def cssresponse():
    return Response('body {color:black}', mimetype='text/css')
@asview()
def returnwsgiapp():
    """
    could have just as easily returned Response(), but I wanted to do
    something different!
    """
    def hello_world(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/html')])
        return [b'wsgi hw']
    return hello_world
|
{
"content_hash": "68b268ce9a9accd55fe72b8a403bca08",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 73,
"avg_line_length": 19.164556962025316,
"alnum_prop": 0.6393659180977543,
"repo_name": "level12/blazeweb",
"id": "5547e820549e1c0b675412797f6cd19495d58727",
"size": "1514",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/apps/minimal1/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "128"
},
{
"name": "HTML",
"bytes": "544163"
},
{
"name": "JavaScript",
"bytes": "88"
},
{
"name": "Python",
"bytes": "434794"
}
],
"symlink_target": ""
}
|
import cv2
from geosolver.diagram.draw_on_image import draw_point, draw_instance, draw_label
from geosolver.ontology.ontology_semantics import evaluate
from geosolver.utils.prep import display_image
__author__ = 'minjoon'
class ImageSegment(object):
    """One region sliced out of a larger image, with its member pixels."""
    def __init__(self, segmented_image, sliced_image, binarized_segmented_image, pixels, offset, key):
        self.sliced_image = sliced_image
        self.segmented_image = segmented_image
        self.binarized_segmented_image = binarized_segmented_image
        self.pixels = pixels
        self.offset = offset
        self.key = key
        self.shape = segmented_image.shape
        # Bounding-box area in pixels (rows * cols of the segment image).
        rows, cols = segmented_image.shape[0], segmented_image.shape[1]
        self.area = rows * cols
    def display_segmented_image(self, block=True):
        """Show the segmented image in a window."""
        display_image(self.segmented_image, block=block)
    def display_binarized_segmented_image(self, block=True):
        """Show the binarized segmented image in a window."""
        display_image(self.binarized_segmented_image, block=block)
    def display_pixels(self, block=True):
        """Show the segment with its member pixels drawn on top."""
        canvas = cv2.cvtColor(self.segmented_image, cv2.COLOR_GRAY2BGR)
        for px in self.pixels:
            draw_point(canvas, px)
        display_image(canvas, block=block)
class ImageSegmentParse(object):
    """The original image split into one diagram segment plus label segments."""
    def __init__(self, original_image, diagram_image_segment, label_image_segments):
        """
        :param numpy.ndarray original_image:
        :param ImageSegment diagram_image_segment:
        :param dict label_image_segments:
        :return:
        """
        assert isinstance(diagram_image_segment, ImageSegment)
        assert isinstance(label_image_segments, dict)
        self.original_image = original_image
        self.diagram_image_segment = diagram_image_segment
        self.label_image_segments = label_image_segments
    def get_colored_original_image(self):
        """Return a BGR copy of the grayscale original image."""
        return cv2.cvtColor(self.original_image, cv2.COLOR_GRAY2BGR)
    def display_diagram(self):
        """Show the diagram segment."""
        self.diagram_image_segment.display_segmented_image()
    def display_labels(self):
        """Show each label segment in turn."""
        for segment in self.label_image_segments.values():
            segment.display_segmented_image()
    def get_image_instances(self, instances, **kwargs):
        """Return the colored original image with *instances* drawn on it."""
        canvas = self.get_colored_original_image()
        diagram_offset = self.diagram_image_segment.offset
        for instance in instances:
            draw_instance(canvas, instance, offset=diagram_offset, **kwargs)
        return canvas
    def display_instances(self, instances, block=True, **kwargs):
        """Draw *instances* on the original image and show the result."""
        display_image(self.get_image_instances(instances, **kwargs), block=block)
class PrimitiveParse(object):
    """Lines and circles detected on top of an ImageSegmentParse."""
    def __init__(self, image_segment_parse, lines, circles):
        assert isinstance(image_segment_parse, ImageSegmentParse)
        self.image_segment_parse = image_segment_parse
        self.lines = lines
        self.circles = circles
        # Merge both primitive dicts into a single lookup table.
        merged = dict(lines)
        merged.update(circles)
        self.primitives = merged
    def display_primitives(self, block=True, **kwargs):
        """Show every primitive drawn on the original image."""
        self.image_segment_parse.display_instances(self.primitives.values(), block=block, **kwargs)
    def get_image_primitives(self, **kwargs):
        """Return the original image with every primitive drawn on it."""
        return self.image_segment_parse.get_image_instances(self.primitives.values(), **kwargs)
    def display_each_primitive(self, **kwargs):
        """Show the primitives one at a time, blocking between each."""
        for prim in self.primitives.values():
            self.image_segment_parse.display_instances([prim], block=True, **kwargs)
class CoreParse(object):
    """Primitive parse augmented with intersection points and a variable assignment."""
    def __init__(self, primitive_parse, intersection_points, point_variables, circles, radius_variables, assignment):
        assert isinstance(primitive_parse, PrimitiveParse)
        self.primitive_parse = primitive_parse
        self.image_segment_parse = primitive_parse.image_segment_parse
        self.intersection_points = intersection_points
        self.point_variables = point_variables
        self.circles = circles
        self.radius_variables = radius_variables
        self.variable_assignment = assignment
    def evaluate(self, formula):
        """Evaluate *formula* under this parse's variable assignment."""
        return evaluate(formula, self.variable_assignment)
    def is_grounded(self, formula):
        """Return whether every variable in *formula* has an assignment."""
        return formula.is_grounded(self.variable_assignment.keys())
    def get_image_points(self, **kwargs):
        """Return the original image with each intersection point and its id drawn."""
        canvas = self.image_segment_parse.get_colored_original_image()
        diagram_offset = self.image_segment_parse.diagram_image_segment.offset
        for point_key, point in self.intersection_points.items():
            # Label is defined later in this module; resolved at call time.
            draw_label(canvas, Label("%d" % point_key, point), offset=diagram_offset, **kwargs)
            draw_point(canvas, point, offset=diagram_offset, **kwargs)
        return canvas
    def display_points(self, block=True, **kwargs):
        """Draw the intersection points and show the result."""
        display_image(self.get_image_points(**kwargs), block=block)
class GraphParse(object):
    """Core parse organized into line and arc graphs."""
    # TODO :
    def __init__(self, core_parse, line_graph, circle_dict, arc_graphs):
        assert isinstance(core_parse, CoreParse)
        self.core_parse = core_parse
        self.primitive_parse = core_parse.primitive_parse
        self.image_segment_parse = core_parse.primitive_parse.image_segment_parse
        self.line_graph = line_graph  # Undirected graph
        self.circle_dict = circle_dict
        self.arc_graphs = arc_graphs  # Directed graph
        # Convenience aliases pulled up from the core parse.
        self.intersection_points = core_parse.intersection_points
        self.point_variables = core_parse.point_variables
        self.radius_variables = core_parse.radius_variables
    def display_instances(self, instances, block=True, **kwargs):
        """Delegate drawing/showing to the underlying image segment parse."""
        segment_parse = self.image_segment_parse
        segment_parse.display_instances(instances, block=block, **kwargs)
class Label:
    """A piece of text anchored at a position on the image."""
    def __init__(self, text, position):
        self.position = position
        self.text = text
class ImageLabelParse:
    """An image together with the labels recognized on it."""
    def __init__(self, image, labels):
        self.image = image
        self.labels = labels
    def get_labeled_image(self, **kwargs):
        """Return a BGR copy of the image with every label drawn at its position."""
        canvas = cv2.cvtColor(self.image, cv2.COLOR_GRAY2BGR)
        for lab in self.labels.values():
            draw_label(canvas, lab, **kwargs)
            draw_point(canvas, lab.position)
        return canvas
|
{
"content_hash": "98052fdd53d88d904734362f330a9c7f",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 117,
"avg_line_length": 39.116129032258065,
"alnum_prop": 0.6740887349496949,
"repo_name": "lupantech/InterGPS",
"id": "d969868a11bf7c5b51fb6c6df7abb51c062861e3",
"size": "6063",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "diagram_parser/parser/geosolver/diagram/states.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2625"
},
{
"name": "HTML",
"bytes": "13069"
},
{
"name": "JavaScript",
"bytes": "10575"
},
{
"name": "Makefile",
"bytes": "520"
},
{
"name": "Python",
"bytes": "901525"
},
{
"name": "Shell",
"bytes": "5150"
}
],
"symlink_target": ""
}
|
from functools import partial
import logging
import unittest
# 3rd
from mock import Mock
# project
from tests.checks.common import Fixtures
log = logging.getLogger(__name__)
# Populated lazily in TestCommonWMI.setUp once the WMI mocks are installed.
WMISampler = None
# Thoughts
# Log WMI activity
# Mechanism to timeout
# Check when pywintypes.com_error are raised
# Check the role of the flags
def load_fixture(f, args=None):
    """
    Build a WMI query result from a file and given parameters.
    """
    def parse_line(raw_line):
        """
        Extract a property name, value and the qualifiers from a fixture line.
        A line is either "name value" or "name value counter_type".
        Return (property name, property value, property qualifiers)
        """
        counter_type = ""
        fields = raw_line.split(" ")
        if len(fields) == 3:
            name, value, counter_type = fields
        else:
            name, value = fields
        qualifiers = []
        if counter_type:
            qualifiers = [Mock(Name='CounterType', Value=int(counter_type))]
        return name, value, qualifiers

    # Build from file
    properties = []
    for raw_line in Fixtures.read_file(f).splitlines():
        name, value, qualifiers = parse_line(raw_line)
        properties.append(Mock(Name=name, Value=value, Qualifiers_=qualifiers))
    # Append extra information
    if args:
        extra_name, extra_value = args
        properties.append(Mock(Name=extra_name, Value=extra_value, Qualifiers_=[]))
    return [Mock(Properties_=properties)]
class Counter(object):
    """Minimal mutable call counter supporting ``+=``, ``==`` and ``str()``."""
    def __init__(self):
        self.value = 0
    def __iadd__(self, other):
        self.value = self.value + other
        return self
    def __eq__(self, other):
        return self.value == other
    def __str__(self):
        return str(self.value)
    def reset(self):
        """Set the counter back to zero."""
        self.value = 0
class SWbemServices(object):
    """
    SWbemServices a.k.a. (mocked) WMI connection.
    Save connection parameters so it can be tested.
    """
    # Shared across all instances so tests can count every ExecQuery call.
    _exec_query_call_count = Counter()
    def __init__(self, wmi_conn_args):
        super(SWbemServices, self).__init__()
        # Remember how the "connection" was opened plus the last query/flags
        # so tests can assert on them through the getters below.
        self._wmi_conn_args = wmi_conn_args
        self._last_wmi_query = None
        self._last_wmi_flags = None
    @classmethod
    def reset(cls):
        """
        FIXME - Dirty patch to reset `SWbemServices.ExecQuery` to 0.
        """
        cls._exec_query_call_count.reset()
    def get_conn_args(self):
        """
        Return parameters used to set up the WMI connection.
        """
        return self._wmi_conn_args
    def get_last_wmi_query(self):
        """
        Return the last WMI query submitted via the WMI connection.
        """
        return self._last_wmi_query
    def get_last_wmi_flags(self):
        """
        Return the last WMI flags submitted via the WMI connection.
        """
        return self._last_wmi_flags
    def ExecQuery(self, query, query_language, flags):
        """
        Mocked ExecQuery: record the query and flags, then return canned
        fixture rows for the exact query strings used by the tests.
        """
        SWbemServices._exec_query_call_count += 1
        self._last_wmi_query = query
        self._last_wmi_flags = flags
        results = []
        # Formatted (pre-computed) disk counters: one row per logical disk.
        if query == "Select AvgDiskBytesPerWrite,FreeMegabytes from Win32_PerfFormattedData_PerfDisk_LogicalDisk":  # noqa
            results += load_fixture("win32_perfformatteddata_perfdisk_logicaldisk", ("Name", "C:"))
            results += load_fixture("win32_perfformatteddata_perfdisk_logicaldisk", ("Name", "D:"))
        # Raw counters require two samples; the flags value selects which one.
        if query == "Select CounterRawCount,CounterCounter,Timestamp_Sys100NS,Frequency_Sys100NS from Win32_PerfRawData_PerfOS_System":  # noqa
            # Mock a previous and a current sample
            sample_file = "win32_perfrawdata_perfos_system_previous" if flags == 131120\
                else "win32_perfrawdata_perfos_system_current"
            results += load_fixture(sample_file, ("Name", "C:"))
            results += load_fixture(sample_file, ("Name", "D:"))
        if query == "Select UnknownCounter,MissingProperty,Timestamp_Sys100NS,Frequency_Sys100NS from Win32_PerfRawData_PerfOS_System":  # noqa
            results += load_fixture("win32_perfrawdata_perfos_system_unknown", ("Name", "C:"))
            results += load_fixture("win32_perfrawdata_perfos_system_unknown", ("Name", "D:"))
        if query == "Select IOReadBytesPerSec,IDProcess from Win32_PerfFormattedData_PerfProc_Process WHERE Name = 'chrome'" \
            or query == "Select IOReadBytesPerSec,UnknownProperty from Win32_PerfFormattedData_PerfProc_Process WHERE Name = 'chrome'":  # noqa
            results += load_fixture("win32_perfformatteddata_perfproc_process")
        if query == "Select IOReadBytesPerSec,ResultNotMatchingAnyTargetProperty from Win32_PerfFormattedData_PerfProc_Process WHERE Name = 'chrome'":  # noqa
            results += load_fixture("win32_perfformatteddata_perfproc_process_alt")
        if query == "Select CommandLine from Win32_Process WHERE Handle = '4036'" \
            or query == "Select UnknownProperty from Win32_Process WHERE Handle = '4036'":
            results += load_fixture("win32_process")
        return results
    # Expose the shared counter under the attribute name tests inspect.
    ExecQuery.call_count = _exec_query_call_count
class Dispatch(object):
    """
    Mock for win32com.client Dispatch class.
    """
    # Shared across all instances so tests can count every connection opened.
    _connect_call_count = Counter()
    def __init__(self, *args, **kwargs):
        pass
    @classmethod
    def reset(cls):
        """
        FIXME - Dirty patch to reset `ConnectServer.call_count` to 0.
        """
        cls._connect_call_count.reset()
    def ConnectServer(self, *args, **kwargs):
        """
        Return a WMI connection, a.k.a. a SWbemServices object.
        """
        Dispatch._connect_call_count += 1
        return SWbemServices((args, kwargs))
    # Expose the shared counter under the attribute name tests inspect.
    ConnectServer.call_count = _connect_call_count
class TestCommonWMI(unittest.TestCase):
    """
    Common toolbox for WMI unit testing.
    """
    def setUp(self):
        """
        Mock WMI related Python packages, so it can be tested on any environment.
        """
        import sys
        global WMISampler
        # Install the fakes *before* importing the sampler so that its
        # `import win32com.client` resolves to our mocked Dispatch class.
        sys.modules['pywintypes'] = Mock()
        sys.modules['win32com'] = Mock()
        sys.modules['win32com.client'] = Mock(Dispatch=Dispatch)
        from checks.libs.wmi import sampler
        # Pre-bind the logger so tests only pass WMI-specific arguments.
        WMISampler = partial(sampler.WMISampler, log)
    def tearDown(self):
        """
        Reset Mock counters, flush samplers and connections
        """
        # Reset counters
        Dispatch.reset()
        SWbemServices.reset()
        # Flush cache
        from checks.libs.wmi.sampler import WMISampler
        # These are class-level caches; they would leak state between tests.
        WMISampler._wmi_locators = {}
        WMISampler._wmi_connections = {}
    def assertWMIConnWith(self, wmi_sampler, param):
        """
        Helper, assert that the WMI connection was established with the right parameter and value.
        """
        wmi_instance = wmi_sampler._get_connection()
        wmi_conn_args, wmi_conn_kwargs = wmi_instance.get_conn_args()
        # A tuple means "keyword argument (key, value)"; a bare value means
        # "positional argument".
        if isinstance(param, tuple):
            key, value = param
            self.assertIn(key, wmi_conn_kwargs)
            self.assertEquals(wmi_conn_kwargs[key], value)
        else:
            self.assertIn(param, wmi_conn_args)
    def assertWMIQuery(self, wmi_sampler, query=None, flags=None):
        """
        Helper, assert that the given WMI query and flags were submitted.
        """
        wmi_instance = wmi_sampler._get_connection()
        if query:
            last_wmi_query = wmi_instance.get_last_wmi_query()
            self.assertEquals(last_wmi_query, query)
        if flags:
            last_wmi_flags = wmi_instance.get_last_wmi_flags()
            self.assertEquals(last_wmi_flags, flags)
    def assertWMIObject(self, wmi_obj, property_names):
        """
        Assert the WMI object integrity.
        """
        for prop in property_names:
            self.assertIn(prop, wmi_obj)
    def assertIn(self, first, second):
        """
        Assert `first` in `second`.
        Note: needs to be defined for Python 2.6
        """
        self.assertTrue(first in second, "{0} not in {1}".format(first, second))
class TestUnitWMISampler(TestCommonWMI):
"""
Unit tests for WMISampler.
"""
def test_wmi_connection(self):
"""
Establish a WMI connection to the specified host/namespace, with the right credentials.
"""
wmi_sampler = WMISampler(
"Win32_PerfRawData_PerfOS_System",
["ProcessorQueueLength"],
host="myhost",
namespace="some/namespace",
username="datadog",
password="password"
)
wmi_conn = wmi_sampler._get_connection()
# WMI connection is cached
self.assertIn('myhost:some/namespace:datadog', wmi_sampler._wmi_connections)
# Connection was established with the right parameters
self.assertWMIConnWith(wmi_sampler, "myhost")
self.assertWMIConnWith(wmi_sampler, "some/namespace")
def test_wmi_connection_pooling(self):
"""
Share WMI connections among WMISampler objects.
"""
from win32com.client import Dispatch
wmi_sampler_1 = WMISampler("Win32_PerfRawData_PerfOS_System", ["ProcessorQueueLength"])
wmi_sampler_2 = WMISampler("Win32_OperatingSystem", ["TotalVisibleMemorySize"])
wmi_sampler_3 = WMISampler("Win32_PerfRawData_PerfOS_System", ["ProcessorQueueLength"], host="myhost") # noqa
wmi_sampler_1.sample()
wmi_sampler_2.sample()
self.assertEquals(Dispatch.ConnectServer.call_count, 1, Dispatch.ConnectServer.call_count)
wmi_sampler_3.sample()
self.assertEquals(Dispatch.ConnectServer.call_count, 2, Dispatch.ConnectServer.call_count)
def test_wql_filtering(self):
"""
Format the filters to a comprehensive WQL `WHERE` clause.
"""
from checks.libs.wmi import sampler
format_filter = sampler.WMISampler._format_filter
# Check `_format_filter` logic
no_filters = []
filters = [{'Name': "SomeName"}, {'Id': "SomeId"}]
self.assertEquals("", format_filter(no_filters))
self.assertEquals(" WHERE Id = 'SomeId' AND Name = 'SomeName'",
format_filter(filters))
def test_wmi_query(self):
"""
Query WMI using WMI Query Language (WQL).
"""
# No filters
wmi_sampler = WMISampler("Win32_PerfFormattedData_PerfDisk_LogicalDisk",
["AvgDiskBytesPerWrite", "FreeMegabytes"])
wmi_sampler.sample()
self.assertWMIQuery(
wmi_sampler,
"Select AvgDiskBytesPerWrite,FreeMegabytes"
" from Win32_PerfFormattedData_PerfDisk_LogicalDisk"
)
# Single filter
wmi_sampler = WMISampler("Win32_PerfFormattedData_PerfDisk_LogicalDisk",
["AvgDiskBytesPerWrite", "FreeMegabytes"],
filters=[{'Name': "C:"}])
wmi_sampler.sample()
self.assertWMIQuery(
wmi_sampler,
"Select AvgDiskBytesPerWrite,FreeMegabytes"
" from Win32_PerfFormattedData_PerfDisk_LogicalDisk"
" WHERE Name = 'C:'"
)
# Multiple filters
wmi_sampler = WMISampler("Win32_PerfFormattedData_PerfDisk_LogicalDisk",
["AvgDiskBytesPerWrite", "FreeMegabytes"],
filters=[{'Name': "C:"}, {'Id': "123"}])
wmi_sampler.sample()
self.assertWMIQuery(
wmi_sampler,
"Select AvgDiskBytesPerWrite,FreeMegabytes"
" from Win32_PerfFormattedData_PerfDisk_LogicalDisk"
" WHERE Id = '123' AND Name = 'C:'"
)
def test_wmi_parser(self):
    """
    Parse WMI objects from WMI query results.
    """
    disk_sampler = WMISampler("Win32_PerfFormattedData_PerfDisk_LogicalDisk",
                              ["AvgDiskBytesPerWrite", "FreeMegabytes"])
    disk_sampler.sample()

    # The sample is expected to parse into dictionaries keyed by
    # lower-cased property names, one per logical disk.
    expected_results = [
        {'freemegabytes': 19742.0,
         'name': 'C:',
         'avgdiskbytesperwrite': 1536.0},
        {'freemegabytes': 19742.0,
         'name': 'D:',
         'avgdiskbytesperwrite': 1536.0},
    ]
    self.assertEquals(disk_sampler, expected_results, disk_sampler)
def test_wmi_sampler_iterator_getter(self):
    """
    Iterate/Get on the WMISampler object iterates/gets on its current sample.
    """
    wmi_sampler = WMISampler("Win32_PerfFormattedData_PerfDisk_LogicalDisk",
                             ["AvgDiskBytesPerWrite", "FreeMegabytes"])
    wmi_sampler.sample()

    # Two WMI objects are expected in the current sample
    self.assertEquals(len(wmi_sampler), 2)

    # Using an iterator
    for wmi_obj in wmi_sampler:
        self.assertWMIObject(wmi_obj, ["AvgDiskBytesPerWrite", "FreeMegabytes", "name"])

    # Using an accessor (Python 2 `xrange`)
    for index in xrange(0, 2):
        self.assertWMIObject(wmi_sampler[index], ["AvgDiskBytesPerWrite", "FreeMegabytes", "name"])
def test_raw_perf_properties(self):
    """
    Extend the list of properties to query for RAW Performance classes.
    """
    # Formatted Performance class: the property list is kept as-is
    formatted_sampler = WMISampler("Win32_PerfFormattedData_PerfOS_System", ["ProcessorQueueLength"])
    self.assertEquals(len(formatted_sampler.property_names), 1)

    # Raw Performance class: two extra properties are appended
    # (presumably the timestamp/frequency counters — see the raw
    # formatting tests below)
    raw_sampler = WMISampler("Win32_PerfRawData_PerfOS_System", ["CounterRawCount", "CounterCounter"])  # noqa
    self.assertEquals(len(raw_sampler.property_names), 4)
def test_raw_initial_sampling(self):
    """
    Query for initial sample for RAW Performance classes.
    """
    wmi_sampler = WMISampler("Win32_PerfRawData_PerfOS_System", ["CounterRawCount", "CounterCounter"])  # noqa
    wmi_sampler.sample()

    # 2 queries should have been made: one for initialization, one for sampling
    self.assertEquals(SWbemServices.ExecQuery.call_count, 2, SWbemServices.ExecQuery.call_count)

    # Repeat: the initialization query is not re-issued, so only one
    # additional query is expected
    wmi_sampler.sample()
    self.assertEquals(SWbemServices.ExecQuery.call_count, 3, SWbemServices.ExecQuery.call_count)
def test_raw_cache_qualifiers(self):
    """
    Cache the qualifiers on the first query against RAW Performance classes.
    """
    # Append `flag_use_amended_qualifiers` flag on the first query
    # (131120 looks like the base flags 48 plus 0x20000 — TODO confirm
    # against the sampler's flag constants)
    wmi_raw_sampler = WMISampler("Win32_PerfRawData_PerfOS_System", ["CounterRawCount", "CounterCounter"])  # noqa
    wmi_raw_sampler._query()
    self.assertWMIQuery(wmi_raw_sampler, flags=131120)

    # Subsequent queries use the plain flags only
    wmi_raw_sampler._query()
    self.assertWMIQuery(wmi_raw_sampler, flags=48)

    # Qualifiers are cached
    self.assertTrue(wmi_raw_sampler.property_counter_types)
    self.assertIn('CounterRawCount', wmi_raw_sampler.property_counter_types)
    self.assertIn('CounterCounter', wmi_raw_sampler.property_counter_types)
def test_raw_properties_formatting(self):
    """
    WMI Object's RAW data are returned formatted.
    """
    wmi_raw_sampler = WMISampler("Win32_PerfRawData_PerfOS_System", ["CounterRawCount", "CounterCounter"])  # noqa
    wmi_raw_sampler.sample()
    self.assertEquals(len(wmi_raw_sampler), 2)

    # Using an iterator: formatted values are returned for the raw counters
    for wmi_obj in wmi_raw_sampler:
        self.assertWMIObject(wmi_obj, ["CounterRawCount", "CounterCounter", "Timestamp_Sys100NS", "Frequency_Sys100NS", "name"])  # noqa
        self.assertEquals(wmi_obj['CounterRawCount'], 500)
        self.assertEquals(wmi_obj['CounterCounter'], 50)

    # Using an accessor (Python 2 `xrange`): same formatted values
    for index in xrange(0, 2):
        self.assertWMIObject(wmi_raw_sampler[index], ["CounterRawCount", "CounterCounter", "Timestamp_Sys100NS", "Frequency_Sys100NS", "name"])  # noqa
        self.assertEquals(wmi_raw_sampler[index]['CounterRawCount'], 500)
        self.assertEquals(wmi_raw_sampler[index]['CounterCounter'], 50)
def test_raw_properties_fallback(self):
    """
    Print a warning on RAW Performance classes if the calculator is undefined.

    Returns the original RAW value.
    """
    from checks.libs.wmi.sampler import WMISampler
    # A mock logger is injected so the warning call can be asserted;
    # note this WMISampler call signature takes the logger first.
    logger = Mock()
    wmi_raw_sampler = WMISampler(logger, "Win32_PerfRawData_PerfOS_System", ["UnknownCounter", "MissingProperty"])  # noqa
    wmi_raw_sampler.sample()
    self.assertEquals(len(wmi_raw_sampler), 2)

    for wmi_obj in wmi_raw_sampler:
        self.assertWMIObject(wmi_obj, ["UnknownCounter", "Timestamp_Sys100NS", "Frequency_Sys100NS", "name"])  # noqa
        # The raw value is passed through unchanged
        self.assertEquals(wmi_obj['UnknownCounter'], 999)
    self.assertTrue(logger.warning.called)
def test_missing_property(self):
    """
    Do not raise on missing properties.
    """
    raw_sampler = WMISampler("Win32_PerfRawData_PerfOS_System", ["UnknownCounter", "MissingProperty"])  # noqa
    raw_sampler.sample()
    self.assertEquals(len(raw_sampler), 2)

    # Accessing a property absent from the WMI object must yield a
    # falsy value rather than raising.
    for wmi_obj in raw_sampler:
        self.assertFalse(wmi_obj['MissingProperty'])
class TestIntegrationWMI(unittest.TestCase):
    """Integration tests for WMISampler."""
    pass
|
{
"content_hash": "0dc443074505d9d685403aea096dad45",
"timestamp": "",
"source": "github",
"line_count": 501,
"max_line_length": 158,
"avg_line_length": 35.07185628742515,
"alnum_prop": 0.6160719367139036,
"repo_name": "amalakar/dd-agent",
"id": "98c93884e0da6ce962bac4a0cfb6e29d3050f8d4",
"size": "17580",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/core/test_wmi.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "2717"
},
{
"name": "Go",
"bytes": "2389"
},
{
"name": "HTML",
"bytes": "8758"
},
{
"name": "Nginx",
"bytes": "3908"
},
{
"name": "PowerShell",
"bytes": "2665"
},
{
"name": "Python",
"bytes": "1817113"
},
{
"name": "Ruby",
"bytes": "97536"
},
{
"name": "Shell",
"bytes": "51920"
},
{
"name": "XSLT",
"bytes": "2222"
}
],
"symlink_target": ""
}
|
import datetime
import os
import shutil
import sys
import yaml
def gen_vehicle_controller_header(content, output_dir):
    """Generate <car_type>_controller.h from template/controller.h.tpl.

    Args:
        content: parsed YAML config; must provide "car_type" and
            "protocols" (a mapping of protocol id -> protocol dict with
            "protocol_type" and "name" keys).
        output_dir: output directory, expected to end with a separator.
    """
    controller_header_tpl_file = "template/controller.h.tpl"
    car_type = content["car_type"]
    with open(controller_header_tpl_file, 'r') as tpl:
        fmt = tpl.readlines()
    controller_header_file = output_dir + (
        "%s_controller.h" % content["car_type"].lower())
    with open(controller_header_file, 'w') as header:
        FMT = "".join(fmt)
        fmt_val = {}
        # BUG FIX: the raw car_type was previously assigned here without
        # .lower(); every sibling generator lower-cases this key, and the
        # key name itself says "lower".
        fmt_val["car_type_lower"] = car_type.lower()
        fmt_val["car_type_upper"] = car_type.upper()
        fmt_val["car_type_cap"] = car_type.capitalize()
        control_protocol_include_list = []
        control_protocol_include_fmt = "#include \"modules/canbus/vehicle/%s/protocol/%s.h\""
        control_protocol_ptr_list = []
        control_protocol_ptr_fmt = " %s* %s_ = nullptr;"
        protocols = content["protocols"]
        # Collect one include and one member-pointer declaration per
        # control protocol; report protocols are skipped here.
        for pid in protocols:
            p = protocols[pid]
            if p["protocol_type"] == "control":
                name = p["name"]
                include = control_protocol_include_fmt % (car_type.lower(),
                                                          name.lower())
                control_protocol_include_list.append(include)
                var_classname = name.replace('_', '').capitalize()
                var_ptr = control_protocol_ptr_fmt % (var_classname, name)
                control_protocol_ptr_list.append(var_ptr)
        # Deterministic output regardless of dict iteration order
        control_protocol_include_list.sort()
        control_protocol_ptr_list.sort()
        fmt_val["control_protocol_include_list"] = "\n".join(
            control_protocol_include_list)
        fmt_val["control_protocol_ptr_list"] = "\n".join(
            control_protocol_ptr_list)
        header.write(FMT % fmt_val)
def gen_vehicle_controller_cpp(content, output_dir):
    """Generate <car_type>_controller.cc from template/controller.cc.tpl.

    For every "control" protocol in the config, emits a dynamic_cast
    pointer-fetch snippet and an AddMessage registration line, then fills
    the template with them.
    """
    controller_cpp_tpl_file = "template/controller.cc.tpl"
    with open(controller_cpp_tpl_file, 'r') as tpl:
        fmt = tpl.readlines()
    car_type = content["car_type"]
    controller_cpp_file = output_dir + ("%s_controller.cc" % car_type.lower())
    with open(controller_cpp_file, 'w') as cpp:
        FMT = "".join(fmt)
        fmt_val = {}
        fmt_val["car_type_lower"] = car_type.lower()
        fmt_val["car_type_cap"] = car_type.capitalize()
        protocol_ptr_get_list = []
        # C++ snippet: fetch the protocol-data pointer from the message
        # manager and bail out with CANBUS_ERROR if it is missing.
        protocol_ptr_get_fmt = """ %(var_name)s_ = dynamic_cast<%(class_name)s*>
(message_manager_->GetMutableProtocolDataById(%(class_name)s::ID));
if (%(var_name)s_ == nullptr) {
AERROR << "%(class_name)s does not exist in the %(car_type)sMessageManager!";
return ErrorCode::CANBUS_ERROR;
}
"""
        protocol_add_list = []
        protocol_add_fmt = " can_sender_->AddMessage(%s::ID, %s_, false);"
        protocols = content["protocols"]
        for pid in protocols:
            p = protocols[pid]
            if p["protocol_type"] == "control":
                # e.g. name "brake_60" -> class "Brake60", member "brake_60_"
                var_name = p["name"].lower()
                class_name = p["name"].replace('_', '').capitalize()
                ptr_get_fmt_val = {}
                ptr_get_fmt_val["var_name"] = var_name
                ptr_get_fmt_val["class_name"] = class_name
                ptr_get_fmt_val["car_type"] = car_type.capitalize()
                ptr_get = protocol_ptr_get_fmt % ptr_get_fmt_val
                protocol_ptr_get_list.append(ptr_get)
                protocol_add = protocol_add_fmt % (class_name, var_name)
                protocol_add_list.append(protocol_add)
        # Deterministic output regardless of dict iteration order
        protocol_ptr_get_list.sort()
        protocol_add_list.sort()
        fmt_val["protocol_ptr_get_list"] = "\n".join(protocol_ptr_get_list)
        fmt_val["protocol_add_list"] = "\n".join(protocol_add_list)
        cpp.write(FMT % fmt_val)
def gen_message_manager_header(content, output_dir):
    """Render template/message_manager.h.tpl into <car_type>_message_manager.h."""
    tpl_path = "template/message_manager.h.tpl"
    with open(tpl_path, 'r') as tpl:
        template = "".join(tpl.readlines())
    car_type = content["car_type"]
    out_path = output_dir + "%s_message_manager.h" % car_type.lower()
    # Substitution values for the %-style template placeholders
    values = {
        "car_type_namespace": car_type.lower(),
        "car_type_cap": car_type.capitalize(),
        "car_type_up": car_type.upper(),
    }
    with open(out_path, 'w') as header:
        header.write(template % values)
def gen_message_manager_cpp(content, output_dir):
    """Generate <car_type>_message_manager.cc from its template.

    Control protocols are registered as "Send" protocol data, report
    protocols as "Recv"; each also contributes an #include line.
    """
    message_manager_cpp_tpl_file = "template/message_manager.cc.tpl"
    with open(message_manager_cpp_tpl_file, 'r') as tpl:
        fmt = tpl.readlines()
    car_type = content["car_type"]
    message_manager_cpp_file = output_dir + (
        "%s_message_manager.cc" % car_type.lower())
    with open(message_manager_cpp_file, 'w') as cpp:
        FMT = "".join(fmt)
        fmt_val = {}
        fmt_val["car_type_lower"] = car_type.lower()
        fmt_val["car_type_cap"] = car_type.capitalize()
        protocols = content["protocols"]
        control_header_list = []
        report_header_list = []
        header_fmt = "#include \"modules/canbus/vehicle/%s/protocol/%s.h\""
        control_add_list = []
        report_add_list = []
        add_fmt = " Add%sProtocolData<%s, true>();"
        for p_name in protocols:
            p = protocols[p_name]
            # e.g. name "brake_60" -> header "brake_60.h", class "Brake60"
            var_name = "%s" % p["name"].lower()
            class_name = p["name"].replace('_', '').capitalize()
            header = header_fmt % (car_type.lower(), var_name)
            if p["protocol_type"] == "control":
                control_header_list.append(header)
                item = add_fmt % ("Send", class_name)
                control_add_list.append(item)
            elif p["protocol_type"] == "report":
                report_header_list.append(header)
                item = add_fmt % ("Recv", class_name)
                report_add_list.append(item)
        # Deterministic output regardless of dict iteration order
        control_header_list.sort()
        report_header_list.sort()
        control_add_list.sort()
        report_add_list.sort()
        fmt_val["control_header_list"] = "\n".join(control_header_list)
        fmt_val["report_header_list"] = "\n".join(report_header_list)
        fmt_val["control_add_list"] = "\n".join(control_add_list)
        fmt_val["report_add_list"] = "\n".join(report_add_list)
        cpp.write(FMT % fmt_val)
def gen_vehicle_factory_header(content, output_dir):
    """Render template/vehicle_factory.h.tpl into <car_type>_vehicle_factory.h."""
    with open("template/vehicle_factory.h.tpl", 'r') as tpl:
        template = "".join(tpl.readlines())
    car_type = content["car_type"]
    out_path = output_dir + "%s_vehicle_factory.h" % car_type.lower()
    # Substitution values for the %-style template placeholders
    values = {
        "car_type_cap": car_type.capitalize(),
        "car_type_upper": car_type.upper(),
        "car_type_lower": car_type.lower(),
    }
    with open(out_path, 'w') as header:
        header.write(template % values)
def gen_vehicle_factory_cpp(content, output_dir):
    """Render template/vehicle_factory.cc.tpl into <car_type>_vehicle_factory.cc."""
    with open("template/vehicle_factory.cc.tpl", 'r') as tpl:
        template = "".join(tpl.readlines())
    car_type = content["car_type"]
    out_path = output_dir + "%s_vehicle_factory.cc" % car_type.lower()
    # Substitution values for the %-style template placeholders
    values = {
        "car_type_lower": car_type.lower(),
        "car_type_cap": car_type.capitalize(),
        "car_type_upper": car_type.upper(),
    }
    with open(out_path, 'w') as cpp:
        cpp.write(template % values)
def gen_build_file(content, output_dir):
    """Render template/controller_manager_BUILD.tpl into the BUILD file."""
    with open("template/controller_manager_BUILD.tpl", 'r') as tpl:
        template = "".join(tpl.readlines())
    values = {"car_type_lower": content["car_type"].lower()}
    with open(output_dir + "BUILD", 'w') as fp:
        fp.write(template % values)
def gen_vehicle_controller_and_manager(config_file, output_dir):
    """Drive all generators for one vehicle: controller, message manager,
    vehicle factory and BUILD file.

    Args:
        config_file: path to the protocol YAML config.
        output_dir: directory the generated sources are written into.
    """
    print("Generating controller and manager")
    with open(config_file, 'r') as fp:
        content = yaml.safe_load(fp)
    gen_vehicle_controller_header(content, output_dir)
    gen_vehicle_controller_cpp(content, output_dir)
    gen_message_manager_header(content, output_dir)
    gen_message_manager_cpp(content, output_dir)
    gen_vehicle_factory_header(content, output_dir)
    gen_vehicle_factory_cpp(content, output_dir)
    gen_build_file(content, output_dir)
if __name__ == "__main__":
    if len(sys.argv) != 2:
        print('Usage: python %s some_config.yml' % sys.argv[0])
        # BUG FIX: a usage error must exit non-zero (was sys.exit(0)),
        # otherwise callers/build scripts see a bogus success.
        sys.exit(1)
    with open(sys.argv[1], 'r') as fp:
        conf = yaml.safe_load(fp)
    protocol_conf = conf["protocol_conf"]
    # Generated sources go to <output_dir>/vehicle/<car_type>/,
    # which is wiped and recreated on every run.
    output_dir = conf["output_dir"] + "vehicle/" + conf["car_type"].lower() + \
        "/"
    shutil.rmtree(output_dir, True)
    os.makedirs(output_dir)
    gen_vehicle_controller_and_manager(protocol_conf, output_dir)
|
{
"content_hash": "09fe0a73dbda52890d10784dd704042b",
"timestamp": "",
"source": "github",
"line_count": 226,
"max_line_length": 93,
"avg_line_length": 40.32743362831859,
"alnum_prop": 0.5822909809084924,
"repo_name": "ApolloAuto/apollo",
"id": "eb18ec65465abbc9e4d345538808d924ec7069dd",
"size": "9923",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "modules/tools/gen_vehicle_protocol/gen_vehicle_controller_and_manager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1922"
},
{
"name": "Batchfile",
"bytes": "791"
},
{
"name": "C",
"bytes": "66747"
},
{
"name": "C++",
"bytes": "19641274"
},
{
"name": "CMake",
"bytes": "3600"
},
{
"name": "Cuda",
"bytes": "221003"
},
{
"name": "Dockerfile",
"bytes": "8522"
},
{
"name": "GLSL",
"bytes": "7000"
},
{
"name": "HTML",
"bytes": "9768"
},
{
"name": "Handlebars",
"bytes": "991"
},
{
"name": "JavaScript",
"bytes": "461346"
},
{
"name": "Makefile",
"bytes": "6626"
},
{
"name": "Python",
"bytes": "1178328"
},
{
"name": "SCSS",
"bytes": "52149"
},
{
"name": "Shell",
"bytes": "783043"
},
{
"name": "Smarty",
"bytes": "33183"
},
{
"name": "Starlark",
"bytes": "1025633"
},
{
"name": "Vim Script",
"bytes": "161"
}
],
"symlink_target": ""
}
|
"""
Find the k-cores of a graph.
The k-core is found by recursively pruning nodes with degrees less than k.
See the following references for details:
An O(m) Algorithm for Cores Decomposition of Networks
Vladimir Batagelj and Matjaz Zaversnik, 2003.
http://arxiv.org/abs/cs.DS/0310049
Generalized Cores
Vladimir Batagelj and Matjaz Zaversnik, 2002.
http://arxiv.org/pdf/cs/0202039
For directed graphs a more general notion is that of D-cores which
looks at (k, l) restrictions on (in, out) degree. The (k, k) D-core
is the k-core.
D-cores: Measuring Collaboration of Directed Graphs Based on Degeneracy
Christos Giatsidis, Dimitrios M. Thilikos, Michalis Vazirgiannis, ICDM 2011.
http://www.graphdegeneracy.org/dcores_ICDM_2011.pdf
"""
import networkx as nx
from networkx.exception import NetworkXError
from networkx.utils import not_implemented_for
__all__ = ['core_number', 'find_cores', 'k_core',
'k_shell', 'k_crust', 'k_corona']
@not_implemented_for('multigraph')
def core_number(G):
    """Return the core number for each vertex.

    A k-core is a maximal subgraph that contains nodes of degree k or more.
    The core number of a node is the largest value k of a k-core containing
    that node.

    Parameters
    ----------
    G : NetworkX graph
       A graph or directed graph

    Returns
    -------
    core_number : dictionary
       A dictionary keyed by node to the core number.

    Raises
    ------
    NetworkXError
        The k-core is not implemented for graphs with self loops
        or parallel edges.

    Notes
    -----
    Not implemented for graphs with parallel edges or self loops.

    For directed graphs the node degree is defined to be the
    in-degree + out-degree.

    References
    ----------
    .. [1] An O(m) Algorithm for Cores Decomposition of Networks
       Vladimir Batagelj and Matjaz Zaversnik, 2003.
       http://arxiv.org/abs/cs.DS/0310049
    """
    if G.number_of_selfloops() > 0:
        msg = ('Input graph has self loops which is not permitted; '
               'Consider using G.remove_edges_from(G.selfloop_edges()).')
        raise NetworkXError(msg)
    degrees = dict(G.degree())
    # Sort nodes by degree.
    nodes = sorted(degrees, key=degrees.get)
    # bin_boundaries[d] is the index in `nodes` where degree-d nodes start;
    # built by bucketing the degree-sorted node list.
    bin_boundaries = [0]
    curr_degree = 0
    for i, v in enumerate(nodes):
        if degrees[v] > curr_degree:
            bin_boundaries.extend([i] * (degrees[v]-curr_degree))
            curr_degree = degrees[v]
    # node_pos tracks each node's current index in `nodes` so swaps are O(1)
    node_pos = {v: pos for pos, v in enumerate(nodes)}
    # The initial guess for the core number of a node is its degree.
    core = degrees
    nbrs = {v: list(nx.all_neighbors(G, v)) for v in G}
    for v in nodes:
        for u in nbrs[v]:
            if core[u] > core[v]:
                # Processing v lowers u's core estimate by one: swap u to
                # the front of its bin, shrink the bin, and decrement.
                nbrs[u].remove(v)
                pos = node_pos[u]
                bin_start = bin_boundaries[core[u]]
                node_pos[u] = bin_start
                node_pos[nodes[bin_start]] = pos
                nodes[bin_start], nodes[pos] = nodes[pos], nodes[bin_start]
                bin_boundaries[core[u]] += 1
                core[u] -= 1
    return core


# Backwards-compatible alias for core_number.
find_cores = core_number
def _core_subgraph(G, k_filter, k=None, core=None):
    """Returns the subgraph induced by nodes passing filter ``k_filter``.

    Parameters
    ----------
    G : NetworkX graph
       The graph or directed graph to process
    k_filter : filter function
       Takes a node of G, the filter's cutoff, and the core dict of the
       graph, and returns a Boolean deciding whether the node is kept.
    k : int, optional
      The order of the core.  If not specified use the max core number.
      This value is used as the cutoff for the filter.
    core : dict, optional
      Precomputed core numbers keyed by node for the graph ``G``.
      If not specified, the core numbers will be computed from ``G``.
    """
    core_numbers = core if core is not None else core_number(G)
    cutoff = k if k is not None else max(core_numbers.values())
    keep = (v for v in core_numbers if k_filter(v, cutoff, core_numbers))
    return G.subgraph(keep).copy()
def k_core(G, k=None, core_number=None):
    """Return the k-core of G.

    A k-core is a maximal subgraph that contains nodes of degree k or more.

    Parameters
    ----------
    G : NetworkX graph
      A graph or directed graph
    k : int, optional
      The order of the core.  If not specified return the main core.
    core_number : dictionary, optional
      Precomputed core numbers for the graph G.

    Returns
    -------
    G : NetworkX graph
      The k-core subgraph

    Raises
    ------
    NetworkXError
      The k-core is not defined for graphs with self loops or parallel edges.

    Notes
    -----
    The main core is the core with the largest degree.

    Not implemented for graphs with parallel edges or self loops.

    For directed graphs the node degree is defined to be the
    in-degree + out-degree.

    Graph, node, and edge attributes are copied to the subgraph.

    See Also
    --------
    core_number

    References
    ----------
    .. [1] An O(m) Algorithm for Cores Decomposition of Networks
       Vladimir Batagelj and Matjaz Zaversnik, 2003.
       http://arxiv.org/abs/cs.DS/0310049
    """
    # Keep every node whose core number reaches the cutoff.
    return _core_subgraph(G, lambda v, cutoff, core: core[v] >= cutoff,
                          k, core_number)
def k_shell(G, k=None, core_number=None):
    """Return the k-shell of G.

    The k-shell is the subgraph induced by nodes with core number k.
    That is, nodes in the k-core that are not in the (k+1)-core.

    Parameters
    ----------
    G : NetworkX graph
      A graph or directed graph.
    k : int, optional
      The order of the shell.  If not specified return the outer shell.
    core_number : dictionary, optional
      Precomputed core numbers for the graph G.

    Returns
    -------
    G : NetworkX graph
       The k-shell subgraph

    Raises
    ------
    NetworkXError
        The k-shell is not implemented for graphs with self loops
        or parallel edges.

    Notes
    -----
    This is similar to k_corona but in that case only neighbors in the
    k-core are considered.

    Not implemented for graphs with parallel edges or self loops.

    For directed graphs the node degree is defined to be the
    in-degree + out-degree.

    Graph, node, and edge attributes are copied to the subgraph.

    See Also
    --------
    core_number
    k_corona

    References
    ----------
    .. [1] A model of Internet topology using k-shell decomposition
       Shai Carmi, Shlomo Havlin, Scott Kirkpatrick, Yuval Shavitt,
       and Eran Shir, PNAS  July 3, 2007   vol. 104  no. 27  11150-11154
       http://www.pnas.org/content/104/27/11150.full
    """
    # Keep only nodes whose core number equals the cutoff exactly.
    return _core_subgraph(G, lambda v, cutoff, core: core[v] == cutoff,
                          k, core_number)
def k_crust(G, k=None, core_number=None):
    """Return the k-crust of G.

    The k-crust is the graph G with the k-core removed.

    Parameters
    ----------
    G : NetworkX graph
       A graph or directed graph.
    k : int, optional
      The order of the shell.  If not specified return the main crust.
    core_number : dictionary, optional
      Precomputed core numbers for the graph G.

    Returns
    -------
    G : NetworkX graph
       The k-crust subgraph

    Raises
    ------
    NetworkXError
        The k-crust is not implemented for graphs with self loops
        or parallel edges.

    Notes
    -----
    This definition of k-crust is different than the definition in [1]_.
    The k-crust in [1]_ is equivalent to the k+1 crust of this algorithm.

    Not implemented for graphs with parallel edges or self loops.

    For directed graphs the node degree is defined to be the
    in-degree + out-degree.

    Graph, node, and edge attributes are copied to the subgraph.

    See Also
    --------
    core_number

    References
    ----------
    .. [1] A model of Internet topology using k-shell decomposition
       Shai Carmi, Shlomo Havlin, Scott Kirkpatrick, Yuval Shavitt,
       and Eran Shir, PNAS  July 3, 2007   vol. 104  no. 27  11150-11154
       http://www.pnas.org/content/104/27/11150.full
    """
    # The default cutoff is one less than in _core_subgraph, so the
    # selection is inlined here instead of delegated.
    core = core_number if core_number is not None else find_cores(G)
    cutoff = k if k is not None else max(core.values()) - 1
    return G.subgraph(v for v in core if core[v] <= cutoff).copy()
def k_corona(G, k, core_number=None):
    """Return the k-corona of G.

    The k-corona is the subgraph of nodes in the k-core which have
    exactly k neighbours in the k-core.

    Parameters
    ----------
    G : NetworkX graph
       A graph or directed graph
    k : int
       The order of the corona.
    core_number : dictionary, optional
       Precomputed core numbers for the graph G.

    Returns
    -------
    G : NetworkX graph
       The k-corona subgraph

    Raises
    ------
    NetworkXError
        The k-corona is not defined for graphs with self loops or
        parallel edges.

    Notes
    -----
    Not implemented for graphs with parallel edges or self loops.

    For directed graphs the node degree is defined to be the
    in-degree + out-degree.

    Graph, node, and edge attributes are copied to the subgraph.

    See Also
    --------
    core_number

    References
    ----------
    .. [1]  k -core (bootstrap) percolation on complex networks:
       Critical phenomena and nonlocal effects,
       A. V. Goltsev, S. N. Dorogovtsev, and J. F. F. Mendes,
       Phys. Rev. E 73, 056101 (2006)
       http://link.aps.org/doi/10.1103/PhysRevE.73.056101
    """
    def func(v, k, c):
        # Node must be in the k-shell AND have exactly k neighbors whose
        # core number is at least k (i.e. k neighbors inside the k-core).
        return c[v] == k and k == sum(1 for w in G[v] if c[w] >= k)
    return _core_subgraph(G, func, k, core_number)
|
{
"content_hash": "0fe6149fe94e0cad27e424b0cb232bef",
"timestamp": "",
"source": "github",
"line_count": 345,
"max_line_length": 77,
"avg_line_length": 28.617391304347827,
"alnum_prop": 0.6259495594044363,
"repo_name": "andnovar/networkx",
"id": "c98c7d77c65678cfcba19e2ab2571ec8b29cbd74",
"size": "10201",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "networkx/algorithms/core.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1786"
},
{
"name": "PowerShell",
"bytes": "3311"
},
{
"name": "Python",
"bytes": "3218696"
}
],
"symlink_target": ""
}
|
import os
import sys
from unittest import TestCase, main
from cliez.conf import settings, Settings
class Model(object):
    """Toy consumer of ``settings()`` used by the tests below."""

    # Evaluated once at class-creation time, i.e. before Settings.bind()
    # has run in the test setUp.
    config_none = settings()

    def __init__(self):
        # Evaluated per instance, i.e. after setUp() has bound settings.
        self.config = settings()
class SettingsTests(TestCase):
    """Exercise cliez.conf Settings binding and attribute access."""

    def setUp(self):
        sys.path.insert(0, os.path.dirname(__file__))
        Settings.bind('res_settings.demo', __file__)
        self.model = Model()

    def test_ok(self):
        # Bound settings objects are Settings instances exposing public vars
        self.assertEqual(Settings, self.model.config.__class__)
        self.assertEqual(2, self.model.config.public_var)
        # Underscore-prefixed names must not be exposed
        with self.assertRaises(AttributeError):
            _ = self.model.config._private_var

    def test_is_none(self):
        # settings() called before bind() returns None
        self.assertEqual(None, self.model.config_none)
if __name__ == '__main__':
    # Allow running this test module directly: python test_settings.py
    main()
|
{
"content_hash": "7b4714258b7f616cfe120e646fcfe556",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 63,
"avg_line_length": 19.976190476190474,
"alnum_prop": 0.6042908224076281,
"repo_name": "9nix00/cliez",
"id": "d84a3affe1a0cc3d245c9a2d87dd44310efef041",
"size": "839",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "40565"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: redefines Post.draft as a boolean field
    # defaulting to False.

    dependencies = [
        ('posts', '0003_auto_20160406_1726'),
    ]

    operations = [
        migrations.AlterField(
            model_name='post',
            name='draft',
            field=models.BooleanField(default=False),
        ),
    ]
|
{
"content_hash": "6eeceed68d59296977daf8e1921f4f27",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 53,
"avg_line_length": 20.88888888888889,
"alnum_prop": 0.5904255319148937,
"repo_name": "smi96/django-blog_website",
"id": "4f69d5f7a168127cae5abaaea412a13941f15b37",
"size": "446",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/posts/migrations/0004_auto_20160406_1820.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "91572"
},
{
"name": "HTML",
"bytes": "75307"
},
{
"name": "JavaScript",
"bytes": "206947"
},
{
"name": "Python",
"bytes": "6324136"
},
{
"name": "Shell",
"bytes": "3779"
}
],
"symlink_target": ""
}
|
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class ConfigurationVariableConfig(AppConfig):
    """Django app config for django_admin_conf_vars."""

    name = 'django_admin_conf_vars'
    verbose_name = _(u'Configuration variables')

    def ready(self):
        # Imported here (not at module level) so the app registry is fully
        # loaded before the config module is touched.
        from .global_vars import config
        # NOTE(review): load_attributes() presumably loads the stored
        # variable values into `config` — confirm in global_vars.
        config.load_attributes()
|
{
"content_hash": "eee3078f8cb17b6be87cc33d50bf5c8f",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 55,
"avg_line_length": 28.90909090909091,
"alnum_prop": 0.7264150943396226,
"repo_name": "maoaiz/django-admin-conf-vars",
"id": "5a8eaaabefd0731522e66889eadccedcfc38dd1d",
"size": "318",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_admin_conf_vars/apps.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "21"
},
{
"name": "JavaScript",
"bytes": "387"
},
{
"name": "Python",
"bytes": "9002"
}
],
"symlink_target": ""
}
|
"""
Run a Python script in the directory specified by **ctx.bldnode**.
Select a Python version by specifying the **version** keyword for
the task generator instance as integer 2 or 3. Default is 3.
Any string passed to the **add_to_pythonpath** keyword will be appended to the
PYTHONPATH environment variable; strings supplied to the **prepend** and
**append** keywords will be added to the command line.
Usage::
ctx(
features='run_py_script', version=3,
source='some_script.py',
target=['some_table.tex', 'some_figure.eps'],
deps='some_data.csv',
add_to_pythonpath='src/some/library',
append='',
prepend=''
)
"""
import os
from waflib import Task, TaskGen, Logs
def configure(conf):
    """Locate a Python interpreter for run_py_script tasks.

    Stores the interpreter in ``conf.env.PYCMD`` and aborts the waf
    configuration if none can be found.
    """
    conf.find_program('python', var='PYCMD', mandatory=False)
    if conf.env.PYCMD:
        return
    conf.fatal("No Python interpreter found!")
class run_py_script(Task.Task):
    """Run a Python script."""

    # ${PREPEND}/${APPEND} let the task generator wrap the interpreter call.
    run_str = '${PREPEND} ${PYCMD} ${SRC[0].abspath()} ${APPEND}'
    shell = True

    def exec_command(self, cmd, **kw):
        """Execute the command, defaulting its working directory to the
        build context's ``cwd`` (initialised lazily from ``variant_dir``).
        """
        bld = self.generator.bld
        try:
            if not kw.get('cwd', None):
                kw['cwd'] = bld.cwd
        except AttributeError:
            # First task to run: bld has no 'cwd' attribute yet, so seed
            # it (and this call's cwd) from the variant directory.
            bld.cwd = kw['cwd'] = bld.variant_dir
        if not self.buffer_output:
            # Inherit the parent process streams instead of capturing them.
            kw["stdout"] = kw["stderr"] = None
        return bld.exec_command(cmd, **kw)

    def keyword(self):
        """
        Override the 'Compiling' default.
        """
        return 'Running'

    def __str__(self):
        """
        More useful output.
        """
        return "{prepend} [Python] {fn} {append}".format(
            prepend=self.env.PREPEND,
            fn=self.inputs[0].path_from(self.inputs[0].ctx.launch_node()),
            append=self.env.APPEND
        )
@TaskGen.feature('run_py_script')
@TaskGen.before_method('process_source')
def apply_run_py_script(tg):
    """Task generator for running a single Python module.

    The generated task will honor the PYTHONPATH environmental variable
    as well as a PYTHONPATH attribute of the build context environment.

    Attributes:

        * source -- A **single** source node or string. (required)
        * target -- A single target or list of targets (nodes or strings).
        * deps -- A single dependency or list of dependencies
          (nodes or strings)
        * add_to_pythonpath -- A string that will be appended to the
          PYTHONPATH environment variable along with the appropriate
          path separator.
        * prepend -- A string that will be prepended to the command
        * append -- A string that will be appended to the command

    """
    # Convert sources and targets to nodes
    src_node = tg.path.find_resource(tg.source)
    if not src_node:
        tg.bld.fatal(
            'Cannot find input file %s for processing' % tg.source
        )
    tgt_nodes = [tg.path.find_or_declare(t) for t in tg.to_list(tg.target)]

    # Create the task.
    tsk = tg.create_task('run_py_script', src=src_node, tgt=tgt_nodes)
    tsk.env.APPEND = getattr(tg, 'append', '')
    tsk.env.PREPEND = getattr(tg, 'prepend', '')
    tsk.buffer_output = getattr(tg, 'buffer_output', True)

    # Custom execution environment: PYTHONPATH is assembled from the
    # process environment, the build env, and the task generator, in
    # that order.
    tsk.env.env = dict(os.environ)
    if tsk.env.env.get('PYTHONPATH', None):
        pythonpath = [tsk.env.env['PYTHONPATH']]
    else:
        pythonpath = []
    if getattr(tsk.env, 'PYTHONPATH', None):
        pythonpath.append(tsk.env.PYTHONPATH)
    if getattr(tg, 'add_to_pythonpath', None):
        pythonpath.append(tg.add_to_pythonpath)
    if pythonpath:
        tsk.env.env['PYTHONPATH'] = os.pathsep.join(pythonpath)

    # dependencies (if the attribute 'deps' changes, trigger a recompilation)
    for x in tg.to_list(getattr(tg, 'deps', [])):
        node = tg.path.find_resource(x)
        if not node:
            tg.bld.fatal(
                'Could not find dependency %r for running %r'
                % (x, src_node.relpath())
            )
        tsk.dep_nodes.append(node)
    Logs.debug('deps: found dependencies %r for running %r', tsk.dep_nodes, src_node.abspath())

    # Bypass the execution of process_source by setting the source to an empty list
    tg.source = []
|
{
"content_hash": "bc6cf825544fff49244a54e16b0738e3",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 95,
"avg_line_length": 32.1764705882353,
"alnum_prop": 0.5980347349177331,
"repo_name": "MarekIgnaszak/econ-project-templates",
"id": "e9751c95e5da5616c64f77c8945dcd362fe4bf11",
"size": "4472",
"binary": false,
"copies": "1",
"ref": "refs/heads/python",
"path": ".mywaflib/waflib/extras/run_py_script.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "663"
},
{
"name": "Jupyter Notebook",
"bytes": "3572"
},
{
"name": "Python",
"bytes": "1222989"
},
{
"name": "Shell",
"bytes": "1716"
},
{
"name": "TeX",
"bytes": "14224"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
from file_picker.uploads import models as upload_models
# Expose the file-picker upload models in the Django admin.
admin.site.register(upload_models.Image)
admin.site.register(upload_models.File)
|
{
"content_hash": "285c7cf878bcab5baa45768ef8335b49",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 55,
"avg_line_length": 28.666666666666668,
"alnum_prop": 0.8255813953488372,
"repo_name": "caktus/django-file-picker",
"id": "d7637a9894736bd34b52432db138dc9a18d1a82a",
"size": "172",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "file_picker/uploads/admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "45812"
},
{
"name": "HTML",
"bytes": "867"
},
{
"name": "JavaScript",
"bytes": "242024"
},
{
"name": "PHP",
"bytes": "1052"
},
{
"name": "Python",
"bytes": "37894"
}
],
"symlink_target": ""
}
|
from aiohttp import web
import urllib.parse
from foglamp.common.storage_client.payload_builder import PayloadBuilder
from foglamp.services.core import connect
__author__ = "Ashish Jabble"
__copyright__ = "Copyright (c) 2018 OSIsoft, LLC"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
_help = """
-------------------------------------------------------------------------------
| GET | /foglamp/track |
-------------------------------------------------------------------------------
"""
async def get_asset_tracker_events(request):
    """Return asset tracker events, optionally filtered by query parameters.

    Args:
        request: aiohttp request; supports optional query parameters
                 ``asset``, ``event`` and ``service``.
    Returns:
        JSON response of the form ``{"track": [...]}``.
    :Example:
        curl -X GET http://localhost:8081/foglamp/track
        curl -X GET http://localhost:8081/foglamp/track?asset=XXX
        curl -X GET http://localhost:8081/foglamp/track?event=XXX
        curl -X GET http://localhost:8081/foglamp/track?service=XXX
    """
    # Base query; the always-true WHERE gives AND_WHERE something to chain on.
    payload = PayloadBuilder().SELECT("asset", "event", "service", "foglamp", "plugin", "ts") \
        .ALIAS("return", ("ts", 'timestamp')).FORMAT("return", ("ts", "YYYY-MM-DD HH24:MI:SS.MS")) \
        .WHERE(['1', '=', 1])
    # .get() covers both "missing" and "empty string" in one truthiness check.
    if request.query.get('asset'):
        asset = urllib.parse.unquote(request.query['asset'])
        payload.AND_WHERE(['asset', '=', asset])
    if request.query.get('event'):
        # NOTE: event is intentionally not URL-unquoted (matches original behavior).
        event = request.query['event']
        payload.AND_WHERE(['event', '=', event])
    if request.query.get('service'):
        service = urllib.parse.unquote(request.query['service'])
        payload.AND_WHERE(['service', '=', service])
    storage_client = connect.get_storage_async()
    payload = PayloadBuilder(payload.chain_payload())
    try:
        result = await storage_client.query_tbl_with_payload('asset_tracker', payload.payload())
        response = result['rows']
    except KeyError:
        # Storage layer returned an error document instead of rows.
        raise web.HTTPBadRequest(reason=result['message'])
    except Exception as ex:
        # BUGFIX: pass a string as reason; the original passed the exception
        # object itself, which aiohttp expects to be a str.
        raise web.HTTPInternalServerError(reason=str(ex))
    return web.json_response({'track': response})
|
{
"content_hash": "cfd020ba45cf5661da9fe43301a06131",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 100,
"avg_line_length": 39.05357142857143,
"alnum_prop": 0.563328760859625,
"repo_name": "foglamp/FogLAMP",
"id": "6a58f4391bb4f60b0b0255a67337314028d269bd",
"size": "2281",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "python/foglamp/services/core/api/asset_tracker.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "3067"
},
{
"name": "C++",
"bytes": "2539743"
},
{
"name": "CMake",
"bytes": "55827"
},
{
"name": "Makefile",
"bytes": "20729"
},
{
"name": "Python",
"bytes": "2849148"
},
{
"name": "Shell",
"bytes": "204873"
},
{
"name": "TSQL",
"bytes": "41635"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import print_function
import os
import socket
from apptools.preferences.preference_binding import bind_preference
from smb.SMBConnection import SMBConnection, OperationFailure
from traits.api import Str
from pychron.media_storage.storage import RemoteStorage
from pychron.paths import paths
import six
def cache_path(src):
    """Return the local image-cache location for the remote path *src*."""
    filename = os.path.basename(src)
    return os.path.join(paths.image_cache_dir, filename)
class SMBStorage(RemoteStorage):
    """Remote media storage backed by an SMB/CIFS share (via pysmb),
    with a local file cache for downloads.
    """

    # Name of the SMB share ("service") on the remote host.
    service_name = Str
    url_name = 'SMB'
    def __init__(self, bind=True, *args, **kw):
        super(SMBStorage, self).__init__(bind=bind, *args, **kw)
        if bind:
            # Keep service_name in sync with the application preference.
            bind_preference(self, 'service_name', 'pychron.media_storage.smb_service_name')
    def get_base_url(self):
        # Display URL of the form SMB://host/share.
        return 'SMB://{}/{}'.format(self.host, self.service_name)
    def getlist(self):
        """Print the names of all entries at the share root (debug helper)."""
        conn = self._get_connection()
        if conn:
            for sf in conn.listPath(self.service_name, '/'):
                print(sf.filename)
            conn.close()
    def get(self, src, dest, use_cache=True):
        """Fetch *src* from the share into *dest* (path or writable stream)."""
        # Keep only everything after the second ':' as the remote path.
        # NOTE(review): assumes src contains at least two ':' separators —
        # confirm against the URL format produced by callers.
        src = ':'.join(src.split(':')[2:])
        if isinstance(dest, (str, six.text_type)):
            dest = open(dest, 'wb')
        self._get_file(src, dest, use_cache)
    def put(self, src, dest):
        """Upload *src* (path or readable stream) to *dest* on the share."""
        conn = self._get_connection()
        if conn:
            # make sure directory available to write to
            if os.path.basename(dest) != dest:
                self._r_mkdir(os.path.dirname(dest), conn)
            if not isinstance(src, (str, six.text_type)):
                conn.storeFile(self.service_name, dest, src)
            else:
                with open(src, 'rb') as rfile:
                    conn.storeFile(self.service_name, dest, rfile)
            conn.close()
    def _get_file(self, src, dest, use_cache):
        """Write the remote file *src* into stream *dest*; optionally serve
        from / refresh the local cache when *use_cache* is set."""
        if use_cache:
            if self._get_cached(src, dest):
                return
        conn = self._get_connection()
        if conn:
            try:
                conn.retrieveFile(self.service_name, src, dest)
            except OperationFailure:
                # Remote file missing/unreadable: leave dest untouched.
                return
            dest.seek(0)
            if use_cache:
                # Mirror the freshly downloaded bytes into the local cache.
                cp = cache_path(src)
                with open(cp, 'wb') as cache:
                    cache.write(dest.read())
                # os.chmod(cp, stat.S_IRUSR)
                # os.chmod(cp, stat.S_IRUSR|stat.S_IROTH)
    def _get_cached(self, src, dest):
        """Copy a cached download into *dest*; True on hit, None on miss."""
        p = cache_path(src)
        if os.path.isfile(p):
            with open(p, 'rb') as rfile:
                dest.write(rfile.read())
            return True
    def _r_mkdir(self, dest, conn=None):
        """Create directory *dest* on the share recursively (mkdir -p)."""
        if conn is None:
            conn = self._get_connection()
        sep = os.path.sep
        directories = dest.split(sep)
        tmp_path = ''
        for d in directories:
            # Only create each path component if it does not already exist.
            dir_content = conn.listPath(self.service_name, tmp_path)
            if d not in [x.filename for x in dir_content if x.isDirectory]:
                self.info('Directory {} is missing. Create it'.format(d))
                conn.createDirectory(self.service_name, '{}{}{}'.format(tmp_path, sep, d))
            tmp_path = '{}{}{}'.format(tmp_path, sep, d)
    def _get_connection(self):
        """Open an SMBConnection to self.host; returns None on failure."""
        localname = socket.gethostname()
        remotename = 'agustin'
        conn = SMBConnection(self.username, self.password,
                             localname, remotename)
        self.debug('get connection {}'.format(self.host))
        if conn.connect(self.host):
            return conn
        else:
            print('failed to connect')
if __name__ == '__main__':
    # Ad-hoc manual test: upload a local video to the bureau SMB share.
    import logging
    logger = logging.getLogger('SMB')
    logger.setLevel(logging.DEBUG)
    logger.addHandler(logging.StreamHandler())
    # Credentials come from the environment; host/share are hard-coded.
    s = SMBStorage(bind=False,
                   host='agustin.nmbgmr.nmt.edu',
                   service_name='argon',
                   username=os.getenv('bureau_username'),
                   password=os.getenv('bureau_password'))
    # s.getlist()
    src = '/Users/argonlab3/Pychron/data/videos/1842/65941-10C-001.avi'
    dest = 'FusionsCO2/1842/65941-10C-001qwe22.avi'
    s.put(src, dest)
    # s.put('/Users/ross/Desktop/argonfiles.txt', 'test/a/argonfiles.txt')
    # s._r_mkdir('test/a')
# ============= EOF =============================================
|
{
"content_hash": "608be6e3bc04bf9bceb0a804d5689215",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 91,
"avg_line_length": 32.237037037037034,
"alnum_prop": 0.5533088235294118,
"repo_name": "UManPychron/pychron",
"id": "0a91f11155892744bd3a6d6ad7d83f4dee286ec6",
"size": "5147",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "pychron/media_storage/smb_storage.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "131"
},
{
"name": "C++",
"bytes": "3706"
},
{
"name": "CSS",
"bytes": "279"
},
{
"name": "Fortran",
"bytes": "455875"
},
{
"name": "HTML",
"bytes": "40346"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Processing",
"bytes": "11421"
},
{
"name": "Python",
"bytes": "10234954"
},
{
"name": "Shell",
"bytes": "10753"
}
],
"symlink_target": ""
}
|
import numpy as np
import tvm
from tvm.contrib import graph_runtime
from tvm.relay.testing.config import ctx_list
from tvm import relay
from model_zoo import c2_squeezenet, c2_resnet50, c2_vgg19
from caffe2.python import workspace, core
from caffe2.proto import caffe2_pb2
from collections import namedtuple
def get_tvm_output(model,
                   input_data,
                   target,
                   ctx,
                   output_shape,
                   output_dtype='float32'):
    """Compile *model* with TVM/Relay and run it on *input_data*.

    Returns a numpy array, or a list of arrays when *output_shape* and
    *output_dtype* are parallel lists (multi-output models).
    """
    # supporting multiple inputs in caffe2 in a bit tricky,
    # because the input names can appear at the beginning or end of model.predict_net.external_input
    assert isinstance(input_data, np.ndarray)
    # here we use the first input blob to the first op to get the input name
    input_names = model.predict_net.op[0].input[0]
    shape_dict = {input_names: input_data.shape}
    dtype_dict = {input_names: input_data.dtype}
    mod, params = relay.frontend.from_caffe2(
        model.init_net, model.predict_net, shape_dict, dtype_dict)
    with relay.build_config(opt_level=3):
        graph, lib, params = relay.build(mod, target, params=params)
    m = graph_runtime.create(graph, lib, ctx)
    # set inputs
    m.set_input(input_names, tvm.nd.array(input_data.astype(input_data.dtype)))
    m.set_input(**params)
    # execute
    m.run()
    # get outputs
    if isinstance(output_shape, list) and isinstance(output_dtype, list):
        # Multiple outputs: fetch each into a correctly shaped buffer.
        tvm_output_list = []
        for i, s in enumerate(output_shape):
            tvm_output = m.get_output(i, tvm.nd.empty((s), output_dtype[i]))
            tvm_output_list.append(tvm_output.asnumpy())
        return tvm_output_list
    else:
        tvm_output = m.get_output(0, tvm.nd.empty((output_shape),
                                                  output_dtype))
        return tvm_output.asnumpy()
def get_caffe2_output(model, x, dtype='float32'):
    """Run *model* natively in Caffe2 on input *x*; return the first output blob."""
    # Initialise the weights, feed the single input, then run the predict net.
    workspace.RunNetOnce(model.init_net)
    first_input = model.predict_net.op[0].input[0]
    workspace.FeedBlob(first_input, x.astype(dtype))
    workspace.RunNetOnce(model.predict_net)
    return workspace.FetchBlob(model.predict_net.external_output[0])
def verify_caffe2_forward_impl(model, data_shape, out_shape):
    """Check that TVM reproduces Caffe2's forward pass on random input."""
    dtype = 'float32'
    data = np.random.uniform(size=data_shape).astype(dtype)
    reference = get_caffe2_output(model, data, dtype)
    for target, ctx in ctx_list():
        candidate = get_tvm_output(model, data, target, ctx, out_shape, dtype)
        tvm.testing.assert_allclose(reference, candidate, rtol=1e-5, atol=1e-5)
def test_forward_squeezenet1_1():
    # SqueezeNet 1.1: NCHW ImageNet input, 4-D classifier output.
    verify_caffe2_forward_impl(c2_squeezenet, (1, 3, 224, 224), (1, 1000, 1, 1))
def test_forward_resnet50():
    # ResNet-50: NCHW ImageNet input, flat 1000-class output.
    verify_caffe2_forward_impl(c2_resnet50, (1, 3, 224, 224), (1, 1000))
def test_forward_vgg19():
    # VGG-19: NCHW ImageNet input, flat 1000-class output.
    verify_caffe2_forward_impl(c2_vgg19, (1, 3, 224, 224), (1, 1000))
Model = namedtuple('Model', ['init_net', 'predict_net'])
def test_elementwise_add():
    # Hand-built Caffe2 graph: C = A + B with both operands the same shape.
    data_shape = (1, 16, 9, 9)
    init_net = caffe2_pb2.NetDef()
    init_net.name = 'test_init_net'
    init_net.external_output[:] = ['A', 'B']
    init_net.op.extend([
        core.CreateOperator(
            'GivenTensorFill',
            [],
            ['A'],
            shape=data_shape,
            values=np.random.uniform(size=data_shape).flatten().tolist(),
        ),
        core.CreateOperator(
            'GivenTensorFill',
            [],
            ['B'],
            shape=data_shape,
            values=np.random.uniform(size=data_shape).flatten().tolist(),
        ),
    ])
    predict_net = caffe2_pb2.NetDef()
    predict_net.name = 'test_predict_net'
    predict_net.external_input[:] = ['A', 'B']
    predict_net.external_output[:] = ['C']
    predict_net.op.extend([
        core.CreateOperator(
            'Add',
            ['A', 'B'],
            ['C'],
        )
    ])
    model = Model(init_net, predict_net)
    verify_caffe2_forward_impl(model, data_shape, data_shape)
def test_elementwise_add_with_broadcast():
    # C = A + B where B is a scalar broadcast across A's shape.
    data_shape = (1, 16, 9, 9)
    init_net = caffe2_pb2.NetDef()
    init_net.name = 'test_init_net'
    init_net.external_output[:] = ['A', 'B']
    init_net.op.extend([
        core.CreateOperator(
            'GivenTensorFill',
            [],
            ['A'],
            shape=data_shape,
            values=np.random.uniform(size=data_shape).flatten().tolist(),
        ),
        core.CreateOperator(
            'GivenTensorFill',
            [],
            ['B'],
            shape=(1,),
            values=np.random.uniform(size=1).flatten().tolist(),
        ),
    ])
    predict_net = caffe2_pb2.NetDef()
    predict_net.name = 'test_predict_net'
    predict_net.external_input[:] = ['A', 'B']
    predict_net.external_output[:] = ['C']
    predict_net.op.extend([
        core.CreateOperator(
            'Add',
            ['A', 'B'],
            ['C'],
            broadcast=1,
        )
    ])
    model = Model(init_net, predict_net)
    verify_caffe2_forward_impl(model, data_shape, data_shape)
def test_normalize_yuv():
    # NormalizePlanarYUV: per-channel normalization of a 3-channel input
    # using the 'mean' and 'std' blobs.
    data_shape = (1, 3, 96, 96)
    init_net = caffe2_pb2.NetDef()
    init_net.name = 'test_init_net'
    init_net.external_output[:] = ['A', 'mean', 'std']
    init_net.op.extend([
        core.CreateOperator(
            'GivenTensorFill',
            [],
            ['A'],
            shape=data_shape,
            values=np.random.uniform(size=data_shape).flatten().tolist(),
        ),
        core.CreateOperator(
            'GivenTensorFill',
            [],
            ['mean'],
            shape=(1, 3,),
            values=np.random.uniform(size=3).flatten().tolist(),
        ),
        core.CreateOperator(
            'GivenTensorFill',
            [],
            ['std'],
            shape=(1, 3,),
            values=np.random.uniform(size=3).flatten().tolist(),
        ),
    ])
    predict_net = caffe2_pb2.NetDef()
    predict_net.name = 'test_predict_net'
    predict_net.external_input[:] = ['A', 'mean', 'std']
    predict_net.external_output[:] = ['C']
    predict_net.op.extend([
        core.CreateOperator(
            'NormalizePlanarYUV',
            ['A', 'mean', 'std'],
            ['C'],
        )
    ])
    model = Model(init_net, predict_net)
    verify_caffe2_forward_impl(model, data_shape, data_shape)
if __name__ == '__main__':
    # Run the whole suite when executed directly (no test runner required).
    test_forward_squeezenet1_1()
    test_forward_resnet50()
    test_forward_vgg19()
    test_elementwise_add()
    test_elementwise_add_with_broadcast()
    test_normalize_yuv()
|
{
"content_hash": "27fefae02a178bef163ada8723ab08f2",
"timestamp": "",
"source": "github",
"line_count": 218,
"max_line_length": 100,
"avg_line_length": 30.43577981651376,
"alnum_prop": 0.5733232856066315,
"repo_name": "Huyuwei/tvm",
"id": "92258bbc284e5aa39933079c9aa2d8bbac403d57",
"size": "7420",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/python/frontend/caffe2/test_forward.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "6056"
},
{
"name": "C",
"bytes": "95567"
},
{
"name": "C++",
"bytes": "5569606"
},
{
"name": "CMake",
"bytes": "67305"
},
{
"name": "Go",
"bytes": "112376"
},
{
"name": "HTML",
"bytes": "8625"
},
{
"name": "Java",
"bytes": "173219"
},
{
"name": "JavaScript",
"bytes": "49801"
},
{
"name": "Makefile",
"bytes": "50818"
},
{
"name": "Objective-C",
"bytes": "15264"
},
{
"name": "Objective-C++",
"bytes": "46673"
},
{
"name": "Python",
"bytes": "6775044"
},
{
"name": "Rust",
"bytes": "182027"
},
{
"name": "Scala",
"bytes": "184105"
},
{
"name": "Shell",
"bytes": "96633"
},
{
"name": "Tcl",
"bytes": "53645"
},
{
"name": "Verilog",
"bytes": "30605"
}
],
"symlink_target": ""
}
|
import datetime
import sys
from lxml import etree
from xml.sax.saxutils import escape, unescape
TABLE = "table"
ROW = "row"
COL = "col"
FIELD = "field"
TAG = "tag"
HASHTAG = "hashtag"
# -----------------------------------------------------------------------------
def xml_encode(s):
    """XML-escape *s*, including single/double quotes; falsy values pass through."""
    if not s:
        return s
    return escape(s, {"'": "&apos;", '"': "&quot;"})
# -----------------------------------------------------------------------------
def xml_decode(s):
    """Reverse xml_encode: unescape XML entities; falsy values pass through."""
    if not s:
        return s
    return unescape(s, {"&apos;": "'", "&quot;": '"'})
# -----------------------------------------------------------------------------
def parse(source):
    """Parse *source* into an lxml element tree (network access allowed)."""
    xml_parser = etree.XMLParser(no_network=False)
    return etree.parse(source, xml_parser)
# -----------------------------------------------------------------------------
def s3_unicode(s, encoding="utf-8"):
    """Convert an arbitrary object into a unicode string (Python 2 only).

    @param s: the object to convert
    @param encoding: the character encoding assumed for byte strings
    """
    if type(s) is unicode:
        return s
    try:
        if not isinstance(s, basestring):
            if hasattr(s, "__unicode__"):
                s = unicode(s)
            else:
                try:
                    s = unicode(str(s), encoding, "strict")
                except UnicodeEncodeError:
                    if not isinstance(s, Exception):
                        raise
                    # Exceptions: convert their args individually and join.
                    s = " ".join([s3_unicode(arg, encoding) for arg in s])
        else:
            # Byte string: decode with the given encoding.
            s = s.decode(encoding)
    except UnicodeDecodeError:
        if not isinstance(s, Exception):
            raise
        else:
            s = " ".join([s3_unicode(arg, encoding) for arg in s])
    return s
# -------------------------------------------------------------------------
def encode_iso_datetime(dt):
    """Return *dt* ISO-8601 encoded, with microseconds truncated (not rounded)."""
    truncated = dt.replace(microsecond=0)
    return truncated.isoformat()
# -------------------------------------------------------------------------
def xls2tree(source,
             resourcename=None,
             extra_data=None,
             hashtags=None,
             sheet=None,
             rows=None,
             cols=None,
             fields=None,
             header_row=True):
    """Convert an XLS work sheet into an element tree (<table><row><col>...).

    @param source: the XLS source (file-like stream, open xlrd Book, or
                   an xlrd Sheet passed via the sheet parameter)
    @param resourcename: name attribute for the root <table> element
    @param extra_data: dict of additional column values added to every row
    @param hashtags: dict of hashtags per column name
    @param sheet: sheet name, sheet index or xlrd Sheet instance (defaults
                  to the "SahanaData" sheet, falling back to the first sheet)
    @param rows: row range to import, as (start, length) or just length
    @param cols: column range to import, as (start, length) or just length
    @param fields: dict mapping column indices to field names (overrides
                   the header row)
    @param header_row: interpret the first row as column headers
    """
    import xlrd
    # Shortcuts
    SubElement = etree.SubElement
    DEFAULT_SHEET_NAME = "SahanaData"
    # Root element
    root = etree.Element(TABLE)
    if resourcename is not None:
        root.set("name", resourcename)
    if isinstance(sheet, xlrd.sheet.Sheet):
        # Open work sheet passed as argument => use this
        s = sheet
    else:
        if hasattr(source, "read"):
            # Source is a stream
            if hasattr(source, "seek"):
                source.seek(0)
            wb = xlrd.open_workbook(file_contents=source.read(),
                                    # requires xlrd 0.7.x or higher
                                    on_demand=True)
        elif isinstance(source, xlrd.book.Book):
            # Source is an open work book
            wb = source
        else:
            # Unsupported source type
            raise RuntimeError("xls2tree: invalid source %s" % type(source))
        # Find the sheet
        try:
            if isinstance(sheet, (int, long)):
                s = wb.sheet_by_index(sheet)
            elif isinstance(sheet, basestring):
                s = wb.sheet_by_name(sheet)
            elif sheet is None:
                if DEFAULT_SHEET_NAME in wb.sheet_names():
                    s = wb.sheet_by_name(DEFAULT_SHEET_NAME)
                else:
                    s = wb.sheet_by_index(0)
            else:
                raise SyntaxError("xls2tree: invalid sheet %s" % sheet)
        except (IndexError, xlrd.XLRDError):
            # BUGFIX: was "except IndexError, xlrd.XLRDError:", which only
            # caught IndexError and assigned it to the xlrd.XLRDError
            # attribute instead of also catching XLRDError
            s = None
    def cell_range(cells, max_cells):
        """
            Helper method to calculate a cell range
            @param cells: the specified range
            @param max_cells: maximum number of cells
        """
        if not cells:
            cells = (0, max_cells)
        elif not isinstance(cells, (tuple, list)):
            cells = (0, cells)
        elif len(cells) == 1:
            cells = (cells[0], max_cells)
        else:
            cells = (cells[0], cells[0] + cells[1])
        return cells
    if s:
        # Calculate cell range
        rows = cell_range(rows, s.nrows)
        cols = cell_range(cols, s.ncols)
        # Column headers
        if fields:
            headers = fields
        elif not header_row:
            headers = dict((i, "%s" % i) for i in range(cols[1] - cols[0]))
        else:
            # Use header row in the work sheet
            headers = {}
        # Lambda to decode XLS dates into an ISO datetime-string
        # NOTE(review): wb is only bound when the work book was opened above;
        # date cells in a directly-passed Sheet would raise NameError here
        decode_date = lambda v: datetime.datetime(*xlrd.xldate_as_tuple(v, wb.datemode))
        def decode(t, v):
            """
                Helper method to decode the cell value by type
                @param t: the cell type
                @param v: the cell value
                @return: text representation of the cell value
            """
            text = ""
            if v:
                if t is None:
                    text = s3_unicode(v).strip()
                elif t == xlrd.XL_CELL_TEXT:
                    text = v.strip()
                elif t == xlrd.XL_CELL_NUMBER:
                    text = str(long(v)) if long(v) == v else str(v)
                elif t == xlrd.XL_CELL_DATE:
                    text = encode_iso_datetime(decode_date(v))
                elif t == xlrd.XL_CELL_BOOLEAN:
                    # BUGFIX: was str(value).lower() — "value" is a
                    # stale/unbound name from the enclosing scope; the cell
                    # value parameter is "v"
                    text = str(v).lower()
            return text
        def add_col(row, name, t, v, hashtags=None):
            """
                Helper method to add a column to an output row
                @param row: the output row (etree.Element)
                @param name: the column name
                @param t: the cell type
                @param v: the cell value
                @param hashtags: dict of hashtags per column name
            """
            col = SubElement(row, COL)
            col.set(FIELD, name)
            if hashtags:
                hashtag = hashtags.get(name)
                if hashtag and hashtag[1:]:
                    col.set(HASHTAG, hashtag)
            col.text = decode(t, v)
        hashtags = dict(hashtags) if hashtags else {}
        # Process the rows
        record_idx = 0
        extra_fields = set(extra_data) if extra_data else None
        check_headers = extra_fields is not None
        for ridx in range(*rows):
            # Read types and values
            types = s.row_types(ridx, *cols)
            values = s.row_values(ridx, *cols)
            if header_row and record_idx == 0:
                # Read column headers
                if not fields:
                    for cidx, value in enumerate(values):
                        header = decode(types[cidx], value)
                        headers[cidx] = header
                        if check_headers:
                            extra_fields.discard(header)
                    check_headers = False
            else:
                if not fields and \
                   (header_row and record_idx == 1 or record_idx == 0):
                    # Autodetect hashtags
                    items = {}
                    for cidx, name in headers.items():
                        try:
                            t = types[cidx]
                            v = values[cidx]
                        except IndexError:
                            continue
                        if t not in (xlrd.XL_CELL_TEXT, xlrd.XL_CELL_EMPTY):
                            items = None
                            break
                        elif v:
                            items[name] = v
                    if items and all(v[0] == '#' for v in items.values()):
                        hashtags.update(items)
                        continue
                # Add output row
                orow = SubElement(root, ROW)
                for cidx, name in headers.items():
                    if check_headers:
                        extra_fields.discard(name)
                    try:
                        t = types[cidx]
                        v = values[cidx]
                    except IndexError:
                        pass
                    else:
                        add_col(orow, name, t, v, hashtags=hashtags)
                check_headers = False
                # Add extra data
                if extra_fields:
                    for key in extra_fields:
                        add_col(orow, key, None, extra_data[key], hashtags=hashtags)
            record_idx += 1
    return etree.ElementTree(root)
# -----------------------------------------------------------------------------
def transform(tree, stylesheet_path, **args):
    """Apply the XSLT stylesheet at *stylesheet_path* to *tree*.

    Keyword arguments are passed to the stylesheet as quoted string
    parameters.
    """
    quoted = dict((k, "'%s'" % args[k]) for k in args) if args else None
    stylesheet = etree.parse(stylesheet_path)
    access = etree.XSLTAccessControl(read_file=True, read_network=True)
    transformer = etree.XSLT(stylesheet, access_control=access)
    if quoted:
        return transformer(tree, **quoted)
    return transformer(tree)
# -----------------------------------------------------------------------------
def main(argv):
    """Command line entry point: xls2xml <XLS File> [<XSLT Stylesheet>]

    Prints the (optionally transformed) element tree to stdout.
    """
    try:
        xlspath = argv[0]
    except:
        print "Usage: python xls2xml <XLS File> [<XSLT Stylesheet>]"
        return
    try:
        xslpath = argv[1]
    except:
        # Stylesheet is optional
        xslpath = None
    xlsfile = open(xlspath)
    tree = xls2tree(xlsfile)
    if xslpath is not None:
        tree = transform(tree, xslpath)
    print etree.tostring(tree, pretty_print=True)
if __name__ == "__main__":
    # Exit with main()'s return value when run as a script.
    sys.exit(main(sys.argv[1:]))
# END =========================================================================
|
{
"content_hash": "420aba6df082883c91fb788f27420cfc",
"timestamp": "",
"source": "github",
"line_count": 299,
"max_line_length": 88,
"avg_line_length": 32.35117056856188,
"alnum_prop": 0.44515662152382923,
"repo_name": "flavour/RedHat",
"id": "03970def0b1f664dd69f7fdf39a28801d38ff5f7",
"size": "9965",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "static/scripts/tools/xls2xml.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2021201"
},
{
"name": "HTML",
"bytes": "1314882"
},
{
"name": "JavaScript",
"bytes": "19259784"
},
{
"name": "NSIS",
"bytes": "3934"
},
{
"name": "PHP",
"bytes": "15220"
},
{
"name": "Perl",
"bytes": "500"
},
{
"name": "Python",
"bytes": "28664280"
},
{
"name": "Ruby",
"bytes": "2051"
},
{
"name": "Shell",
"bytes": "4860"
},
{
"name": "XSLT",
"bytes": "2688849"
}
],
"symlink_target": ""
}
|
import msl.package_manager as pm
def test_github():
    """The GitHub listing must include this package with populated metadata."""
    packages = pm.github()
    name = pm.utils._PKG_NAME
    assert name in packages
    info = packages[name]
    assert len(info['description']) > 0
    assert len(info['version']) > 0
    assert len(info['tags']) > 10
    assert len(info['branches']) > 0
|
{
"content_hash": "3a121ede420a64bd4af94f536cb919cd",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 38,
"avg_line_length": 26.636363636363637,
"alnum_prop": 0.621160409556314,
"repo_name": "MSLNZ/msl-package-manager",
"id": "7c05bb589b6b8ef2ca636fa2106510c1fca5ea1f",
"size": "293",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/test_github.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "171189"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations
from pages.models import Page
def rebuild_tree(apps, schema_editor):
    # Recompute the cached MPTT tree fields for all pages after 0009 added them.
    # NOTE(review): uses the live Page model rather than the historical model
    # from `apps` — fine only as long as the current schema still matches.
    Page.objects.rebuild()
class Migration(migrations.Migration):
    # Data migration: rebuild the page tree after the MPTT fields were
    # introduced in 0009_pages_mptt.
    dependencies = [
        ('pages', '0009_pages_mptt'),
    ]
    operations = [
        migrations.RunPython(rebuild_tree),
    ]
|
{
"content_hash": "42b247b486da8f815cd63bef61846804",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 43,
"avg_line_length": 16.714285714285715,
"alnum_prop": 0.6695156695156695,
"repo_name": "fidals/refarm-site",
"id": "123c4179601da01070e933b07ba4bab2e17616a4",
"size": "423",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pages/migrations/0010_rebuild_pages_tree.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "64305"
},
{
"name": "HTML",
"bytes": "28723"
},
{
"name": "JavaScript",
"bytes": "31422"
},
{
"name": "Python",
"bytes": "296885"
}
],
"symlink_target": ""
}
|
import base64
import io
import os
import shutil
import tempfile
import zipfile
import invoke
from . import paths
@invoke.task
def authors():
    """Regenerate AUTHORS.txt from git history (mailmap-aware, de-duplicated)."""
    print("[generate.authors] Generating AUTHORS")
    print("[generate.authors] Collecting author names")
    r = invoke.run("git log --use-mailmap --format'=%aN <%aE>'", hide=True)
    # De-duplicate case-insensitively while preserving first-seen spelling.
    unique = []
    seen = set()
    for line in r.stdout.splitlines():
        name = line.strip()
        folded = name.lower()
        if folded not in seen:
            seen.add(folded)
            unique.append(name)
    # Case-insensitive alphabetical order.
    unique.sort(key=lambda name: name.lower())
    print("[generate.authors] Writing AUTHORS")
    with io.open("AUTHORS.txt", "w", encoding="utf8") as fp:
        fp.write(u"\n".join(unique))
        fp.write(u"\n")
@invoke.task
def installer(installer_path=os.path.join(paths.CONTRIB, "get-pip.py")):
    """Generate the self-contained get-pip.py bootstrap script.

    Bundles the entire pip source tree as a base85-encoded zip embedded in
    a wrapper script, written to *installer_path*.
    """
    print("[generate.installer] Generating installer")
    # Define our wrapper script
    # (the doubled braces {{|}} survive .format(); {zipfile} is substituted)
    WRAPPER_SCRIPT = """
#!/usr/bin/env python
#
# Hi There!
# You may be wondering what this giant blob of binary data here is, you might
# even be worried that we're up to something nefarious (good for you for being
# paranoid!). This is a base64 encoding of a zip file, this zip file contains
# an entire copy of pip.
#
# Pip is a thing that installs packages, pip itself is a package that someone
# might want to install, especially if they're looking to run this get-pip.py
# script. Pip has a lot of code to deal with the security of installing
# packages, various edge cases on various platforms, and other such sort of
# "tribal knowledge" that has been encoded in its code base. Because of this
# we basically include an entire copy of pip inside this blob. We do this
# because the alternatives are attempt to implement a "minipip" that probably
# doesn't do things correctly and has weird edge cases, or compress pip itself
# down into a single file.
#
# If you're wondering how this is created, it is using an invoke task located
# in tasks/generate.py called "installer". It can be invoked by using
# ``invoke generate.installer``.
import os.path
import pkgutil
import shutil
import sys
import struct
import tempfile
# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY3:
    iterbytes = iter
else:
    def iterbytes(buf):
        return (ord(byte) for byte in buf)
try:
    from base64 import b85decode
except ImportError:
    _b85alphabet = (b"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
                    b"abcdefghijklmnopqrstuvwxyz!#$%&()*+-;<=>?@^_`{{|}}~")
    def b85decode(b):
        _b85dec = [None] * 256
        for i, c in enumerate(iterbytes(_b85alphabet)):
            _b85dec[c] = i
        padding = (-len(b)) % 5
        b = b + b'~' * padding
        out = []
        packI = struct.Struct('!I').pack
        for i in range(0, len(b), 5):
            chunk = b[i:i + 5]
            acc = 0
            try:
                for c in iterbytes(chunk):
                    acc = acc * 85 + _b85dec[c]
            except TypeError:
                for j, c in enumerate(iterbytes(chunk)):
                    if _b85dec[c] is None:
                        raise ValueError(
                            'bad base85 character at position %d' % (i + j)
                        )
                raise
            try:
                out.append(packI(acc))
            except struct.error:
                raise ValueError('base85 overflow in hunk starting at byte %d'
                                 % i)
        result = b''.join(out)
        if padding:
            result = result[:-padding]
        return result
def bootstrap(tmpdir=None):
    # Import pip so we can use it to install pip and maybe setuptools too
    import pip
    from pip.commands.install import InstallCommand
    # Wrapper to provide default certificate with the lowest priority
    class CertInstallCommand(InstallCommand):
        def parse_args(self, args):
            # If cert isn't specified in config or environment, we provide our
            # own certificate through defaults.
            # This allows user to specify custom cert anywhere one likes:
            # config, environment variable or argv.
            if not self.parser.get_default_values().cert:
                self.parser.defaults["cert"] = cert_path # calculated below
            return super(CertInstallCommand, self).parse_args(args)
    pip.commands_dict["install"] = CertInstallCommand
    # We always want to install pip
    packages = ["pip"]
    # Check if the user has requested us not to install setuptools
    if "--no-setuptools" in sys.argv or os.environ.get("PIP_NO_SETUPTOOLS"):
        args = [x for x in sys.argv[1:] if x != "--no-setuptools"]
    else:
        args = sys.argv[1:]
        # We want to see if setuptools is available before attempting to
        # install it
        try:
            import setuptools  # noqa
        except ImportError:
            packages += ["setuptools"]
    delete_tmpdir = False
    try:
        # Create a temporary directory to act as a working directory if we were
        # not given one.
        if tmpdir is None:
            tmpdir = tempfile.mkdtemp()
            delete_tmpdir = True
        # We need to extract the SSL certificates from requests so that they
        # can be passed to --cert
        cert_path = os.path.join(tmpdir, "cacert.pem")
        with open(cert_path, "wb") as cert:
            cert.write(pkgutil.get_data("pip._vendor.requests", "cacert.pem"))
        # Execute the included pip and use it to install the latest pip and
        # setuptools from PyPI
        sys.exit(pip.main(["install", "--upgrade"] + packages + args))
    finally:
        # Remove our temporary directory
        if delete_tmpdir and tmpdir:
            shutil.rmtree(tmpdir, ignore_errors=True)
def main():
    tmpdir = None
    try:
        # Create a temporary working directory
        tmpdir = tempfile.mkdtemp()
        # Unpack the zipfile into the temporary directory
        pip_zip = os.path.join(tmpdir, "pip.zip")
        with open(pip_zip, "wb") as fp:
            fp.write(b85decode(DATA.replace(b"\\n", b"")))
        # Add the zipfile to sys.path so that we can import it
        sys.path.insert(0, pip_zip)
        # Run the bootstrap
        bootstrap(tmpdir=tmpdir)
    finally:
        # Clean up our temporary working directory
        if tmpdir:
            shutil.rmtree(tmpdir, ignore_errors=True)
DATA = b\"\"\"
{zipfile}
\"\"\"
if __name__ == "__main__":
    main()
""".lstrip()
    # Get all of the files we want to add to the zip file
    print("[generate.installer] Collect all the files that should be zipped")
    all_files = []
    for root, dirs, files in os.walk(os.path.join(paths.PROJECT_ROOT, "pip")):
        for pyfile in files:
            if os.path.splitext(pyfile)[1] in {".py", ".pem", ".cfg", ".exe"}:
                path = os.path.join(root, pyfile)
                # Store paths relative to PROJECT_ROOT inside the archive.
                all_files.append(
                    "/".join(
                        path.split("/")[len(paths.PROJECT_ROOT.split("/")):]
                    )
                )
    tmpdir = tempfile.mkdtemp()
    try:
        # Get a temporary path to use as staging for the pip zip
        zpth = os.path.join(tmpdir, "pip.zip")
        # Write the pip files to the zip archive
        print("[generate.installer] Generate the bundled zip of pip")
        with zipfile.ZipFile(zpth, "w", compression=zipfile.ZIP_DEFLATED) as z:
            for filename in all_files:
                z.write(os.path.join(paths.PROJECT_ROOT, filename), filename)
        # Get the binary data that compromises our zip file
        with open(zpth, "rb") as fp:
            data = fp.read()
    finally:
        shutil.rmtree(tmpdir, ignore_errors=True)
    # Write out the wrapper script that will take the place of the zip script
    # The reason we need to do this instead of just directly executing the
    # zip script is that while Python will happily execute a zip script if
    # passed it on the file system, it will not however allow this to work if
    # passed it via stdin. This means that this wrapper script is required to
    # make ``curl https://...../get-pip.py | python`` continue to work.
    print(
        "[generate.installer] Write the wrapper script with the bundled zip "
        "file"
    )
    zipdata = base64.b85encode(data).decode("utf8")
    chunked = []
    # Wrap the base85 payload at 79 columns for readability.
    for i in range(0, len(zipdata), 79):
        chunked.append(zipdata[i:i + 79])
    with open(installer_path, "w") as fp:
        fp.write(WRAPPER_SCRIPT.format(zipfile="\n".join(chunked)))
    # Ensure the permissions on the newly created file
    oldmode = os.stat(installer_path).st_mode & 0o7777
    newmode = (oldmode | 0o555) & 0o7777
    os.chmod(installer_path, newmode)
    print("[generate.installer] Generated installer")
|
{
"content_hash": "c2156da10618a1b6aa37cd367826cce1",
"timestamp": "",
"source": "github",
"line_count": 266,
"max_line_length": 79,
"avg_line_length": 34.08646616541353,
"alnum_prop": 0.6151979706628433,
"repo_name": "msabramo/pip",
"id": "098f9f4830e142f23785a5721a2d78747e933f44",
"size": "9067",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "tasks/generate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "6790"
},
{
"name": "Python",
"bytes": "2360770"
},
{
"name": "Shell",
"bytes": "9133"
}
],
"symlink_target": ""
}
|
import argparse
import arrow
import cmd
import datetime
import glob
import json
import inspect
import os
import re
import readline
import shlex
import sys
import textwrap
import types
import talus_client.api
import talus_client.errors as errors
import talus_client.utils as utils
from talus_client.utils import Colors
ModelCmd = None
ENABLED_COMMANDS = []
class TalusMetaClass(type):
    """Metaclass that auto-registers every concrete command class."""

    def __init__(cls, name, bases, namespace):
        super(TalusMetaClass, cls).__init__(name, bases, namespace)
        # The abstract base itself is not a user-facing command.
        if cls.__name__ not in ["TalusCmdBase"]:
            ENABLED_COMMANDS.append(cls)
class TalusCmdBase(object,cmd.Cmd):
__metaclass__ = TalusMetaClass
# to be overridden by inheriting classes
command_name = ""
    def __init__(self, talus_host=None, client=None, user=None):
        """Create a new TalusCmdBase

        :talus_host: The root of the talus web app (e.g. http://localhost:8001 if the api is at http://localhost:8001/api)
        :client: an existing TalusClient to reuse (one is created from
            ``talus_host`` when omitted)
        :user: username passed through to the API client
        """
        global ModelCmd
        # Imported lazily to avoid a circular import with param_model.
        from talus_client.param_model import ModelCmd as MC
        ModelCmd = MC
        cmd.Cmd.__init__(self, "\t")
        self.one_shot = False
        self._last_was_keyboard = False
        self._talus_host = talus_host
        self._talus_client = client
        self._talus_user = user
        # Build a client only when a host was given but no client supplied.
        if self._talus_host is not None and self._talus_client is None:
            self._talus_client = talus_client.api.TalusClient(self._talus_host, user=self._talus_user)
    def _nice_name(self, model, attr):
        # Render a reference field as "name (id)" when the referenced document
        # carries a name; otherwise fall back to the raw attribute value.
        # NOTE(review): the two branches access the field differently
        # (``_fields[attr].value`` vs ``_fields[attr][...]``) — presumably the
        # field object supports both; confirm against the model class.
        if "name" in model._fields[attr].value:
            return "{} ({})".format(model._fields[attr]["name"], model._fields[attr]["id"])
        else:
            return getattr(model, attr)
def _resolve_one_model(self, id_or_name, model, search, sort="-timestamps.created", default_id_search=None):
if default_id_search is None:
default_id_search = ["id", "name"]
if id_or_name is not None and not id_or_name.startswith("+"):
for default_compare in default_id_search:
res = model.find_one(**{default_compare:id_or_name})
if res is not None:
return res
return None
if id_or_name is None:
skip = 0
else:
if not re.match(r'^\+\d+$', id_or_name):
raise errors.TalusApiError("Git-like referencing must be a plus sign followed by digits")
skip = int(id_or_name.replace("+", "")) - 1
search["skip"] = skip
search["num"] = 1
search["sort"] = sort
return model.find_one(**search)
def _search_terms(self, parts, key_remap=None, user_default_filter=True, out_leftover=None, no_hex_keys=None):
"""Return a dictionary of search terms"""
if no_hex_keys is None:
no_hex_keys = []
search = {}
key = None
if key_remap is None:
key_remap = {}
key_map = {
"status": "status.name"
}
key_map.update(key_remap)
found_all = False
for item in parts:
if key is None:
if not item.startswith("--"):
if out_leftover is not None:
out_leftover.append(item)
continue
else:
raise errors.TalusApiError("args must be alternating search item/value pairs!")
item = item[2:].replace("-", "_")
key = item
if key == "all":
found_all = True
key = None
continue
if key in key_map:
key = key_map[key]
if key.endswith("__type") or key.endswith(".type"):
key += "_"
elif key is not None:
# hex conversion
if re.match(r'^0x[0-9a-f]+$', item, re.IGNORECASE) is not None and key.split("__")[0] not in no_hex_keys:
item = int(item, 16)
if key in search and not isinstance(search[key], list):
search[key] = [search[key]]
if key in search and isinstance(search[key], list):
search[key].append(item)
else:
search[key] = item
self.out("searching for {} = {}".format(key, item))
# reset this
key = None
if user_default_filter and not found_all and self._talus_user is not None:
# default filter by username tag
self.out("default filtering by username (searching for tags = {})".format(self._talus_user))
self.out("use --all to view all models")
if "tags" in search and not isinstance(search["tags"], list):
search["tags"] = [search["tags"]]
if "tags" in search and isinstance(search["tags"], list):
search["tags"].append(self._talus_user)
else:
search["tags"] = self._talus_user
if out_leftover is not None and key is not None:
out_leftover.append(key)
return search
def _actual_date(self, epoch):
return datetime.datetime.fromtimestamp(epoch).strftime("%Y-%m-%d %H:%M:%S")
def _rel_date(self, epoch):
return arrow.get(epoch).humanize()
def _prep_model(self, model):
if hasattr(model, "tags") and self._talus_user is not None and self._talus_user not in model.tags:
model.tags.append(self._talus_user)
def _make_model_cmd(self, model, prompt_part="create"):
res = ModelCmd(model, self._talus_host, self._talus_client)
res.prompt = self.prompt[:-2] + ":" + prompt_part + "> "
res._root = self._root
return res
def _go_interactive(self, args):
return ("--shell" in args or (len(args) == 0 and not self._root.one_shot))
def ask(self, msg):
msg = Colors.WARNING + msg + Colors.ENDC
return raw_input(msg)
def ok(self, msg):
"""
Print the message with a success/ok color
"""
msg = u"\n".join(Colors.OKGREEN + u"{}{}".format(u"[.] ", line) + Colors.ENDC for line in unicode(msg).split("\n"))
print(msg)
def out(self, msg, raw=False):
"""
Print the message with standard formatting
"""
pre = Colors.OKBLUE + "[+]" + Colors.ENDC + " "
if raw:
pre = " "
msg = u"\n".join(u"{}{}".format(pre, line) for line in unicode(msg).split("\n"))
print(msg)
def warn(self, msg):
"""
Print an error message
"""
# TODO colors?
msg = u"\n".join(Colors.FAIL + u"[!] {}".format(line) + Colors.ENDC for line in unicode(msg).split("\n"))
print(msg)
def err(self, msg):
"""
Print an error message
"""
# TODO colors?
msg = u"\n".join(Colors.FAIL + u"[E] {}".format(line) + Colors.ENDC for line in unicode(msg).split("\n"))
print(msg)
@property
def prompt(self):
caller_name = inspect.stack()[1][3]
if caller_name == "cmdloop":
return Colors.HEADER + self._prompt + Colors.ENDC
return self._prompt
@prompt.setter
def prompt(self, value):
self._prompt = value
return self._prompt
def emptyline(self):
"""don't repeat the last successful command"""
pass
def do_up(self, args):
"""Quit the current processor (move up a level)"""
return True
def do_quit(self, args):
"""Quit the program"""
return True
do_exit = do_quit
do_exit.__doc__ = do_quit.__doc__
def cmdloop(self, *args, **kwargs):
try:
return cmd.Cmd.cmdloop(self, *args, **kwargs)
except KeyboardInterrupt as e:
self.err("cancelled")
return True
def onecmd(self, *args, **kwargs):
try:
return cmd.Cmd.onecmd(self, *args, **kwargs)
except talus_client.errors.TalusApiError as e:
self.err(e.message)
except KeyboardInterrupt as e:
if not self._last_was_keyboard:
self.err("cancelled")
else:
self.err("if you want to quit, use the 'quit' command")
self._last_was_keyboard = True
# raised by argparse when args aren't correct
except SystemExit as e:
pass
else:
# no KeyboardInterrupts happened
self._last_was_keyboard = False
def default(self, line):
funcs = filter(lambda x: x.startswith("do_"), dir(self))
parts = line.split()
first_param = parts[0]
matches = filter(lambda x: x.startswith("do_" + first_param), funcs)
if len(matches) > 1:
self.warn("ambiguous command, matching commands:")
for match in matches:
print(" " + match.replace("do_", ""))
return
elif len(matches) == 1:
func = getattr(self, matches[0])
return func(" ".join(parts[1:]))
self.err("Unknown command. Try the 'help' command.")
def completedefault(self, text, line, begidx, endidx):
funcs = filter(lambda x: x.startswith("do_"), dir(self))
res = filter(lambda x: x.startswith(text), funcs)
return res
@classmethod
def get_command_helps(cls):
"""Look for methods in this class starting with do_.
:returns: A dict of commands and their help values. E.g. ``{"list": "List all the images"}``
"""
res = {}
regex = re.compile(r'^do_(.*)$')
for name in dir(cls):
match = regex.match(name)
if match is not None:
cmd = match.group(1)
prop = getattr(cls, name, None)
doc = getattr(prop, "__doc__", None)
if doc is not None:
lines = doc.split("\n")
res[cmd] = lines[0].lstrip() + textwrap.dedent("\n".join(lines[1:]).expandtabs(4))
return res
@classmethod
def get_help(cls, args=None, abbrev=False, examples=False):
args = "" if args is None else args
cmd = None
cmd_specific = (len(args) > 0)
cmd_helps = ""
if not cmd_specific:
cmd_helps += "\n{name}\n{under}\n".format(
name=cls.command_name,
under=("-"*len(cls.command_name))
)
else:
cmd = args.split(" ")[0]
for subcmd_name,subcmd_help in cls.get_command_helps().iteritems():
if cmd_specific and subcmd_name != cmd:
continue
if not examples and "\nExamples:\n" in subcmd_help:
subcmd_help,_ = subcmd_help.split("\nExamples:\n")
lines = subcmd_help.split("\n")
first_line = lines[0].lstrip()
label_start = "\n{:>10} - ".format(subcmd_name)
spaces = " " * len(label_start)
label_line = label_start + first_line
cmd_helps += "\n".join(textwrap.wrap(
label_line,
subsequent_indent=spaces
))
if len(lines) > 2 and not abbrev:
cmd_helps += "\n\n" + "\n".join(spaces + x for x in lines[1:])
cmd_helps += "\n"
return cmd_helps
def do_help(self, args):
examples = (len(args) > 0)
print(self.get_help(args=args, examples=examples))
# -----------------------------------
def _argparser(self):
# TODO make this a loop and find the first do_XXXX function in
# the current callstack?
caller_name = inspect.stack()[1][3]
if self.one_shot:
return argparse.ArgumentParser(self.command_name + " " + caller_name.replace("do_", ""))
else:
return argparse.ArgumentParser(caller_name.replace("do_", ""))
class TalusCmd(TalusCmdBase):
    """The main talus command. This is what is invoked when dropping
    into a shell or when run from the command line"""

    command_name = "<ROOT>"

    def __init__(self, talus_host=None, client=None, one_shot=False, user=None):
        """Initialize the Talus command object

        :one_shot: True if only one command is to be processed (cmd-line args, no shell, etc)
        """
        super(TalusCmd, self).__init__(talus_host=talus_host, client=client, user=user)
        self.prompt = "talus> "
        self.one_shot = one_shot
# auto-import all defined commands in talus/cmds/*.py
# Importing each sibling module triggers TalusMetaClass, which registers
# every TalusCmdBase subclass in ENABLED_COMMANDS before
# define_root_commands() runs below.
this_dir = os.path.dirname(__file__)
for filename in glob.glob(os.path.join(this_dir, "*.py")):
    basename = os.path.basename(filename)
    if basename == "__init__.py":
        continue
    mod_name = basename.replace(".py", "")
    mod_base = __import__("talus_client.cmds", globals(), locals(), fromlist=[mod_name])
    mod = getattr(mod_base, mod_name)
def make_cmd_handler(cls):
    """Build a ``do_*`` handler that dispatches to the given command class."""
    def _handle_command(self, args):
        # Spawn the sub-command processor, sharing the current session.
        sub = cls(talus_host=self._talus_host, client=self._talus_client, user=self._talus_user)
        sub._root = self
        sub.prompt = "talus:" + sub.command_name + "> "
        run_once = self.one_shot or len(args) > 0
        if run_once:
            # Inline invocation: run the single command and return.
            sub.one_shot = True
            sub.onecmd(args)
        else:
            # No args in interactive mode: drop into the sub-shell.
            sub.cmdloop()
    return _handle_command
def define_root_commands():
    # Attach a do_<command_name> handler to TalusCmd for every registered
    # command class so the root shell can dispatch to it.
    for cls in ENABLED_COMMANDS:
        if cls.command_name == "" or cls == TalusCmd:
            continue
        handler = make_cmd_handler(cls)

        # the baseclass cmd.Cmd always defines a do_help, so we need to check if it's
        # redefined in the specific subclass
        if "do_help" in cls.__dict__:
            handler.__doc__ = cls.do_help.__doc__
        else:
            handler.__doc__ = cls.__doc__
        setattr(TalusCmd, "do_" + cls.command_name, handler)
# Run at import time, after the auto-import loop above has populated
# ENABLED_COMMANDS.
define_root_commands()
|
{
"content_hash": "3191f7569281079fa77fb6a17bb9960f",
"timestamp": "",
"source": "github",
"line_count": 423,
"max_line_length": 118,
"avg_line_length": 27.79905437352246,
"alnum_prop": 0.6485245343991836,
"repo_name": "optiv-labs/talus_client",
"id": "1bd7909e99939a8811d983b6c051289a7cbbceff",
"size": "11800",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "talus_client/cmds/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "138533"
}
],
"symlink_target": ""
}
|
import sys
from PyQt4 import QtGui, QtCore
class Window(QtGui.QMainWindow):
    """Main demo window wiring up a menu bar, tool bar, checkbox,
    progress bar, combo box, color/font dialogs, a calendar and a
    simple text editor (PyQt4)."""

    def __init__(self): #Runs every time we make a window object
        super(Window, self).__init__()
        self.setGeometry(50, 50, 500, 300)
        self.setWindowTitle("PyQT tuts!")
        self.setWindowIcon(QtGui.QIcon('pythonlogo.png'))

        # MENUBAR
        extractAction = QtGui.QAction("&GET TO THE CHOPPAH!!!", self)
        extractAction.setShortcut("Ctrl+Q")
        extractAction.setStatusTip('Leave The App') #Left-bottom tip
        extractAction.triggered.connect(self.close_application)

        # OPENEDITOR
        openEditor = QtGui.QAction("&Editor", self)
        openEditor.setShortcut("Ctrl+E")
        openEditor.setStatusTip('Open Editor')
        openEditor.triggered.connect(self.editor)
        # --OPENEDITOR

        # OPENFILE
        openFile = QtGui.QAction("&Open File", self)
        openFile.setShortcut("Ctrl+O")
        openFile.setStatusTip('Open File')
        openFile.triggered.connect(self.file_open)
        # --OPENFILE

        self.statusBar()

        mainMenu = self.menuBar()
        fileMenu = mainMenu.addMenu('&File')
        fileMenu.addAction(extractAction)
        fileMenu.addAction(openFile)

        # EDITORMENU
        editorMenu = mainMenu.addMenu("&Editor")
        editorMenu.addAction(openEditor)
        # --EDITORMENU
        # --MENUBAR

        self.home()

    def home(self):
        # Builds the central widgets and toolbars, then shows the window.
        btn = QtGui.QPushButton("Quit", self)
        #btn.clicked.connect(QtCore.QCoreApplication.instance().quit)
        btn.clicked.connect(self.close_application)
        #btn.resize(100, 100)
        #btn.resize(btn.sizeHint()) #Suggested size
        btn.resize(btn.minimumSizeHint()) #Minimum suggested size
        btn.move(0, 100)

        # TOOLBAR
        extractAction = QtGui.QAction(QtGui.QIcon('todachoppa.png'), 'Flee the Scene', self)
        extractAction.triggered.connect(self.close_application)
        self.toolBar = self.addToolBar("Extraction")
        self.toolBar.addAction(extractAction)
        # --TOOLBAR

        # FONT WIDGET
        fontChoice = QtGui.QAction('Font', self)
        fontChoice.triggered.connect(self.font_choice)
        #self.toolBar = self.addToolBar("Font")
        #You can add it to the first toolbar or create one new
        self.toolBar.addAction(fontChoice)
        # --FONT WIDGET

        # BACKGROUND COLOR WIDGET
        color = QtGui.QColor(0, 0, 0)
        fontColor = QtGui.QAction('Font bg Color', self)
        fontColor.triggered.connect(self.color_picker)
        self.toolBar.addAction(fontColor)
        # --BACKGROUND COLOR WIDGET

        # CHECKBOX
        checkBox = QtGui.QCheckBox('Enlarge Window', self)
        checkBox.move(300, 25)
        #checkbox.toogle()
        checkBox.stateChanged.connect(self.enlarge_window)
        # --CHECKBOX

        # PROGRESSBAR
        self.progress = QtGui.QProgressBar(self)
        self.progress.setGeometry(200, 80, 250, 20)
        self.btn = QtGui.QPushButton("Download", self)
        self.btn.move(200, 120)
        self.btn.clicked.connect(self.download);
        # --PROGRESSBAR

        # DROPDOWN FOR STYLE
        print(self.style().objectName())
        self.styleChoice = QtGui.QLabel("Windows Vista", self)

        comboBox = QtGui.QComboBox(self)
        comboBox.addItem("motif")
        comboBox.addItem("Windows")
        comboBox.addItem("cde")
        comboBox.addItem("Plastique")
        comboBox.addItem("Cleanlooks")
        comboBox.addItem("windowsvista")
        comboBox.move(50, 250)
        self.styleChoice.move(50, 150)
        comboBox.activated[str].connect(self.style_choice)
        # --DROPDOWN

        # CALENDAR WIDGET
        # NOTE(review): placed at x=500 while the default window is only
        # 500px wide - visible only after "Enlarge Window" is checked.
        cal = QtGui.QCalendarWidget(self)
        cal.move(500, 200)
        cal.resize(200, 200)
        # --CALENDAR WIDGET

        self.show()

    def file_open(self):
        # NOTE(review): assumes a file was actually chosen; cancelling the
        # dialog yields an empty name and open() would raise - confirm.
        name = QtGui.QFileDialog.getOpenFileName(self, 'Open File')
        file = open(name, 'r')
        self.editor()
        with file:
            text = file.read()
            self.textEdit.setText(text)

    def color_picker(self):
        # Apply the chosen color as the label's background via stylesheet.
        color = QtGui.QColorDialog.getColor()
        self.styleChoice.setStyleSheet("QWidget { background-color: %s }" % color.name())

    def editor(self):
        # Replace the central widget with a fresh text editor.
        self.textEdit = QtGui.QTextEdit()
        self.setCentralWidget(self.textEdit)

    def font_choice(self):
        font, valid = QtGui.QFontDialog.getFont()
        if valid:
            self.styleChoice.setFont(font)

    def style_choice(self, text):
        # Switch the application-wide widget style to the selected one.
        self.styleChoice.setText(text)
        QtGui.QApplication.setStyle(QtGui.QStyleFactory.create(text))

    def download(self):
        # NOTE(review): busy-loops on the GUI thread (a million += 0.0001
        # steps) with no timer/processEvents, so the UI freezes while the
        # bar fills - a QTimer would be the usual fix.
        self.completed = 0
        while self.completed < 100:
            self.completed += 0.0001
            self.progress.setValue(self.completed)

    def enlarge_window(self, state):
        # Toggle between the default and an enlarged window geometry.
        if state == QtCore.Qt.Checked:
            self.setGeometry(50, 50, 1000, 600)
        else:
            self.setGeometry(50, 50, 500, 300)

    def close_application(self):
        # POPUP MESSAGE
        choice = QtGui.QMessageBox.question(self, 'Extract!',
                                            "Get into the chopper?", QtGui.QMessageBox.Yes | QtGui.QMessageBox.No, QtGui.QMessageBox.No)
        #Last argument will be treated as the default choice
        if choice == QtGui.QMessageBox.Yes:
            print("Extracting Naaaooooww!!!")
            sys.exit()
        else:
            pass
        # --POPUP MESSAGE

    def closeEvent(self, event): #Making the X-window work with the pop-up
        event.ignore()
        self.close_application()
def run():
    """Create the QApplication and main window, then enter the Qt event loop.

    Exits the process with the event loop's return code via sys.exit().
    """
    app = QtGui.QApplication(sys.argv)
    # Keep a reference to the window so it is not garbage-collected
    # while the event loop runs (Window.__init__ calls self.show()).
    gui = Window()
    sys.exit(app.exec_())


# Fix: guard the entry point so importing this module (e.g. from a test
# or another script) no longer launches the GUI as a side effect.
if __name__ == "__main__":
    run()
|
{
"content_hash": "5caea73bbc04241dd25f6b687e6471ca",
"timestamp": "",
"source": "github",
"line_count": 182,
"max_line_length": 104,
"avg_line_length": 31.52747252747253,
"alnum_prop": 0.6106657371906588,
"repo_name": "mbayon/TFG-MachineLearning",
"id": "4be4ffd05385f80e3b19d19b0f9c43941c2e513d",
"size": "5738",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PyQt-PythonInterfaces/basic-example-14.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "24787"
},
{
"name": "Julia",
"bytes": "11103"
},
{
"name": "Matlab",
"bytes": "98571"
},
{
"name": "Perl",
"bytes": "716"
},
{
"name": "Python",
"bytes": "115284"
},
{
"name": "Shell",
"bytes": "643"
}
],
"symlink_target": ""
}
|
class Solution(object):
    def canConstruct(self, ransomNote, magazine):
        """Return True if ransomNote can be built from magazine's letters.

        Each letter in magazine may be used at most once.

        :type ransomNote: str
        :type magazine: str
        :rtype: bool
        """
        from collections import Counter
        # Counter subtraction keeps only positive counts, so the result is
        # empty exactly when magazine covers every needed letter.  (The
        # original used dict.has_key, which was removed in Python 3;
        # Counter behaves identically on 2.7 and 3.x.)
        return not (Counter(ransomNote) - Counter(magazine))
|
{
"content_hash": "9cf273032319879e905dc8deb0ae053b",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 49,
"avg_line_length": 28.545454545454547,
"alnum_prop": 0.445859872611465,
"repo_name": "lunabox/leetcode",
"id": "7ec07f772930ee664c04f4db99d2c28d73a42af6",
"size": "628",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/problems/s383_Ransom_Note.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "4977"
},
{
"name": "JavaScript",
"bytes": "19188"
},
{
"name": "Kotlin",
"bytes": "50094"
},
{
"name": "Python",
"bytes": "38767"
}
],
"symlink_target": ""
}
|
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
# Module-level logger shared by this generated managed-object module.
log = logging.getLogger(__name__)
def VmDateRolledBackEvent(vim, *args, **kwargs):
    '''This event records when the VirtualCenter server date rolled back.

    Builds an ns0:VmDateRolledBackEvent data object from positional and/or
    keyword arguments.  Positional args fill the required fields first,
    then the optional ones; keyword args must name a known field.

    :param vim: connected pyvisdk service instance used to create the object
    :raises IndexError: if fewer than the 5 required values are supplied
    :raises InvalidArgumentError: if a keyword names an unknown field
    '''

    obj = vim.client.factory.create('ns0:VmDateRolledBackEvent')

    # do some validation checking...
    # Fix: the message previously claimed "at least 6" and reported only
    # len(args), while the check requires the 5 fields listed in
    # `required` and counts args + kwargs together.
    if (len(args) + len(kwargs)) < 5:
        raise IndexError('Expected at least 5 arguments got: %d' % (len(args) + len(kwargs)))

    required = [ 'template', 'chainId', 'createdTime', 'key', 'userName' ]
    optional = [ 'changeTag', 'computeResource', 'datacenter', 'ds', 'dvs',
        'fullFormattedMessage', 'host', 'net', 'vm', 'dynamicProperty', 'dynamicType' ]

    # Positional arguments map onto required fields first, then optional.
    for name, arg in zip(required+optional, args):
        setattr(obj, name, arg)

    # Keyword arguments must name a known field.
    for name, value in kwargs.items():
        if name in required + optional:
            setattr(obj, name, value)
        else:
            raise InvalidArgumentError("Invalid argument: %s.  Expected one of %s" % (name, ", ".join(required + optional)))

    return obj
|
{
"content_hash": "251bc5cf99529d278144a37ea8a7a6b8",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 124,
"avg_line_length": 35.666666666666664,
"alnum_prop": 0.6023789294817332,
"repo_name": "xuru/pyvisdk",
"id": "5530341d44bf43cc25cdbc3b46622144f05ee48e",
"size": "1178",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyvisdk/do/vm_date_rolled_back_event.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "369"
},
{
"name": "Python",
"bytes": "3037849"
},
{
"name": "Shell",
"bytes": "4517"
}
],
"symlink_target": ""
}
|
"""Sphinx configuration for the StrPack.jl documentation build.

Uses the custom 'julia' Sphinx extension/domain (added to sys.path below)
so Julia code can be highlighted and cross-referenced.
"""
import sys, os

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinx'))

# -- General configuration -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.mathjax', 'julia']

# Add any paths that contain templates here, relative to this directory.
#templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'StrPack.jl'
AUTHORS = u"Patrick O'Leary"
copyright = u'2013, '+AUTHORS

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'devel'
# The full version, including alpha/beta/rc tags.
release = 'development'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# Default to the custom Julia domain and highlighter for code blocks.
primary_domain = 'jl'
highlight_language = 'julia'

# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'julia'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = ['_themes']

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {
#    '**': ['localtoc.html', 'relations.html', 'searchbox.html'],
#    'index': [],
#    'search': [],
#}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
#htmlhelp_basename = 'JuliaLanguageDoc'

# -- Options for LaTeX output --------------------------------------------------

latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',

# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',

# Additional stuff for the LaTeX preamble.
#'preamble': '',

# Map Unicode math characters used in the docs onto LaTeX commands so
# the pdflatex build does not choke on them.
'utf8extra': r'''
\DeclareUnicodeCharacter{00A0}{\nobreakspace}
\DeclareUnicodeCharacter{2203}{\ensuremath{\exists}}
\DeclareUnicodeCharacter{2200}{\ensuremath{\forall}}
\DeclareUnicodeCharacter{27FA}{\ensuremath{\Longleftrightarrow}}
''',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'StrPack.jl.tex', u'StrPack.jl Documentation',
   AUTHORS, 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True

# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'strpack.jl', u'StrPack.jl Documentation',
     [AUTHORS], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False

# -- Options for Texinfo output ------------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
  ('index', 'StrPack.jl', u'StrPack.jl Documentation',
   AUTHORS, 'StrPack.jl', 'A structured binary stream toolkit for Julia.',
   'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
|
{
"content_hash": "69f4ef704e5f35819fe184331cff5e43",
"timestamp": "",
"source": "github",
"line_count": 244,
"max_line_length": 80,
"avg_line_length": 32.131147540983605,
"alnum_prop": 0.6982142857142857,
"repo_name": "Keno/StrPack.jl",
"id": "d90789698b10e0ab31e196082103829954bc7b3a",
"size": "8265",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "doc/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Julia",
"bytes": "13637"
},
{
"name": "Python",
"bytes": "8962"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: add the ``site`` ForeignKey to ``BlogPost``."""

    def forwards(self, orm):
        """Apply: add the ``site`` column pointing at ``sites.Site``."""
        # Adding field 'BlogPost.site'
        # default=1 backfills existing rows — assumes a Site with pk=1 exists
        # (Django's default site); keep_default=False drops the DB-level default.
        db.add_column('blog_blogpost', 'site', self.gf('django.db.models.fields.related.ForeignKey')(default=1, to=orm['sites.Site']), keep_default=False)

    def backwards(self, orm):
        """Revert: drop the underlying ``site_id`` column."""
        # Deleting field 'BlogPost.site'
        db.delete_column('blog_blogpost', 'site_id')

    # Frozen ORM snapshot used by South to reconstruct the models as they
    # existed at the time this migration was generated. Do not hand-edit.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'blog.blogcategory': {
            'Meta': {'object_name': 'BlogCategory'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'slug': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'blog.blogpost': {
            'Meta': {'object_name': 'BlogPost'},
            'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'blogposts'", 'blank': 'True', 'to': "orm['blog.BlogCategory']"}),
            #'comments': ('mezzanine.generic.fields.CommentsField', [], {'object_id_field': "'object_pk'", 'to': "orm['generic.ThreadedComment']"}),
            'comments_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'content': ('mezzanine.core.fields.HtmlField', [], {}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            #'keywords': ('mezzanine.generic.fields.KeywordsField', [], {'object_id_field': "'object_pk'", 'to': "orm['generic.AssignedKeyword']"}),
            'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
            'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
            'slug': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'blogposts'", 'to': "orm['auth.User']"})
        },
        'comments.comment': {
            'Meta': {'object_name': 'Comment', 'db_table': "'django_comments'"},
            'comment': ('django.db.models.fields.TextField', [], {'max_length': '3000'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'content_type_set_for_comment'", 'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ip_address': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
            'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'is_removed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'object_pk': ('django.db.models.fields.TextField', [], {}),
            'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
            'submit_date': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comment_comments'", 'null': 'True', 'to': "orm['auth.User']"}),
            'user_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'user_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'user_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
        },
        'contenttypes.contenttype': {
            'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'generic.assignedkeyword': {
            'Meta': {'object_name': 'AssignedKeyword'},
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'keyword': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'assignments'", 'to': "orm['generic.Keyword']"}),
            'object_pk': ('django.db.models.fields.TextField', [], {})
        },
        'generic.keyword': {
            'Meta': {'object_name': 'Keyword'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'slug': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'generic.threadedcomment': {
            'Meta': {'object_name': 'ThreadedComment', '_ormbases': ['comments.Comment']},
            'by_author': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'comment_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['comments.Comment']", 'unique': 'True', 'primary_key': 'True'}),
            'email_hash': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'replied_to': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'null': 'True', 'to': "orm['generic.ThreadedComment']"})
        },
        'sites.site': {
            'Meta': {'object_name': 'Site', 'db_table': "'django_site'"},
            'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        }
    }

    complete_apps = ['blog']
|
{
"content_hash": "dcb4fb8070f5ec9a15e5171a353eac00",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 190,
"avg_line_length": 74.28346456692914,
"alnum_prop": 0.5522577909688361,
"repo_name": "eRestin/MezzGIS",
"id": "7ada431141846ee776b1a77771845832e1beec6a",
"size": "9452",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mezzanine/blog/migrations/0007_auto__add_field_blogpost_site.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "359014"
},
{
"name": "HTML",
"bytes": "153323"
},
{
"name": "JavaScript",
"bytes": "525988"
},
{
"name": "Nginx",
"bytes": "3644"
},
{
"name": "Perl",
"bytes": "271341"
},
{
"name": "Python",
"bytes": "1130497"
},
{
"name": "Shell",
"bytes": "135"
}
],
"symlink_target": ""
}
|
"""Contains the logic for `aq show personality`."""
from sqlalchemy.orm import joinedload, subqueryload, contains_eager
from sqlalchemy.sql import or_
from aquilon.aqdb.model import (Archetype, Personality, HostEnvironment,
PersonalityGrnMap)
from aquilon.worker.dbwrappers.grn import lookup_grn
from aquilon.worker.broker import BrokerCommand # pylint: disable=W0611
from aquilon.worker.formats.personality import (PersonalityList,
SimplePersonalityList)
class CommandSearchPersonality(BrokerCommand):
    """Search personalities matching the given filter criteria.

    Builds a single SQLAlchemy query, applying one filter per supplied
    parameter, and returns either a full or a simplified formatted list.
    """

    required_parameters = []

    def render(self, session, personality, archetype, grn, eon_id,
               host_environment, config_override, fullinfo, **arguments):
        q = session.query(Personality)
        if archetype:
            # Resolve the archetype eagerly so an unknown name fails with a
            # clear error (compel=True raises) instead of an empty result.
            dbarchetype = Archetype.get_unique(session, archetype, compel=True)
            q = q.filter_by(archetype=dbarchetype)
        if personality:
            q = q.filter_by(name=personality)
        if config_override:
            q = q.filter_by(config_override=True)
        if host_environment:
            host_env = HostEnvironment.get_unique(session, host_environment, compel=True)
            q = q.filter_by(host_environment=host_env)
        if grn or eon_id:
            dbgrn = lookup_grn(session, grn, eon_id, autoupdate=False)
            # Match personalities either owned by the GRN or mapped to it;
            # outer join so ownership-only matches are not dropped.
            q = q.outerjoin(PersonalityGrnMap)
            q = q.filter(or_(Personality.owner_eon_id == dbgrn.eon_id,
                             PersonalityGrnMap.eon_id == dbgrn.eon_id))
        q = q.join(Archetype)
        q = q.order_by(Archetype.name, Personality.name)
        q = q.options(contains_eager('archetype'))
        if fullinfo:
            # Eager-load related collections so formatting the full view does
            # not issue one query per personality.
            q = q.options(subqueryload('services'),
                          subqueryload('_grns'),
                          subqueryload('features'),
                          joinedload('features.feature'),
                          joinedload('cluster_infos'))
            return PersonalityList(q.all())
        else:
            return SimplePersonalityList(q.all())
|
{
"content_hash": "edeba6b6ad0dae0d77ba029ce878fcce",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 89,
"avg_line_length": 38.907407407407405,
"alnum_prop": 0.603998096144693,
"repo_name": "stdweird/aquilon",
"id": "61affe9d76d7833d861a6e1e6eff44b75f285857",
"size": "2784",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/python2.6/aquilon/worker/commands/search_personality.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "DIGITAL Command Language",
"bytes": "3791"
},
{
"name": "Makefile",
"bytes": "5024"
},
{
"name": "Mako",
"bytes": "3996"
},
{
"name": "PLSQL",
"bytes": "69088"
},
{
"name": "Perl",
"bytes": "5030"
},
{
"name": "Python",
"bytes": "4257490"
},
{
"name": "SQLPL",
"bytes": "869"
},
{
"name": "Shell",
"bytes": "22083"
}
],
"symlink_target": ""
}
|
import tornado
from tornado import gen
from tornado.web import HTTPError
from handler.api.base import BaseHandler
#from data.collections import User
class ProfileHandler(BaseHandler):
    """REST handler for reading (GET) and editing (PUT) a user profile."""

    def data_received(self, chunk):
        # Streamed request bodies are not used by this handler.
        pass

    @tornado.web.asynchronous
    @gen.coroutine
    def get(self):
        """Return the profile for ?id=<user_id>, defaulting to the logged-in user."""
        uid = self.is_logined()
        user_id = self.get_argument('id', uid)
        # An explicitly empty ?id= also falls back to the current user.
        user_id = user_id if user_id != '' else uid
        self.vaildate_id(user_id)
        # NOTE(review): `User` is undefined in this module — its import
        # (`from data.collections import User`) is commented out at the top of
        # the file, so this coroutine raises NameError; restore the import.
        user = yield User.objects.get(user_id)
        self.vaildate_resource(user)
        self.write_json(user.to_dict())

    @tornado.web.asynchronous
    @gen.coroutine
    def put(self):
        """Update nickname/gender/description; persist only if something changed."""
        uid = self.is_logined()
        user = yield User.objects.get(uid)
        self.vaildate_resource(user)
        need_edit = 0
        nickname = self.get_argument('nickname', None)
        if self.vaildate_nickname(nickname):
            user.nickname = nickname
            need_edit += 1
        gender = self.get_argument('gender', '')
        if gender in ['0', '1']:
            user.gender = int(gender)
            need_edit += 1
        description = self.get_argument('description', None)
        if self.vaildate_description(description):
            user.description = description
            need_edit += 1
        if need_edit != 0:
            yield user.save()
        self.write_json(user.to_dict())

    # TODO: enforce length/content limits on nickname and description.
    @staticmethod
    def vaildate_nickname(nickname):
        """A nickname is valid when it is a non-empty string."""
        return nickname is not None and len(nickname) > 0

    @staticmethod
    def vaildate_description(description):
        """A description is valid when it is a non-empty string."""
        return description is not None and len(description) > 0
class AvatarUploadHandler(BaseHandler):
    """Accepts an avatar image upload and stores its URL on the current user."""

    def data_received(self, chunk):
        # Streamed request bodies are not used by this handler.
        pass

    @tornado.web.asynchronous
    @gen.coroutine
    def post(self):
        """Save the uploaded 'avatar' file and record its URL on the user."""
        uid = self.is_logined()
        url = self.upload_file_from_request('avatar', 'avatar/')
        # NOTE(review): `User` is undefined here — its import is commented out
        # at module top; this raises NameError until the import is restored.
        user = yield User.objects.get(uid)
        self.check_none(user)
        user.avatar_url = url
        yield user.save()
        self.write_json(user.to_dict())
|
{
"content_hash": "a8962501c49f131df5fff934d73c24ad",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 64,
"avg_line_length": 24.555555555555557,
"alnum_prop": 0.5941176470588235,
"repo_name": "INAP-LABS/noc-orchestrator",
"id": "f6229fee54b44118718f5218ceeb584f98940376",
"size": "2248",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "old_historico/sandbox/api_tornado/handler/api/user/profile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "729"
},
{
"name": "Python",
"bytes": "82719"
},
{
"name": "Shell",
"bytes": "6417"
}
],
"symlink_target": ""
}
|
import io
import json
import time
from typing import Dict, List, Union, Any

import dateutil
import dateutil.parser  # `import dateutil` alone does not bind the `parser` submodule

from .fs import FS
from .stat_result import FileType, StatResult
def _stat_dict_to_stat_result(stat: Dict[str, Any]) -> StatResult:
    """Convert a JSON stat dict produced by the JVM helper into a StatResult.

    Expects keys: 'modification_time' (ISO-8601 string), 'is_dir', 'is_link',
    'path', 'owner', 'size'.
    """
    # NOTE(review): `dateutil.parser` is used but only `import dateutil` appears
    # at the top of this file; that does not load the submodule unless some
    # other import already did — add `import dateutil.parser` to be safe.
    dt = dateutil.parser.isoparse(stat['modification_time'])
    # mktime interprets the struct_time in *local* time and drops sub-second
    # precision — assumes that matches what the JVM side encodes; TODO confirm.
    mtime = time.mktime(dt.timetuple())
    if stat['is_dir']:
        typ = FileType.DIRECTORY
    elif stat['is_link']:
        typ = FileType.SYMLINK
    else:
        typ = FileType.FILE
    return StatResult(path=stat['path'], owner=stat['owner'], size=stat['size'],
                      typ=typ, modification_time=mtime)
class HadoopFS(FS):
    """FS implementation that delegates all operations to a JVM-side Hadoop FileSystem."""

    def __init__(self, utils_package_object, jfs):
        self._utils_package_object = utils_package_object
        self._jfs = jfs

    def open(self, path: str, mode: str = 'r', buffer_size: int = 8192):
        """Open `path` without codec (compression) handling."""
        return self._open(path, mode, buffer_size, use_codec=False)

    def legacy_open(self, path: str, mode: str = 'r', buffer_size: int = 8192):
        # this method for compatibility with hadoop_open in 0.2
        return self._open(path, mode, buffer_size, use_codec=True)

    def _open(self, path: str, mode: str = 'r', buffer_size: int = 8192, use_codec: bool = False):
        """Open `path`; 'r'/'w'/'x' select reader or (exclusive) writer, 'b' selects binary.

        Raises:
            ValueError: if `mode` contains none of 'r', 'w' or 'x'.
        """
        handle: Union[io.BufferedReader, io.BufferedWriter]
        if 'r' in mode:
            handle = io.BufferedReader(HadoopReader(self, path, buffer_size, use_codec=use_codec), buffer_size=buffer_size)
        elif 'w' in mode:
            handle = io.BufferedWriter(HadoopWriter(self, path, use_codec=use_codec), buffer_size=buffer_size)
        elif 'x' in mode:
            handle = io.BufferedWriter(HadoopWriter(self, path, exclusive=True, use_codec=use_codec), buffer_size=buffer_size)
        else:
            # Previously an unrecognized mode fell through and raised a
            # confusing UnboundLocalError; fail fast with a clear message.
            raise ValueError(f"Unsupported mode: {mode!r}")
        if 'b' in mode:
            return handle
        else:
            return io.TextIOWrapper(handle, encoding='iso-8859-1')

    def copy(self, src: str, dest: str):
        # Third argument presumably means "delete source" and is kept False —
        # confirm against the JVM helper's signature before changing.
        self._jfs.copy(src, dest, False)

    def exists(self, path: str) -> bool:
        return self._jfs.exists(path)

    def is_file(self, path: str) -> bool:
        return self._jfs.isFile(path)

    def is_dir(self, path: str) -> bool:
        return self._jfs.isDir(path)

    def stat(self, path: str) -> StatResult:
        """Stat a single path via the JVM helper (returns JSON, parsed here)."""
        stat_dict = json.loads(self._utils_package_object.stat(self._jfs, path))
        return _stat_dict_to_stat_result(stat_dict)

    def ls(self, path: str) -> List[StatResult]:
        """List a directory; one StatResult per entry."""
        return [_stat_dict_to_stat_result(st)
                for st in json.loads(self._utils_package_object.ls(self._jfs, path))]

    def mkdir(self, path: str) -> None:
        return self._jfs.mkDir(path)

    def remove(self, path: str):
        return self._jfs.remove(path)

    def rmtree(self, path: str):
        return self._jfs.rmtree(path)

    def supports_scheme(self, scheme: str) -> bool:
        return self._jfs.supportsScheme(scheme)

    def canonicalize_path(self, path: str) -> str:
        return self._jfs.makeQualified(path)
class HadoopReader(io.RawIOBase):
    """Raw, file-like reader backed by a JVM-side Hadoop input stream."""

    def __init__(self, hfs, path, buffer_size, use_codec=False):
        super(HadoopReader, self).__init__()
        # Codec-wrapped (compressed) streams cannot be repositioned.
        self._seekable = not use_codec
        reader_factory = (hfs._utils_package_object.readFileCodec
                          if use_codec
                          else hfs._utils_package_object.readFile)
        self._jfile = reader_factory(hfs._jfs, path, buffer_size)
        self.mode = 'rb'

    def close(self):
        self._jfile.close()

    def readable(self):
        return True

    def seekable(self):
        return self._seekable

    def seek(self, offset, whence=io.SEEK_SET):
        # Only SEEK_SET (0), SEEK_CUR (1) and SEEK_END (2) are meaningful.
        if whence < 0 or whence > 2:
            raise io.UnsupportedOperation(f'unsupported whence value {whence}')
        return self._jfile.seek(offset, whence)

    def tell(self):
        return self._jfile.getPosition()

    def readinto(self, b):
        # The Java side hands back a byte string; copy it into the caller's
        # buffer and report how many bytes were actually produced.
        data = self._jfile.read(len(b))
        count = len(data)
        b[:count] = data
        return count
class HadoopWriter(io.RawIOBase):
    """Raw, file-like writer backed by a JVM-side Hadoop output stream."""

    def __init__(self, hfs, path, exclusive=False, use_codec=False):
        super(HadoopWriter, self).__init__()
        writer_factory = (hfs._utils_package_object.writeFileCodec
                          if use_codec
                          else hfs._utils_package_object.writeFile)
        self._jfile = writer_factory(hfs._jfs, path, exclusive)
        # Mirror Python open() mode semantics: 'x' means exclusive creation.
        self.mode = 'xb' if exclusive else 'wb'

    def writable(self):
        return True

    def close(self):
        self._jfile.close()

    def flush(self):
        self._jfile.flush()

    def write(self, b):
        # The JVM bridge expects a mutable byte buffer; report full write.
        self._jfile.write(bytearray(b))
        return len(b)
|
{
"content_hash": "be7e39ba894b9bbd65a5703b6cdac0de",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 126,
"avg_line_length": 32.44444444444444,
"alnum_prop": 0.6102311643835616,
"repo_name": "hail-is/hail",
"id": "e172a06c07bc2f7c0961aac8534250587e5334d7",
"size": "4672",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "hail/python/hail/fs/hadoop_fs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "7729"
},
{
"name": "C",
"bytes": "779"
},
{
"name": "C++",
"bytes": "171899"
},
{
"name": "CMake",
"bytes": "3045"
},
{
"name": "CSS",
"bytes": "666"
},
{
"name": "Dockerfile",
"bytes": "10056"
},
{
"name": "Emacs Lisp",
"bytes": "377"
},
{
"name": "HCL",
"bytes": "54923"
},
{
"name": "HTML",
"bytes": "155946"
},
{
"name": "Java",
"bytes": "38401"
},
{
"name": "JavaScript",
"bytes": "877"
},
{
"name": "Jupyter Notebook",
"bytes": "305748"
},
{
"name": "MLIR",
"bytes": "20"
},
{
"name": "Makefile",
"bytes": "61284"
},
{
"name": "Python",
"bytes": "5635857"
},
{
"name": "R",
"bytes": "3038"
},
{
"name": "SCSS",
"bytes": "33487"
},
{
"name": "Scala",
"bytes": "5050997"
},
{
"name": "Shell",
"bytes": "75539"
},
{
"name": "XSLT",
"bytes": "5748"
}
],
"symlink_target": ""
}
|
import os
from uuid import uuid4
import create_dataset_video_sample
import pytest
import helpers
# GCP project used by these samples' CI, injected by the build environment.
PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT")
# Metadata schema URI for video datasets.
# NOTE(review): not referenced in this module as shown — confirm it is needed.
VIDEO_METADATA_SCHEMA_URI = (
    "gs://google-cloud-aiplatform/schema/dataset/metadata/video_1.0.0.yaml"
)
@pytest.fixture(scope="function", autouse=True)
def teardown(teardown_dataset):
    # Autouse wrapper: runs around every test and delegates the actual dataset
    # cleanup to the shared `teardown_dataset` fixture after the test body.
    yield
@pytest.mark.skip(reason="https://github.com/googleapis/java-aiplatform/issues/420")
def test_ucaip_generated_create_dataset_video_sample_vision(capsys, shared_state):
    # Create a video dataset with a unique display name, assert the sample
    # printed a create response, and record the dataset name for teardown.
    create_dataset_video_sample.create_dataset_video_sample(
        display_name=f"temp_create_dataset_test_{uuid4()}", project=PROJECT_ID
    )
    out, _ = capsys.readouterr()
    assert "create_dataset_response" in out
    # The resource name is parsed from stdout and consumed by the teardown fixture.
    shared_state["dataset_name"] = helpers.get_name(out)
|
{
"content_hash": "6577f2405d65e093f747376e4ea697b7",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 84,
"avg_line_length": 28.821428571428573,
"alnum_prop": 0.7410161090458488,
"repo_name": "sasha-gitg/python-aiplatform",
"id": "902676b8bbcb1130eef3aad164247c39c4cf2bfa",
"size": "1382",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "samples/snippets/dataset_service/create_dataset_video_sample_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1700"
},
{
"name": "Python",
"bytes": "11216304"
},
{
"name": "Shell",
"bytes": "30838"
}
],
"symlink_target": ""
}
|
"""
from: http://adventofcode.com/2016/day/6
--- Part Two ---
Of course, that would be the message - if you hadn't agreed to use a modified repetition code
instead.
In this modified code, the sender instead transmits what looks like random data, but for each
character, the character they actually want to send is slightly less likely than the others.
Even after signal-jamming noise, you can look at the letter distributions in each column and choose
the least common letter to reconstruct the original message.
In the above example, the least common character in the first column is a; in the second, d, and so
on. Repeating this process for the remaining characters produces the original message, advent.
Given the recording in your puzzle input and this new decoding methodology, what is the original
message that Santa is trying to send?
"""
from collections import Counter
def main():
    """Print the least common (char, count) pair for each column of input.txt.

    Reads 8-character lines from "input.txt", tallies character frequencies
    per column, and prints each column's least common entry (AoC 2016 day 6,
    part two).
    """
    totals = [Counter() for _ in range(8)]
    with open("input.txt") as f:
        for line in f:
            # enumerate already yields int indices; the old `int(position)`
            # conversion was redundant and has been removed.
            for position, value in enumerate(line.strip()):
                totals[position][value] += 1
    for column_counts in totals:
        # most_common() sorts by descending count, so the last entry is the
        # least common character (ties broken by insertion order).
        print(column_counts.most_common()[-1])


if __name__ == "__main__":
    main()
|
{
"content_hash": "93c68664a071a651dad8fabd99df761b",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 99,
"avg_line_length": 37.53125,
"alnum_prop": 0.7144046627810158,
"repo_name": "kmcginn/advent-of-code",
"id": "c3c97d0122a8955dfd1ede553be7caaa21804aad",
"size": "1201",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "2016/day06/signal2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "189615"
}
],
"symlink_target": ""
}
|
import sys
from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, overload
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from ..._serialization import Serializer
from .._vendor import _convert_request, _format_url_section
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
# Optional per-call response hook: (pipeline_response, deserialized, response_headers) -> Any.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
# Disable client-side validation of serialized values (generated-client convention).
_SERIALIZER.client_side_validation = False
def build_list_by_managed_cluster_request(
    resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the GET request listing a managed cluster's maintenance configurations."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    # Callers may override the api-version per operation; default is pinned.
    api_version = kwargs.pop(
        "api_version", _params.pop("api-version", "2022-03-02-preview")
    )  # type: Literal["2022-03-02-preview"]
    accept = _headers.pop("Accept", "application/json")
    # Construct URL
    _url = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/maintenanceConfigurations",
    )  # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
        "resourceGroupName": _SERIALIZER.url(
            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
        ),
        "resourceName": _SERIALIZER.url(
            "resource_name",
            resource_name,
            "str",
            max_length=63,
            min_length=1,
            pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
        ),
    }
    _url = _format_url_section(_url, **path_format_arguments)
    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_get_request(
    resource_group_name: str, resource_name: str, config_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the GET request fetching one maintenance configuration by name."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop(
        "api_version", _params.pop("api-version", "2022-03-02-preview")
    )  # type: Literal["2022-03-02-preview"]
    accept = _headers.pop("Accept", "application/json")
    # Construct URL
    _url = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/maintenanceConfigurations/{configName}",
    )  # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
        "resourceGroupName": _SERIALIZER.url(
            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
        ),
        "resourceName": _SERIALIZER.url(
            "resource_name",
            resource_name,
            "str",
            max_length=63,
            min_length=1,
            pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
        ),
        "configName": _SERIALIZER.url("config_name", config_name, "str"),
    }
    _url = _format_url_section(_url, **path_format_arguments)
    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_create_or_update_request(
    resource_group_name: str, resource_name: str, config_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the PUT request creating or updating a maintenance configuration."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop(
        "api_version", _params.pop("api-version", "2022-03-02-preview")
    )  # type: Literal["2022-03-02-preview"]
    # Content-Type is set only when the caller supplies a serialized body.
    content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
    accept = _headers.pop("Accept", "application/json")
    # Construct URL
    _url = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/maintenanceConfigurations/{configName}",
    )  # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
        "resourceGroupName": _SERIALIZER.url(
            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
        ),
        "resourceName": _SERIALIZER.url(
            "resource_name",
            resource_name,
            "str",
            max_length=63,
            min_length=1,
            pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
        ),
        "configName": _SERIALIZER.url("config_name", config_name, "str"),
    }
    _url = _format_url_section(_url, **path_format_arguments)
    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    # Construct headers
    if content_type is not None:
        _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
def build_delete_request(
    resource_group_name: str, resource_name: str, config_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the DELETE request removing a maintenance configuration by name."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop(
        "api_version", _params.pop("api-version", "2022-03-02-preview")
    )  # type: Literal["2022-03-02-preview"]
    accept = _headers.pop("Accept", "application/json")
    # Construct URL
    _url = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/maintenanceConfigurations/{configName}",
    )  # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
        "resourceGroupName": _SERIALIZER.url(
            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
        ),
        "resourceName": _SERIALIZER.url(
            "resource_name",
            resource_name,
            "str",
            max_length=63,
            min_length=1,
            pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
        ),
        "configName": _SERIALIZER.url("config_name", config_name, "str"),
    }
    _url = _format_url_section(_url, **path_format_arguments)
    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
    return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
class MaintenanceConfigurationsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.containerservice.v2022_03_02_preview.ContainerServiceClient`'s
:attr:`maintenance_configurations` attribute.
"""
models = _models
    def __init__(self, *args, **kwargs):
        # Accept (client, config, serializer, deserializer) either positionally
        # or as keyword arguments — generated service clients pass positionally.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
    @distributed_trace
    def list_by_managed_cluster(
        self, resource_group_name: str, resource_name: str, **kwargs: Any
    ) -> Iterable["_models.MaintenanceConfiguration"]:
        """Gets a list of maintenance configurations in the specified managed cluster.

        Gets a list of maintenance configurations in the specified managed cluster.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource. Required.
        :type resource_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either MaintenanceConfiguration or the result of
         cls(response)
        :rtype:
         ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2022_03_02_preview.models.MaintenanceConfiguration]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop(
            "api_version", _params.pop("api-version", "2022-03-02-preview")
        )  # type: Literal["2022-03-02-preview"]
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.MaintenanceConfigurationListResult]
        # Map well-known HTTP status codes to the corresponding azure-core errors.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        def prepare_request(next_link=None):
            # First page: build the templated list request; later pages: follow next_link.
            if not next_link:
                request = build_list_by_managed_cluster_request(
                    resource_group_name=resource_group_name,
                    resource_name=resource_name,
                    subscription_id=self._config.subscription_id,
                    api_version=api_version,
                    template_url=self.list_by_managed_cluster.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore
            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore
                request.method = "GET"
            return request

        def extract_data(pipeline_response):
            # Deserialize one page and hand back (next_link, items) to ItemPaged.
            deserialized = self._deserialize("MaintenanceConfigurationListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            # Execute one page request; anything other than 200 is an error.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
                request, stream=False, **kwargs
            )
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response

        return ItemPaged(get_next, extract_data)

    list_by_managed_cluster.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/maintenanceConfigurations"}  # type: ignore
@distributed_trace
def get(
    self, resource_group_name: str, resource_name: str, config_name: str, **kwargs: Any
) -> _models.MaintenanceConfiguration:
    """Gets the specified maintenance configuration of a managed cluster.

    Gets the specified maintenance configuration of a managed cluster.

    :param resource_group_name: The name of the resource group. The name is case insensitive.
     Required.
    :type resource_group_name: str
    :param resource_name: The name of the managed cluster resource. Required.
    :type resource_name: str
    :param config_name: The name of the maintenance configuration. Required.
    :type config_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: MaintenanceConfiguration or the result of cls(response)
    :rtype: ~azure.mgmt.containerservice.v2022_03_02_preview.models.MaintenanceConfiguration
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    # Per-call option extraction: headers/params first, then versioning and
    # the optional response-transform callable.
    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop(
        "api_version", _params.pop("api-version", "2022-03-02-preview")
    )  # type: Literal["2022-03-02-preview"]
    cls = kwargs.pop("cls", None)  # type: ClsType[_models.MaintenanceConfiguration]

    # Default mapping from well-known status codes to azure-core exceptions;
    # callers may extend/override it via the ``error_map`` keyword.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    error_map.update(kwargs.pop("error_map", {}) or {})

    # Build the GET request and normalize its URL against the client endpoint.
    _request = build_get_request(
        resource_group_name=resource_group_name,
        resource_name=resource_name,
        config_name=config_name,
        subscription_id=self._config.subscription_id,
        api_version=api_version,
        template_url=self.get.metadata["url"],
        headers=_headers,
        params=_params,
    )
    _request = _convert_request(_request)
    _request.url = self._client.format_url(_request.url)  # type: ignore

    pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        _request, stream=False, **kwargs
    )
    response = pipeline_response.http_response

    # Only 200 is a success for this operation.
    if response.status_code != 200:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize("MaintenanceConfiguration", pipeline_response)

    return cls(pipeline_response, deserialized, {}) if cls else deserialized

get.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/maintenanceConfigurations/{configName}"}  # type: ignore
# Overload signature: typed model body (serialized to JSON by the implementation).
@overload
def create_or_update(
    self,
    resource_group_name: str,
    resource_name: str,
    config_name: str,
    parameters: _models.MaintenanceConfiguration,
    *,
    content_type: str = "application/json",
    **kwargs: Any
) -> _models.MaintenanceConfiguration:
    """Creates or updates a maintenance configuration in the specified managed cluster.

    Creates or updates a maintenance configuration in the specified managed cluster.

    :param resource_group_name: The name of the resource group. The name is case insensitive.
     Required.
    :type resource_group_name: str
    :param resource_name: The name of the managed cluster resource. Required.
    :type resource_name: str
    :param config_name: The name of the maintenance configuration. Required.
    :type config_name: str
    :param parameters: The maintenance configuration to create or update. Required.
    :type parameters:
     ~azure.mgmt.containerservice.v2022_03_02_preview.models.MaintenanceConfiguration
    :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
     Default value is "application/json".
    :paramtype content_type: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: MaintenanceConfiguration or the result of cls(response)
    :rtype: ~azure.mgmt.containerservice.v2022_03_02_preview.models.MaintenanceConfiguration
    :raises ~azure.core.exceptions.HttpResponseError:
    """
# Overload signature: raw IO/bytes body, passed through to the wire as-is.
@overload
def create_or_update(
    self,
    resource_group_name: str,
    resource_name: str,
    config_name: str,
    parameters: IO,
    *,
    content_type: str = "application/json",
    **kwargs: Any
) -> _models.MaintenanceConfiguration:
    """Creates or updates a maintenance configuration in the specified managed cluster.

    Creates or updates a maintenance configuration in the specified managed cluster.

    :param resource_group_name: The name of the resource group. The name is case insensitive.
     Required.
    :type resource_group_name: str
    :param resource_name: The name of the managed cluster resource. Required.
    :type resource_name: str
    :param config_name: The name of the maintenance configuration. Required.
    :type config_name: str
    :param parameters: The maintenance configuration to create or update. Required.
    :type parameters: IO
    :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
     Default value is "application/json".
    :paramtype content_type: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: MaintenanceConfiguration or the result of cls(response)
    :rtype: ~azure.mgmt.containerservice.v2022_03_02_preview.models.MaintenanceConfiguration
    :raises ~azure.core.exceptions.HttpResponseError:
    """
@distributed_trace
def create_or_update(
    self,
    resource_group_name: str,
    resource_name: str,
    config_name: str,
    parameters: Union[_models.MaintenanceConfiguration, IO],
    **kwargs: Any
) -> _models.MaintenanceConfiguration:
    """Creates or updates a maintenance configuration in the specified managed cluster.

    Creates or updates a maintenance configuration in the specified managed cluster.

    :param resource_group_name: The name of the resource group. The name is case insensitive.
     Required.
    :type resource_group_name: str
    :param resource_name: The name of the managed cluster resource. Required.
    :type resource_name: str
    :param config_name: The name of the maintenance configuration. Required.
    :type config_name: str
    :param parameters: The maintenance configuration to create or update. Is either a model type or
     a IO type. Required.
    :type parameters:
     ~azure.mgmt.containerservice.v2022_03_02_preview.models.MaintenanceConfiguration or IO
    :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
     Default value is None.
    :paramtype content_type: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: MaintenanceConfiguration or the result of cls(response)
    :rtype: ~azure.mgmt.containerservice.v2022_03_02_preview.models.MaintenanceConfiguration
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    # Per-call option extraction.
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop(
        "api_version", _params.pop("api-version", "2022-03-02-preview")
    )  # type: Literal["2022-03-02-preview"]
    content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
    cls = kwargs.pop("cls", None)  # type: ClsType[_models.MaintenanceConfiguration]

    # Default status-code -> exception mapping, extendable by the caller.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    error_map.update(kwargs.pop("error_map", {}) or {})

    content_type = content_type or "application/json"

    # Raw streams/bytes are forwarded untouched; model instances are
    # serialized to a JSON body.
    _json = None
    _content = None
    if isinstance(parameters, (IO, bytes)):
        _content = parameters
    else:
        _json = self._serialize.body(parameters, "MaintenanceConfiguration")

    _request = build_create_or_update_request(
        resource_group_name=resource_group_name,
        resource_name=resource_name,
        config_name=config_name,
        subscription_id=self._config.subscription_id,
        api_version=api_version,
        content_type=content_type,
        json=_json,
        content=_content,
        template_url=self.create_or_update.metadata["url"],
        headers=_headers,
        params=_params,
    )
    _request = _convert_request(_request)
    _request.url = self._client.format_url(_request.url)  # type: ignore

    pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        _request, stream=False, **kwargs
    )
    response = pipeline_response.http_response

    # Only 200 is a success for this operation.
    if response.status_code != 200:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize("MaintenanceConfiguration", pipeline_response)

    return cls(pipeline_response, deserialized, {}) if cls else deserialized

create_or_update.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/maintenanceConfigurations/{configName}"}  # type: ignore
@distributed_trace
def delete(  # pylint: disable=inconsistent-return-statements
    self, resource_group_name: str, resource_name: str, config_name: str, **kwargs: Any
) -> None:
    """Deletes a maintenance configuration.

    Deletes a maintenance configuration.

    :param resource_group_name: The name of the resource group. The name is case insensitive.
     Required.
    :type resource_group_name: str
    :param resource_name: The name of the managed cluster resource. Required.
    :type resource_name: str
    :param config_name: The name of the maintenance configuration. Required.
    :type config_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None or the result of cls(response)
    :rtype: None
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    # Per-call option extraction.
    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop(
        "api_version", _params.pop("api-version", "2022-03-02-preview")
    )  # type: Literal["2022-03-02-preview"]
    cls = kwargs.pop("cls", None)  # type: ClsType[None]

    # Default status-code -> exception mapping, extendable by the caller.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    error_map.update(kwargs.pop("error_map", {}) or {})

    _request = build_delete_request(
        resource_group_name=resource_group_name,
        resource_name=resource_name,
        config_name=config_name,
        subscription_id=self._config.subscription_id,
        api_version=api_version,
        template_url=self.delete.metadata["url"],
        headers=_headers,
        params=_params,
    )
    _request = _convert_request(_request)
    _request.url = self._client.format_url(_request.url)  # type: ignore

    pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        _request, stream=False, **kwargs
    )
    response = pipeline_response.http_response

    # Both 200 and 204 count as successful deletion responses.
    if response.status_code not in (200, 204):
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    return cls(pipeline_response, None, {}) if cls else None

delete.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/maintenanceConfigurations/{configName}"}  # type: ignore
|
{
"content_hash": "5e3ace7bbb628c2104ab7fa79a5ded08",
"timestamp": "",
"source": "github",
"line_count": 612,
"max_line_length": 232,
"avg_line_length": 43.61764705882353,
"alnum_prop": 0.6426537798756274,
"repo_name": "Azure/azure-sdk-for-python",
"id": "e70a0fb14851604c5994f60405afa33e9ff4d6b4",
"size": "27194",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2022_03_02_preview/operations/_maintenance_configurations_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
from pants.contrib.node.subsystems.command import command_gen
# Module-level logger for this subsystem.
LOG = logging.getLogger(__name__)

# Executable names of the supported node package managers.
PACKAGE_MANAGER_NPM = 'npm'
PACKAGE_MANAGER_YARNPKG = 'yarnpkg'
# 'yarn' is accepted as an alias for 'yarnpkg'.
PACKAGE_MANAGER_YARNPKG_ALIAS = 'yarn'
VALID_PACKAGE_MANAGERS = [PACKAGE_MANAGER_NPM, PACKAGE_MANAGER_YARNPKG, PACKAGE_MANAGER_YARNPKG_ALIAS]

# TODO: Change to enum type when migrated to Python 3.4+
class PackageInstallationTypeOption(object):
    """Enum-like constants for how an added package is recorded in package.json.

    The concrete PackageManager subclasses below translate these into the
    package manager's own save flags (e.g. npm --save-dev, yarn --dev).
    """
    PROD = 'prod'
    DEV = 'dev'
    PEER = 'peer'
    BUNDLE = 'bundle'
    OPTIONAL = 'optional'
    NO_SAVE = 'not saved'  # sentinel: do not record the dependency at all
class PackageInstallationVersionOption(object):
    """Enum-like constants for how an added package's version is matched.

    Translated by PackageManager subclasses into version flags
    (e.g. npm --save-exact, yarn --exact / --tilde).
    """
    EXACT = 'exact'
    TILDE = 'tilde'
class PackageManager(object):
    """Defines node package manager functionalities.

    Subclasses implement the ``_get_*_args`` hooks to supply the command-line
    arguments specific to one package manager (npm, yarnpkg); this base class
    turns those args into runnable commands via ``command_gen``.
    """

    def __init__(self, name, tool_installations):
        """
        :param name: Name of the package manager executable.
        :param tool_installations: Tool installations, forwarded to command_gen.
        """
        self.name = name
        self.tool_installations = tool_installations

    def _get_installation_args(self, install_optional, production_only, force):
        """Returns command line args for installing package.

        :param install_optional: True to request install optional dependencies.
        :param production_only: True to only install production dependencies, i.e.
          ignore devDependencies.
        :param force: True to force re-download dependencies.
        :rtype: list of strings
        """
        raise NotImplementedError

    def _get_run_script_args(self):
        """Returns command line args to run a package.json script.

        :rtype: list of strings
        """
        raise NotImplementedError

    def _get_add_package_args(self, package, type_option, version_option):
        """Returns command line args to add a node package.

        :rtype: list of strings
        """
        # Fixed: docstring typo ("pacakge") and normalized to the same bare
        # `raise NotImplementedError` form used by the other hooks.
        raise NotImplementedError

    def run_command(self, args=None, node_paths=None):
        """Returns a command that when executed will run an arbitrary command via package manager."""
        return command_gen(
            self.tool_installations,
            self.name,
            args=args,
            node_paths=node_paths
        )

    def install_module(
        self,
        install_optional=False,
        production_only=False,
        force=False,
        node_paths=None):
        """Returns a command that when executed will install node package.

        :param install_optional: True to install optional dependencies.
        :param production_only: True to only install production dependencies, i.e.
          ignore devDependencies.
        :param force: True to force re-download dependencies.
        :param node_paths: A list of path that should be included in $PATH when
          running installation.
        """
        args = self._get_installation_args(
            install_optional=install_optional,
            production_only=production_only,
            force=force)
        return self.run_command(args=args, node_paths=node_paths)

    def run_script(self, script_name, script_args=None, node_paths=None):
        """Returns a command to execute a package.json script.

        :param script_name: Name of the script to name. Note that script name 'test'
          can be used to run node tests.
        :param script_args: Args to be passed to package.json script.
        :param node_paths: A list of path that should be included in $PATH when
          running the script.
        """
        # TODO: consider add a pants.util function to manipulate command line.
        package_manager_args = self._get_run_script_args()
        package_manager_args.append(script_name)
        if script_args:
            # '--' separates the package manager's own options from the
            # arguments destined for the script itself.
            package_manager_args.append('--')
            package_manager_args.extend(script_args)
        return self.run_command(args=package_manager_args, node_paths=node_paths)

    def add_package(
        self,
        package,
        node_paths=None,
        type_option=PackageInstallationTypeOption.PROD,
        version_option=None):
        """Returns a command that when executed will add a node package to current node module.

        :param package: string. A valid npm/yarn package description. The accepted forms are
          package-name, package-name@version, package-name@tag, file:/folder, file:/path/to.tgz
          https://url/to.tgz
        :param node_paths: A list of path that should be included in $PATH when
          running the script.
        :param type_option: A value from PackageInstallationTypeOption that indicates the type
          of package to be installed. Default to 'prod', which is a production dependency.
        :param version_option: A value from PackageInstallationVersionOption that indicates how
          to match version. Default to None, which uses package manager default.
        """
        args = self._get_add_package_args(
            package,
            type_option=type_option,
            version_option=version_option)
        return self.run_command(args=args, node_paths=node_paths)

    def run_cli(self, cli, args=None, node_paths=None):
        """Returns a command that when executed will run an installed cli via package manager."""
        cli_args = [cli]
        if args:
            cli_args.append('--')
            cli_args.extend(args)
        return self.run_command(args=cli_args, node_paths=node_paths)
class PackageManagerYarnpkg(PackageManager):
    """PackageManager implementation for yarnpkg."""

    def __init__(self, tool_installation):
        super(PackageManagerYarnpkg, self).__init__(PACKAGE_MANAGER_YARNPKG, tool_installation)

    def _get_run_script_args(self):
        return ['run']

    def _get_installation_args(self, install_optional, production_only, force):
        """Translate the generic installation options into yarn flags."""
        return_args = ['--non-interactive']
        if not install_optional:
            return_args.append('--ignore-optional')
        if production_only:
            return_args.append('--production=true')
        if force:
            return_args.append('--force')
        return return_args

    def _get_add_package_args(self, package, type_option, version_option):
        """Translate the generic add-package options into yarn flags."""
        return_args = ['add', package]
        package_type_option = {
            PackageInstallationTypeOption.PROD: '',  # Yarn save production is the default.
            PackageInstallationTypeOption.DEV: '--dev',
            PackageInstallationTypeOption.PEER: '--peer',
            PackageInstallationTypeOption.OPTIONAL: '--optional',
            PackageInstallationTypeOption.BUNDLE: None,
            PackageInstallationTypeOption.NO_SAVE: None,
        }.get(type_option)
        if package_type_option is None:
            # Fixed: use the module logger (LOG) rather than the root logger.
            LOG.warning('{} does not support {} packages, ignored.'.format(self.name, type_option))
        elif package_type_option:  # Skip over '' entries
            return_args.append(package_type_option)
        package_version_option = {
            PackageInstallationVersionOption.EXACT: '--exact',
            PackageInstallationVersionOption.TILDE: '--tilde',
        }.get(version_option)
        if package_version_option is None:
            # Fixed: version_option defaults to None, which means "use the
            # package manager default" (see add_package docstring) — do not
            # emit a spurious "does not support None version" warning for it.
            if version_option is not None:
                LOG.warning(
                    '{} does not support install with {} version, ignored'.format(self.name, version_option))
        elif package_version_option:  # Skip over '' entries
            return_args.append(package_version_option)
        return return_args
class PackageManagerNpm(PackageManager):
    """PackageManager implementation for npm."""

    def __init__(self, tool_installation):
        super(PackageManagerNpm, self).__init__(PACKAGE_MANAGER_NPM, tool_installation)

    def _get_run_script_args(self):
        return ['run-script']

    def _get_installation_args(self, install_optional, production_only, force):
        """Translate the generic installation options into npm flags."""
        return_args = ['install']
        if not install_optional:
            return_args.append('--no-optional')
        if production_only:
            return_args.append('--production')
        if force:
            return_args.append('--force')
        return return_args

    def _get_add_package_args(self, package, type_option, version_option):
        """Translate the generic add-package options into npm flags."""
        return_args = ['install', package]
        package_type_option = {
            PackageInstallationTypeOption.PROD: '--save-prod',
            PackageInstallationTypeOption.DEV: '--save-dev',
            PackageInstallationTypeOption.PEER: None,
            PackageInstallationTypeOption.OPTIONAL: '--save-optional',
            PackageInstallationTypeOption.BUNDLE: '--save-bundle',
            PackageInstallationTypeOption.NO_SAVE: '--no-save',
        }.get(type_option)
        if package_type_option is None:
            # Fixed: use the module logger (LOG) rather than the root logger.
            LOG.warning('{} does not support {} packages, ignored.'.format(self.name, type_option))
        elif package_type_option:  # Skip over '' entries
            return_args.append(package_type_option)
        package_version_option = {
            PackageInstallationVersionOption.EXACT: '--save-exact',
            PackageInstallationVersionOption.TILDE: None,
        }.get(version_option)
        if package_version_option is None:
            # Fixed: version_option defaults to None, which means "use the
            # package manager default" (see add_package docstring) — do not
            # emit a spurious "does not support None version" warning for it.
            if version_option is not None:
                LOG.warning(
                    '{} does not support install with {} version, ignored.'.format(self.name, version_option))
        elif package_version_option:  # Skip over '' entries
            return_args.append(package_version_option)
        return return_args

    def run_cli(self, cli, args=None, node_paths=None):
        raise RuntimeError('npm does not support run cli directly. Please use Yarn instead.')
|
{
"content_hash": "11a6469bfee557808436a6ada7a7947e",
"timestamp": "",
"source": "github",
"line_count": 232,
"max_line_length": 102,
"avg_line_length": 37.28879310344828,
"alnum_prop": 0.701652988093862,
"repo_name": "UnrememberMe/pants",
"id": "5ed8dad3bd05881b8fb5db56ee7c137edf576bd2",
"size": "8798",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contrib/node/src/python/pants/contrib/node/subsystems/package_managers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "343"
},
{
"name": "C++",
"bytes": "1138"
},
{
"name": "CSS",
"bytes": "9444"
},
{
"name": "GAP",
"bytes": "1283"
},
{
"name": "Gherkin",
"bytes": "919"
},
{
"name": "Go",
"bytes": "1805"
},
{
"name": "HTML",
"bytes": "48321"
},
{
"name": "Java",
"bytes": "490360"
},
{
"name": "JavaScript",
"bytes": "33289"
},
{
"name": "Python",
"bytes": "5767085"
},
{
"name": "Rust",
"bytes": "427157"
},
{
"name": "Scala",
"bytes": "75938"
},
{
"name": "Shell",
"bytes": "75470"
},
{
"name": "Thrift",
"bytes": "2953"
}
],
"symlink_target": ""
}
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from .settings import VOLUME_STATES, VOLUME_TYPES, VOLUME_STATE_CREATING, VOLUME_TYPE_VOLUME
class Volume(models.Model):
    """Block-storage volume record, owned by a user and optionally attached
    to an instance. ``volume_id`` holds the backing OS volume UUID."""

    id = models.AutoField(primary_key=True)
    name = models.CharField(_('Volume name'), null=False, blank=False, max_length=128)
    # Nullable: the UUID is only known once the backend has created the volume.
    volume_id = models.CharField(_('OS Volume UUID'), null=True, blank=False, max_length=128)
    size = models.IntegerField(_('Volume size'), null=False, blank=False)
    volume_type = models.IntegerField(_('Volume Type'), choices=VOLUME_TYPES, default=VOLUME_TYPE_VOLUME)
    status = models.IntegerField(_('Status'), choices=VOLUME_STATES, default=VOLUME_STATE_CREATING)
    create_date = models.DateTimeField(_("Create Date"), auto_now_add=True)
    # Soft-delete flag; rows are flagged rather than removed.
    deleted = models.BooleanField(_("Deleted"), default=False)

    # User info.
    # (Fixed: this section header was a bare triple-quoted string — a no-op
    # expression statement, not a comment — now a real comment.)
    user = models.ForeignKey('auth.User')
    user_data_center = models.ForeignKey('idc.UserDataCenter')
    instance = models.ForeignKey('instance.Instance', null=True, blank=True)

    class Meta:
        db_table = "volume"
        ordering = ['-create_date']
        verbose_name = _("Volume")
        verbose_name_plural = _("Volume")
|
{
"content_hash": "0e744431a2b2764b03c30ecc3d11803c",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 106,
"avg_line_length": 43.75,
"alnum_prop": 0.6906122448979591,
"repo_name": "lyndonChen/eonboard",
"id": "ca92caea0c2f8ff04ebe2ce82536c3fc8a3ae916",
"size": "1249",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "eoncloud_web/biz/volume/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "881794"
},
{
"name": "HTML",
"bytes": "372758"
},
{
"name": "JavaScript",
"bytes": "472042"
},
{
"name": "Python",
"bytes": "582493"
}
],
"symlink_target": ""
}
|
from gluon import *
from s3 import *
from s3layouts import *
try:
from .layouts import *
except ImportError:
pass
import s3menus as default
class S3MainMenu(default.S3MainMenu):
    """
    Custom Application Main Menu:

    The main menu consists of several sub-menus, each of which can
    be customised separately as a method of this class. The overall
    composition of the menu is defined in the menu() method, which can
    be customised as well:

        Function        Sub-Menu                Access to (standard)

        menu_modules()  the modules menu        the Eden modules
        menu_gis()      the GIS menu            GIS configurations
        menu_admin()    the Admin menu          System/User Administration
        menu_lang()     the Language menu       Selection of the GUI locale
        menu_auth()     the User menu           Login, Logout, User Profile
        menu_help()     the Help menu           Contact page, About page

    The standard uses the MM layout class for main menu items - but you
    can of course use a custom layout class which you define in layouts.py.

    Additional sub-menus can simply be defined as additional functions in
    this class, and then be included in the menu() method.

    Each sub-menu function returns a list of menu items, only the menu()
    function must return a layout class instance.
    """

    # -------------------------------------------------------------------------
    @classmethod
    def menu(cls):
        """ Compose Menu """

        main_menu = MM()(

            # Modules-menu, align-left
            cls.menu_modules(),

            # Service menus, align-right
            # Note: always define right-hand items in reverse order!
            cls.menu_help(right=True),
            cls.menu_auth(right=True),
            cls.menu_lang(right=True),
            cls.menu_admin(right=True),
            cls.menu_gis(right=True)
        )
        return main_menu

    # -------------------------------------------------------------------------
    @classmethod
    def menu_modules(cls):
        """ Custom Modules Menu """

        return [
            homepage(),
            homepage("project"),
            homepage("req", f="req")(
                MM("Fulfill Requests", f="req"),
                # type=1 preselects a "supplies" request, type=3 a "people"
                # request (matching the vars used in S3OptionsMenu.req()).
                MM("Request Supplies", f="req", m="create", vars={"type": 1}),
                MM("Request People", f="req", m="create", vars={"type": 3})
            ),
            MM("Locations", c="gis")(
                MM("Facilities", c="org", f="facility"),
                #MM("Warehouses", c="inv", f="warehouse"),
                MM("Create a Facility", c="org", f="facility", m="create")
            ),
            MM("Contacts", c="hrm", f="staff")(
                MM("Staff", c="hrm", f="staff"),
                MM("Groups", c="hrm", f="group"),
                MM("Organizations", c="org", f="organisation"),
                MM("Networks", c="org", f="group"),
                #MM("People Registry", c="pr", f="index")
            ),
            # External resources page plus inventory-related entries.
            MM("Resources", url="http://occupysandy.net/resources/coordination/")(
                MM("Assets", c="asset", f="asset"),
                MM("Inventory", c="inv", f="inv_item"),
                MM("Stock Counts", c="inv", f="adj"),
                MM("Shipments", c="inv", f="send")
            ),
            MM("Cases", c="assess", f="building")(
                MM("Building Assessments", f="building"),
                MM("Canvass", f="canvass"),
            ),
            MM("Survey", c="survey")(
                MM("Templates", f="template"),
                MM("Assessments", f="series"),
                MM("Import Templates", f="question_list", m="import"),
            ),
        ]
# =============================================================================
class S3OptionsMenu(default.S3OptionsMenu):
    """
    Custom Controller Menus

    The options menu (left-hand options menu) is individual for each
    controller, so each controller has its own options menu function
    in this class.

    Each of these option menu functions can be customised separately,
    by simply overriding (re-defining) the default function. The
    options menu function must return an instance of the item layout.

    The standard menu uses the M item layout class, but you can of
    course also use any other layout class which you define in
    layouts.py (can also be mixed).

    Make sure additional helper functions in this class don't match
    any current or future controller prefix (e.g. by using an
    underscore prefix).
    """

    # -------------------------------------------------------------------------
    def inv(self):
        """ INV / Inventory """

        ADMIN = current.session.s3.system_roles.ADMIN

        return M()(
                    M("Facilities", c="inv", f="facility")(
                        M("New", m="create"),
                        #M("Search"),
                        M("Map", m="map"),
                        M("Import", m="import")
                    ),
                    M("Warehouse Stock", c="inv", f="inv_item")(
                        #M("Search"),
                        #M("Search Shipped Items", f="track_item"),
                        M("Stock Count", f="adj"),
                        #M("Kitting", f="kit"),
                        M("Import", f="inv_item", m="import", p="create"),
                    ),
                    M("Reports", c="inv", f="inv_item")(
                        M("Warehouse Stock", f="inv_item", m="report"),
                        M("Expiration Report", c="inv", f="track_item",
                          vars=dict(report="exp")),
                        #M("Monetization Report", c="inv", f="inv_item",
                        #  vars=dict(report="mon")),
                        #M("Utilization Report", c="inv", f="track_item",
                        #  vars=dict(report="util")),
                        #M("Summary of Incoming Supplies", c="inv", f="track_item",
                        #  vars=dict(report="inc")),
                        #M("Summary of Releases", c="inv", f="track_item",
                        #  vars=dict(report="rel")),
                    ),
                    #M(inv_recv_list, c="inv", f="recv")(
                    #    M("New", m="create"),
                    #    #M("Search"),
                    #),
                    M("Sent Shipments", c="inv", f="send")(
                        M("New", m="create"),
                        #M("Search"),
                        M("Search Shipped Items", f="track_item"),
                    ),
                    M("Items", c="supply", f="item")(
                        M("New", m="create"),
                        #M("Search"),
                        M("Report", m="report"),
                        M("Import", f="catalog_item", m="import", p="create"),
                    ),
                    # Item categories are restricted to administrators.
                    M("Item Categories", c="supply", f="item_category",
                      restrict=[ADMIN])(
                        M("New", m="create"),
                        #M("Search"),
                    ),
                )

    # -------------------------------------------------------------------------
    def hrm(self):
        """ HRM / Human Resources Management """

        s3 = current.session.s3
        ADMIN = s3.system_roles.ADMIN

        # Custom conditions for the check-hook, as lambdas in order
        # to have them checked only immediately before rendering:
        manager_mode = lambda i: s3.hrm.mode is None
        personal_mode = lambda i: s3.hrm.mode is not None
        is_org_admin = lambda i: s3.hrm.orgs and True or \
                                 ADMIN in s3.roles

        settings = current.deployment_settings
        teams = settings.get_hrm_teams()
        use_teams = lambda i: teams

        return M(c="hrm")(
                    M(settings.get_hrm_staff_label(), f="staff",
                      check=manager_mode)(
                        M("New", m="create"),
                        #M("Search"),
                        M("Import", f="person", m="import",
                          vars={"group":"staff"}, p="create"),
                    ),
                    M(teams, f="group",
                      check=[manager_mode, use_teams])(
                        M("New", m="create"),
                        #M("Search"),
                    ),
                    M("Department Catalog", f="department",
                      check=manager_mode)(
                        M("New", m="create"),
                        #M("Search"),
                    ),
                    M("Job Title Catalog", f="job_title",
                      check=manager_mode)(
                        M("New", m="create"),
                        #M("Search"),
                    ),
                    M("Skill Catalog", f="skill",
                      check=manager_mode)(
                        M("New", m="create"),
                        #M("Search"),
                        #M("Skill Provisions", f="skill_provision"),
                    ),
                    #M("Training Events", f="training_event",
                    #  check=manager_mode)(
                    #    M("New", m="create"),
                    #    #M("Search"),
                    #    M("Search Training Participants", f="training"),
                    #    M("Import Participant List", f="training", m="import"),
                    #),
                    #M("Training Course Catalog", f="course",
                    #  check=manager_mode)(
                    #    M("New", m="create"),
                    #    #M("Search"),
                    #    #M("Course Certificates", f="course_certificate"),
                    #),
                    #M("Certificate Catalog", f="certificate",
                    #  check=manager_mode)(
                    #    M("New", m="create"),
                    #    #M("Search"),
                    #    #M("Skill Equivalence", f="certificate_skill"),
                    #),
                    #M("Reports", f="staff", m="report",
                    #  check=manager_mode)(
                    #    M("Staff Report", m="report"),
                    #    M("Expiring Staff Contracts Report",
                    #      vars=dict(expiring=1)),
                    #    M("Training Report", f="training", m="report"),
                    #),
                    M("Personal Profile", f="person",
                      check=personal_mode, vars=dict(mode="personal")),
                    # This provides the link to switch to the manager mode:
                    M("Staff Management", f="index",
                      check=[personal_mode, is_org_admin]),
                    # This provides the link to switch to the personal mode:
                    M("Personal Profile", f="person",
                      check=manager_mode, vars=dict(mode="personal"))
                )

    # -------------------------------------------------------------------------
    def org(self):
        """ ORG / Organization Registry """

        #ADMIN = current.session.s3.system_roles.ADMIN

        return M(c="org")(
                    M("Facilities", f="facility")(
                        M("New", m="create"),
                        #M("Review/Approve New", m="review"),
                        #M("Search"),
                        M("Map", m="map"),
                        M("Import", m="import")
                    ),
                    M("Organizations", f="organisation")(
                        M("New", m="create"),
                        #M("Search"),
                        M("Import", m="import")
                    ),
                    M("Facility Types", f="facility_type",
                      #restrict=[ADMIN]
                      )(
                        M("New", m="create"),
                        #M("Search"),
                    ),
                    M("Networks", f="group",
                      #restrict=[ADMIN]
                      )(
                        M("New", m="create"),
                        #M("Search"),
                    ),
                    M("Organization Types", f="organisation_type",
                      #restrict=[ADMIN]
                      )(
                        M("New", m="create"),
                        #M("Search"),
                    ),
                )

    # -------------------------------------------------------------------------
    def req(self):
        """ REQ / Request Management """

        db = current.db
        # NOTE(review): 's3db' is not defined in this method's scope — if this
        # lambda is ever evaluated it likely raises NameError (unless
        # `from s3 import *` injects it at module level). Confirm against the
        # restrict-hook usage before relying on the SUPER restriction.
        SUPER = lambda i: \
                db(db.auth_group.uuid=="super").select(db.auth_group.id,
                                                       limitby=(0, 1),
                                                       cache=s3db.cache
                                                       ).first().id

        return M(c="req")(
                    M("Requests", f="req")(
                        # type=1 = supplies request, type=3 = people request
                        # (same vars as the main menu's Request entries).
                        M("Request Supplies", m="create", vars={"type": 1}),
                        M("Request People", m="create", vars={"type": 3}),
                        M("Fulfill Requests"),
                        M("List Recurring Requests", f="req_template"),
                        #M("Map", m="map"),
                        M("Report", m="report"),
                        M("FEMA Items Required", f="fema",
                          restrict=[SUPER]),
                        M("Search Requested Items", f="req_item"),
                        M("Search Requested Skills", f="req_skill"),
                    ),
                    #M("Priority Items", f="summary_option")(
                    #    M("New", m="create"),
                    #    M("Search"),
                    #),
                    M("Commitments", f="commit")(
                        #M("Search")
                    ),
                    M("Sent Shipments", f="send")(
                        #M("New", m="create"),
                        #M("Search"),
                        #M("Search Shipped Items", f="track_item"),
                    ),
                    M("Items", c="supply", f="item",
                      restrict=[SUPER])(
                        M("New", m="create"),
                        #M("Search"),
                        M("Report", m="report"),
                        M("Import", f="catalog_item", m="import", p="create"),
                    ),
                    M("Item Categories", c="supply", f="item_category",
                      restrict=[SUPER])(
                        M("New", m="create"),
                        #M("Search"),
                    ),
                )

# END =========================================================================
|
{
"content_hash": "8e4e7a46315bab287b9ee3a3f9ccdd71",
"timestamp": "",
"source": "github",
"line_count": 347,
"max_line_length": 83,
"avg_line_length": 42.815561959654175,
"alnum_prop": 0.39092683583495996,
"repo_name": "gnarula/eden_deployment",
"id": "7a27b3b168440c31d6985e74866bb7f5e41f31ec",
"size": "14882",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "private/templates/SandyRelief/menus.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1305178"
},
{
"name": "JavaScript",
"bytes": "16338028"
},
{
"name": "PHP",
"bytes": "15220"
},
{
"name": "Perl",
"bytes": "500"
},
{
"name": "Python",
"bytes": "28218113"
},
{
"name": "Shell",
"bytes": "893"
},
{
"name": "XSLT",
"bytes": "2491556"
}
],
"symlink_target": ""
}
|
'''
:codeauthor: :email:`Pedro Algarvio (pedro@algarvio.me)`
tests.integration.modules.pw_user
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
'''
# Import python libs
from __future__ import absolute_import
import os
import string
import random
# Import Salt Testing libs
from salttesting import skipIf
from salttesting.helpers import destructiveTest, ensure_in_syspath
ensure_in_syspath('../../')
# Import salt libs
import integration
# Import 3rd-party libs
from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin
class PwUserModuleTest(integration.ModuleCase):
    """Integration tests for the pw_user execution module (FreeBSD only)."""

    def setUp(self):
        # Everything here is FreeBSD-specific; skip on other kernels.
        super(PwUserModuleTest, self).setUp()
        os_grain = self.run_function('grains.item', ['kernel'])
        if os_grain['kernel'] != 'FreeBSD':
            message = 'Test not applicable to \'{kernel}\' kernel'.format(
                **os_grain)
            self.skipTest(message)

    def __random_string(self, size=6):
        # Short random uppercase/digit name for throwaway users/groups.
        alphabet = string.ascii_uppercase + string.digits
        return ''.join(random.choice(alphabet) for _ in range(size))

    @destructiveTest
    @skipIf(os.geteuid() != 0, 'you must be root to run this test')
    def test_groups_includes_primary(self):
        # Creating a user normally also creates the group matching its name.
        user_name = self.__random_string()
        if self.run_function('user.add', [user_name]) is not True:
            # User creation itself is not under test here; clean up and skip.
            self.run_function('user.delete', [user_name, True, True])
            self.skipTest('Failed to create user')
        try:
            user_info = self.run_function('user.info', [user_name])
            self.assertIn(user_name, user_info['groups'])
            # Remember the now-free uid, then drop the user again.
            free_uid = user_info['uid']
            self.run_function('user.delete', [user_name, True, True])
            # Create a group whose name differs from the user name ...
            group_name = self.__random_string()
            if self.run_function('group.add', [group_name]) is not True:
                self.run_function('group.delete', [group_name, True, True])
                self.skipTest('Failed to create group')
            group_info = self.run_function('group.info', [group_name])
            # ... and re-create the user with that group as its primary gid.
            if self.run_function(
                    'user.add', [user_name, free_uid, group_info['gid']]) is False:
                self.run_function('user.delete', [user_name, True, True])
                self.skipTest('Failed to create user')
            user_info = self.run_function('user.info', [user_name])
            self.assertIn(group_name, user_info['groups'])
        except AssertionError:
            self.run_function('user.delete', [user_name, True, True])
            raise
if __name__ == '__main__':
    # Allow running this test module directly from the command line.
    from integration import run_tests
    run_tests(PwUserModuleTest)
|
{
"content_hash": "a8f822230de4cbde93b8b4c557857800",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 86,
"avg_line_length": 33.08045977011494,
"alnum_prop": 0.5868658790826963,
"repo_name": "stephane-martin/salt-debian-packaging",
"id": "9fe7e21cc6e08ae43f010a003fa64dd18947e705",
"size": "2902",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "salt-2016.3.2/tests/integration/modules/pw_user.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "13798"
},
{
"name": "C",
"bytes": "986"
},
{
"name": "Groff",
"bytes": "13634346"
},
{
"name": "HTML",
"bytes": "39558"
},
{
"name": "Makefile",
"bytes": "20902"
},
{
"name": "NSIS",
"bytes": "22316"
},
{
"name": "PowerShell",
"bytes": "38719"
},
{
"name": "Python",
"bytes": "40857506"
},
{
"name": "SaltStack",
"bytes": "58278"
},
{
"name": "Scheme",
"bytes": "1790"
},
{
"name": "Shell",
"bytes": "829927"
},
{
"name": "Tcl",
"bytes": "6532"
},
{
"name": "TeX",
"bytes": "11632"
}
],
"symlink_target": ""
}
|
"""SI URL Feeds Class."""
from fmcapi.api_objects.apiclasstemplate import APIClassTemplate
import logging
class SIUrlFeeds(APIClassTemplate):
    """Represents the SIUrlFeeds object in the FMC.

    Read-only endpoint: POST/PUT/DELETE are stubbed out below because the
    FMC API does not support them for this object type.
    """

    # JSON fields the FMC returns for this object type.
    VALID_JSON_DATA = [
        "id",
        "name",
        "type",
        "checksumURL",
        "feedURL",
        "updateFrequency",
        "overrides",
        "overridable",
    ]
    # Every JSON field may also be supplied as a constructor kwarg.
    VALID_FOR_KWARGS = list(VALID_JSON_DATA)
    URL_SUFFIX = "/object/siurlfeeds"

    def __init__(self, fmc, **kwargs):
        """
        Initialize SIUrlFeeds object.

        :param fmc: (object) FMC object
        :param kwargs: Any other values passed during instantiation.
        :return: None
        """
        super().__init__(fmc, **kwargs)
        logging.debug("In __init__() for SIUrlFeeds class.")
        self.parse_kwargs(**kwargs)

    def post(self):
        """POST method for API for SIUrlFeeds not supported."""
        logging.info("POST method for API for SIUrlFeeds not supported.")

    def put(self):
        """PUT method for API for SIUrlFeeds not supported."""
        logging.info("PUT method for API for SIUrlFeeds not supported.")

    def delete(self):
        """DELETE method for API for SIUrlFeeds not supported."""
        logging.info("DELETE method for API for SIUrlFeeds not supported.")
|
{
"content_hash": "cf03a27317aecef7fdbccc7aaeb901bc",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 75,
"avg_line_length": 28.145833333333332,
"alnum_prop": 0.5943745373797187,
"repo_name": "daxm/fmcapi",
"id": "b40bd83407a1845a6e9529fcd8b0d4e39fea59bd",
"size": "1351",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fmcapi/api_objects/object_services/siurlfeeds.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "985"
},
{
"name": "Python",
"bytes": "572788"
},
{
"name": "Shell",
"bytes": "1591"
}
],
"symlink_target": ""
}
|
def benchmark_hash_data():
    """
    Benchmark ub.hash_data across hash backends and input sizes.

    Prints per-scale wall times and speedup ratios, then (optionally)
    plots the results.

    CommandLine:
        python ~/code/ubelt/dev/bench_hash.py --convert=True --show
        python ~/code/ubelt/dev/bench_hash.py --convert=False --show
    """
    import ubelt as ub
    #ITEM = 'JUST A STRING' * 100
    ITEM = [0, 1, 'a', 'b', ['JUST A STRING'] * 4]
    HASHERS = ['sha1', 'sha512', 'xxh32', 'xxh64', 'blake3']
    scales = list(range(5, 13))
    results = ub.AutoDict()
    # Use json is faster or at least as fast it most cases
    # xxhash is also significantly faster than sha512
    # BUGFIX: compare against 'true' — the value has already been lowercased,
    # so '== "True"' could never match and --convert was effectively always
    # False regardless of the flag.
    convert = ub.argval('--convert', default='True').lower() == 'true'
    print('convert = {!r}'.format(convert))
    ti = ub.Timerit(9, bestof=3, verbose=1, unit='ms')
    for s in ub.ProgIter(scales, desc='benchmark', verbose=3):
        N = 2 ** s
        print(' --- s={s}, N={N} --- '.format(s=s, N=N))
        data = [ITEM] * N
        for hasher in HASHERS:
            for timer in ti.reset(hasher):
                ub.hash_data(data, hasher=hasher, convert=convert)
            results[hasher].update({N: ti.mean()})
        # Rank the hashers by mean wall time at this scale.
        col = {h: results[h][N] for h in HASHERS}
        sortx = ub.argsort(col)
        ranking = ub.dict_subset(col, sortx)
        print('walltime: ' + ub.repr2(ranking, precision=9, nl=0))
        best = next(iter(ranking))
        pairs = [(k, best) for k in ranking]
        ratios = [ranking[k1] / ranking[k2] for k1, k2 in pairs]
        nicekeys = ['{}/{}'.format(k1, k2) for k1, k2 in pairs]
        relratios = ub.odict(zip(nicekeys, ratios))
        print('speedup: ' + ub.repr2(relratios, precision=4, nl=0))
    # Summarize all scales in tabular form.
    import pandas as pd
    df = pd.DataFrame.from_dict(results)
    df.columns.name = 'hasher'
    df.index.name = 'N'
    ratios = df.copy().drop(columns=df.columns)
    for k1, k2 in [('sha512', 'xxh32'), ('sha1', 'xxh32'), ('xxh64', 'xxh32')]:
        ratios['{}/{}'.format(k1, k2)] = df[k1] / df[k2]
    print()
    print('Seconds per iteration')
    print(df.to_string(float_format='%.9f'))
    print()
    print('Ratios of seconds')
    print(ratios.to_string(float_format='%.2f'))
    print()
    print('Average Ratio (over all N)')
    print('convert = {!r}'.format(convert))
    print(ratios.mean().sort_values())
    if ub.argflag('--show'):
        import kwplot
        kwplot.autompl()
        xdata = sorted(ub.peek(results.values()).keys())
        ydata = ub.map_values(lambda d: [d[x] for x in xdata], results)
        kwplot.multi_plot(xdata, ydata, xlabel='N', ylabel='seconds',
                          title='convert = {}'.format(convert))
        kwplot.show_if_requested()
def benchmark_hash_extensions():
    """
    Time ub.hash_data over registered extension types (Path, UUID, ndarray).

    (Fixed: the docstring previously opened with four quote characters,
    leaving a stray quote in the docstring text.)

    CommandLine:
        xdoctest ~/code/ubelt/dev/bench/bench_hash.py benchmark_hash_extensions
    """
    import ubelt as ub
    import uuid
    import numpy as np
    # One representative instance per hashable extension type.
    datas = [
        ub.Path('/'),
        uuid.uuid4(),
        np.array([1, 2, 3])
    ]
    import timerit
    ti = timerit.Timerit(10000, bestof=10, verbose=2)
    for timer in ti.reset('time'):
        with timer:
            for data in datas:
                ub.hash_data(data)
if __name__ == '__main__':
    """
    CommandLine:
        python ~/code/ubelt/dev/bench_hash.py
    """
    # Run the main benchmark when invoked as a script.
    benchmark_hash_data()
|
{
"content_hash": "3ae7512dcde4ecd2bde052e7839caf3d",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 107,
"avg_line_length": 36.455555555555556,
"alnum_prop": 0.5681194757695824,
"repo_name": "Erotemic/ubelt",
"id": "70d70952e8e41f07bd1e12a989423fc3f3204f55",
"size": "3282",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "dev/bench/bench_hash.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "933319"
},
{
"name": "Shell",
"bytes": "34905"
}
],
"symlink_target": ""
}
|
import os
import sys
def environ(key, bool=False, required=True):
    """
    Read the environment variable ``key``.

    :param key: name of the environment variable.
    :param bool: when True, interpret the value as a boolean — True iff it
        equals "true" case-insensitively. NOTE: the parameter shadows the
        builtin ``bool``; the name is kept for backward compatibility with
        existing keyword-argument callers.
    :param required: when True, print an error to stderr and exit the
        process with status 2 if the variable is missing; otherwise
        return None.
    """
    if key not in os.environ:
        if required:
            # BUGFIX: terminate the message with a newline — unlike print(),
            # sys.stderr.write() does not add one.
            sys.stderr.write(
                "Set the environment variable {} correctly. It's required!\n".format(key)
            )
            sys.exit(2)
        return None
    if bool:
        return os.environ[key].lower() == "true"
    return os.environ[key]
|
{
"content_hash": "0ea740da054d31d920e35aacf258e303",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 87,
"avg_line_length": 23.705882352941178,
"alnum_prop": 0.5483870967741935,
"repo_name": "watchdogpolska/feder",
"id": "038a48d32c458a5e2e032c5ac73098a57b667edd",
"size": "403",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api_examples/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "624"
},
{
"name": "HTML",
"bytes": "183421"
},
{
"name": "JavaScript",
"bytes": "6245"
},
{
"name": "Makefile",
"bytes": "2086"
},
{
"name": "Python",
"bytes": "574027"
},
{
"name": "SCSS",
"bytes": "40546"
},
{
"name": "Shell",
"bytes": "214"
}
],
"symlink_target": ""
}
|
"""Conversion tool from EDF, EDF+, BDF to FIF
"""
# Authors: Teon Brooks <teon.brooks@gmail.com>
# Martin Billinger <martin.billinger@tugraz.at>
#
# License: BSD (3-clause)
import calendar
import datetime
import os
import re
import numpy as np
from ...utils import verbose, logger, warn
from ..utils import _blk_read_lims
from ..base import _BaseRaw, _check_update_montage
from ..meas_info import _empty_info
from ..constants import FIFF
from ...filter import resample
from ...externals.six.moves import zip
class RawEDF(_BaseRaw):
    """Raw object from EDF, EDF+, BDF file
    Parameters
    ----------
    input_fname : str
        Path to the EDF+,BDF file.
    montage : str | None | instance of Montage
        Path or instance of montage containing electrode positions.
        If None, sensor locations are (0,0,0). See the documentation of
        :func:`mne.channels.read_montage` for more information.
    eog : list or tuple
        Names of channels or list of indices that should be designated
        EOG channels. Values should correspond to the electrodes in the
        edf file. Default is None.
    misc : list or tuple
        Names of channels or list of indices that should be designated
        MISC channels. Values should correspond to the electrodes in the
        edf file. Default is None.
    stim_channel : str | int | None
        The channel name or channel index (starting at 0).
        -1 corresponds to the last channel (default).
        If None, there will be no stim channel added.
    annot : str | None
        Path to annotation file.
        If None, no derived stim channel will be added (for files requiring
        annotation file to interpret stim channel).
    annotmap : str | None
        Path to annotation map file containing mapping from label to trigger.
        Must be specified if annot is not None.
    preload : bool or str (default False)
        Preload data into memory for data manipulation and faster indexing.
        If True, the data will be preloaded into memory (fast, requires
        large amount of memory). If preload is a string, preload is the
        file name of a memory-mapped file which is used to store the data
        on the hard drive (slower, requires less memory).
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).
    See Also
    --------
    mne.io.Raw : Documentation of attribute and methods.
    """
    @verbose
    def __init__(self, input_fname, montage, eog=None, misc=None,
                 stim_channel=-1, annot=None, annotmap=None,
                 preload=False, verbose=None):
        logger.info('Extracting edf Parameters from %s...' % input_fname)
        input_fname = os.path.abspath(input_fname)
        # Parse the EDF/BDF header once; edf_info carries the reader-internal
        # bookkeeping (offsets, per-channel sample counts, scaling).
        info, edf_info = _get_edf_info(input_fname, stim_channel,
                                       annot, annotmap,
                                       eog, misc, preload)
        logger.info('Creating Raw.info structure...')
        _check_update_montage(info, montage)
        if bool(annot) != bool(annotmap):
            warn("Stimulus Channel will not be annotated. Both 'annot' and "
                 "'annotmap' must be specified.")
        # Raw attributes
        last_samps = [edf_info['nsamples'] - 1]
        super(RawEDF, self).__init__(
            info, preload, filenames=[input_fname], raw_extras=[edf_info],
            last_samps=last_samps, orig_format='int',
            verbose=verbose)
        logger.info('Ready.')
    @verbose
    def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
        """Read a chunk of raw data"""
        from scipy.interpolate import interp1d
        if mult is not None:
            # XXX "cals" here does not function the same way as in RawFIF,
            # and for efficiency we want to be able to combine mult and cals
            # so proj support will have to wait until this is resolved
            raise NotImplementedError('mult is not supported yet')
        # Indices of the channels the caller actually requested.
        sel = np.arange(self.info['nchan'])[idx]
        n_samps = self._raw_extras[fi]['n_samps']
        buf_len = int(self._raw_extras[fi]['max_samp'])
        sfreq = self.info['sfreq']
        n_chan = self.info['nchan']
        data_size = self._raw_extras[fi]['data_size']
        data_offset = self._raw_extras[fi]['data_offset']
        stim_channel = self._raw_extras[fi]['stim_channel']
        tal_channel = self._raw_extras[fi]['tal_channel']
        annot = self._raw_extras[fi]['annot']
        annotmap = self._raw_extras[fi]['annotmap']
        subtype = self._raw_extras[fi]['subtype']
        # gain constructor
        physical_range = np.array([ch['range'] for ch in self.info['chs']])
        cal = np.array([ch['cal'] for ch in self.info['chs']])
        gains = np.atleast_2d(self._raw_extras[fi]['units'] *
                              (physical_range / cal))
        # physical dimension in uV
        physical_min = np.atleast_2d(self._raw_extras[fi]['units'] *
                                     self._raw_extras[fi]['physical_min'])
        digital_min = self._raw_extras[fi]['digital_min']
        offsets = np.atleast_2d(physical_min - (digital_min * gains)).T
        if tal_channel is not None:
            # The annotation channel is raw ASCII data; never rescale it.
            offsets[tal_channel] = 0
        # Map the requested [start, stop) sample range onto whole data
        # records plus within-record (r_lims) and output (d_lims) slices.
        block_start_idx, r_lims, d_lims = _blk_read_lims(start, stop, buf_len)
        read_size = len(r_lims) * buf_len
        with open(self._filenames[fi], 'rb', buffering=0) as fid:
            # extract data
            start_offset = (data_offset +
                            block_start_idx * buf_len * n_chan * data_size)
            # Byte offset of each channel within one data record.
            ch_offsets = np.cumsum(np.concatenate([[0], n_samps * data_size]))
            this_data = np.empty((len(sel), buf_len))
            for bi in range(len(r_lims)):
                block_offset = bi * ch_offsets[-1]
                d_sidx, d_eidx = d_lims[bi]
                r_sidx, r_eidx = r_lims[bi]
                n_buf_samp = r_eidx - r_sidx
                for ii, ci in enumerate(sel):
                    n_samp = n_samps[ci]
                    # bdf data: 24bit data
                    fid.seek(start_offset + block_offset + ch_offsets[ci], 0)
                    if n_samp == buf_len:
                        # use faster version with skips built in
                        fid.seek(r_sidx * data_size, 1)
                        ch_data = _read_ch(fid, subtype, n_buf_samp, data_size)
                    else:
                        # read in all the data and triage appropriately
                        ch_data = _read_ch(fid, subtype, n_samp, data_size)
                        if ci == tal_channel:
                            # don't resample tal_channel,
                            # pad with zeros instead.
                            n_missing = int(buf_len - n_samp)
                            ch_data = np.hstack([ch_data, [0] * n_missing])
                            ch_data = ch_data[r_sidx:r_eidx]
                        elif ci == stim_channel:
                            if annot and annotmap or tal_channel is not None:
                                # don't bother with resampling the stim ch
                                # because it gets overwritten later on.
                                ch_data = np.zeros(n_buf_samp)
                            else:
                                # Stim channel will be interpolated
                                oldrange = np.linspace(0, 1, n_samp + 1, True)
                                newrange = np.linspace(0, 1, buf_len, False)
                                newrange = newrange[r_sidx:r_eidx]
                                ch_data = interp1d(
                                    oldrange, np.append(ch_data, 0),
                                    kind='zero')(newrange)
                        else:
                            # Regular channel sampled below max rate:
                            # upsample it to the common buffer length.
                            ch_data = resample(ch_data, buf_len, n_samp,
                                               npad=0)[r_sidx:r_eidx]
                    this_data[ii, :n_buf_samp] = ch_data
                data[:, d_sidx:d_eidx] = this_data[:, :n_buf_samp]
        # Convert digital values to physical units (y = gain * x + offset).
        data *= gains.T[sel]
        data += offsets[sel]
        # only try to read the stim channel if it's not None and it's
        # actually one of the requested channels
        if stim_channel is not None and (sel == stim_channel).sum() > 0:
            stim_channel_idx = np.where(sel == stim_channel)[0]
            if annot and annotmap:
                # Stim data comes from the external annotation files.
                evts = _read_annot(annot, annotmap, sfreq,
                                   self._last_samps[fi])
                data[stim_channel_idx, :] = evts[start:stop + 1]
            elif tal_channel is not None:
                # Stim data is synthesized from the EDF+ TAL annotations.
                tal_channel_idx = np.where(sel == tal_channel)[0][0]
                evts = _parse_tal_channel(data[tal_channel_idx])
                self._raw_extras[fi]['events'] = evts
                unique_annots = sorted(set([e[2] for e in evts]))
                mapping = dict((a, n + 1) for n, a in enumerate(unique_annots))
                stim = np.zeros(read_size)
                for t_start, t_duration, annotation in evts:
                    evid = mapping[annotation]
                    n_start = int(t_start * sfreq)
                    n_stop = int(t_duration * sfreq) + n_start - 1
                    # make sure events without duration get one sample
                    n_stop = n_stop if n_stop > n_start else n_start + 1
                    if any(stim[n_start:n_stop]):
                        raise NotImplementedError('EDF+ with overlapping '
                                                  'events not supported.')
                    stim[n_start:n_stop] = evid
                data[stim_channel_idx, :] = stim[start:stop]
            else:
                # Allows support for up to 17-bit trigger values (2 ** 17 - 1)
                stim = np.bitwise_and(data[stim_channel_idx].astype(int),
                                      131071)
                data[stim_channel_idx, :] = stim
def _read_ch(fid, subtype, samp, data_size):
"""Helper to read a number of samples for a single channel"""
if subtype in ('24BIT', 'bdf'):
ch_data = np.fromfile(fid, dtype=np.uint8,
count=samp * data_size)
ch_data = ch_data.reshape(-1, 3).astype(np.int32)
ch_data = ((ch_data[:, 0]) +
(ch_data[:, 1] << 8) +
(ch_data[:, 2] << 16))
# 24th bit determines the sign
ch_data[ch_data >= (1 << 23)] -= (1 << 24)
# edf data: 16bit data
else:
ch_data = np.fromfile(fid, dtype='<i2', count=samp)
return ch_data
def _parse_tal_channel(tal_channel_data):
"""Parse time-stamped annotation lists (TALs) in stim_channel
and return list of events.
Parameters
----------
tal_channel_data : ndarray, shape = [n_samples]
channel data in EDF+ TAL format
Returns
-------
events : list
List of events. Each event contains [start, duration, annotation].
References
----------
http://www.edfplus.info/specs/edfplus.html#tal
"""
# convert tal_channel to an ascii string
tals = bytearray()
for s in tal_channel_data:
i = int(s)
tals.extend([i % 256, i // 256])
regex_tal = '([+-]\d+\.?\d*)(\x15(\d+\.?\d*))?(\x14.*?)\x14\x00'
tal_list = re.findall(regex_tal, tals.decode('ascii'))
events = []
for ev in tal_list:
onset = float(ev[0])
duration = float(ev[2]) if ev[2] else 0
for annotation in ev[3].split('\x14')[1:]:
if annotation:
events.append([onset, duration, annotation])
return events
def _get_edf_info(fname, stim_channel, annot, annotmap, eog, misc, preload):
    """Extracts all the information from the EDF+,BDF file.

    Parses the fixed-layout ASCII header, builds the per-channel dicts for
    ``raw.info`` and collects reader bookkeeping (data offset, per-channel
    sample counts, scaling, stim/TAL channel indices) into ``edf_info``.

    Returns
    -------
    info : instance of Info
        Measurement info populated from the header.
    edf_info : dict
        Reader-internal metadata used by ``RawEDF._read_segment_file``.
    """
    if eog is None:
        eog = []
    if misc is None:
        misc = []
    edf_info = dict()
    edf_info['annot'] = annot
    edf_info['annotmap'] = annotmap
    edf_info['events'] = []
    with open(fname, 'rb') as fid:
        assert(fid.tell() == 0)
        fid.seek(168)  # Seek 8 + 80 bytes for Subject id + 80 bytes for rec id
        day, month, year = [int(x) for x in re.findall('(\d+)',
                                                       fid.read(8).decode())]
        hour, minute, sec = [int(x) for x in re.findall('(\d+)',
                                                        fid.read(8).decode())]
        # NOTE(review): two-digit years are mapped to 20xx here — recordings
        # from before 2000 would be mis-dated; confirm this is intended.
        date = datetime.datetime(year + 2000, month, day, hour, minute, sec)
        edf_info['data_offset'] = header_nbytes = int(fid.read(8).decode())
        # First 5 chars of the 44-byte "reserved" field identify the subtype
        # (e.g. '24BIT'); fall back to the file extension when empty.
        subtype = fid.read(44).strip().decode()[:5]
        if len(subtype) > 0:
            edf_info['subtype'] = subtype
        else:
            edf_info['subtype'] = os.path.splitext(fname)[1][1:].lower()
        edf_info['n_records'] = n_records = int(fid.read(8).decode())
        # record length in seconds
        record_length = float(fid.read(8).decode())
        if record_length == 0:
            edf_info['record_length'] = record_length = 1.
            warn('Header information is incorrect for record length. Default '
                 'record length set to 1.')
        else:
            edf_info['record_length'] = record_length
        nchan = int(fid.read(4).decode())
        channels = list(range(nchan))
        ch_names = [fid.read(16).strip().decode() for ch in channels]
        for ch in channels:
            fid.read(80)  # transducer
        # Unit strings become multiplicative factors: uV -> 1e-6, else 1.
        units = [fid.read(8).strip().decode() for ch in channels]
        for i, unit in enumerate(units):
            if unit == 'uV':
                units[i] = 1e-6
            else:
                units[i] = 1
        edf_info['units'] = units
        physical_min = np.array([float(fid.read(8).decode())
                                 for ch in channels])
        edf_info['physical_min'] = physical_min
        physical_max = np.array([float(fid.read(8).decode())
                                 for ch in channels])
        digital_min = np.array([float(fid.read(8).decode())
                                for ch in channels])
        edf_info['digital_min'] = digital_min
        digital_max = np.array([float(fid.read(8).decode())
                                for ch in channels])
        # NOTE(review): the trailing [:-1] drops the last channel's
        # prefiltering entry (presumably the annotation channel) — confirm.
        prefiltering = [fid.read(80).strip().decode() for ch in channels][:-1]
        highpass = np.ravel([re.findall('HP:\s+(\w+)', filt)
                             for filt in prefiltering])
        lowpass = np.ravel([re.findall('LP:\s+(\w+)', filt)
                            for filt in prefiltering])
        # number of samples per record
        n_samps = np.array([int(fid.read(8).decode()) for ch in channels])
        edf_info['n_samps'] = n_samps
        fid.read(32 * nchan).decode()  # reserved
        assert fid.tell() == header_nbytes
    physical_ranges = physical_max - physical_min
    cals = digital_max - digital_min
    if edf_info['subtype'] in ('24BIT', 'bdf'):
        edf_info['data_size'] = 3  # 24-bit (3 byte) integers
    else:
        edf_info['data_size'] = 2  # 16-bit (2 byte) integers
    # Creates a list of dicts of eeg channels for raw.info
    logger.info('Setting channel info structure...')
    chs = list()
    tal_ch_name = 'EDF Annotations'
    if tal_ch_name in ch_names:
        tal_channel = ch_names.index(tal_ch_name)
    else:
        tal_channel = None
    edf_info['tal_channel'] = tal_channel
    if tal_channel is not None and stim_channel is not None and not preload:
        raise RuntimeError('%s' % ('EDF+ Annotations (TAL) channel needs to be'
                                   ' parsed completely on loading.'
                                   ' You must set preload parameter to True.'))
    if stim_channel == -1:
        stim_channel = nchan - 1
    # pick_mask marks "regular" data channels; EOG/MISC/stim/TAL channels
    # are excluded from the max-sample-rate computation below.
    pick_mask = np.ones(len(ch_names))
    for idx, ch_info in enumerate(zip(ch_names, physical_ranges, cals)):
        ch_name, physical_range, cal = ch_info
        chan_info = {}
        chan_info['cal'] = cal
        chan_info['logno'] = idx + 1
        chan_info['scanno'] = idx + 1
        chan_info['range'] = physical_range
        chan_info['unit_mul'] = 0.
        chan_info['ch_name'] = ch_name
        chan_info['unit'] = FIFF.FIFF_UNIT_V
        chan_info['coord_frame'] = FIFF.FIFFV_COORD_HEAD
        chan_info['coil_type'] = FIFF.FIFFV_COIL_EEG
        chan_info['kind'] = FIFF.FIFFV_EEG_CH
        chan_info['loc'] = np.zeros(12)
        # eog/misc may contain names, positive indices or negative indices.
        if ch_name in eog or idx in eog or idx - nchan in eog:
            chan_info['coil_type'] = FIFF.FIFFV_COIL_NONE
            chan_info['kind'] = FIFF.FIFFV_EOG_CH
            pick_mask[idx] = False
        if ch_name in misc or idx in misc or idx - nchan in misc:
            chan_info['coil_type'] = FIFF.FIFFV_COIL_NONE
            chan_info['kind'] = FIFF.FIFFV_MISC_CH
            pick_mask[idx] = False
        check1 = stim_channel == ch_name
        check2 = stim_channel == idx
        check3 = nchan > 1
        stim_check = np.logical_and(np.logical_or(check1, check2), check3)
        if stim_check:
            chan_info['coil_type'] = FIFF.FIFFV_COIL_NONE
            chan_info['unit'] = FIFF.FIFF_UNIT_NONE
            chan_info['kind'] = FIFF.FIFFV_STIM_CH
            pick_mask[idx] = False
            chan_info['ch_name'] = 'STI 014'
            ch_names[idx] = chan_info['ch_name']
            units[idx] = 1
            if isinstance(stim_channel, str):
                stim_channel = idx
        if tal_channel == idx:
            chan_info['range'] = 1
            chan_info['cal'] = 1
            chan_info['coil_type'] = FIFF.FIFFV_COIL_NONE
            chan_info['unit'] = FIFF.FIFF_UNIT_NONE
            chan_info['kind'] = FIFF.FIFFV_MISC_CH
            pick_mask[idx] = False
        chs.append(chan_info)
    edf_info['stim_channel'] = stim_channel
    if any(pick_mask):
        picks = [item for item, mask in zip(range(nchan), pick_mask) if mask]
        edf_info['max_samp'] = max_samp = n_samps[picks].max()
    else:
        edf_info['max_samp'] = max_samp = n_samps.max()
    # sfreq defined as the max sampling rate of eeg
    sfreq = n_samps.max() / record_length
    info = _empty_info(sfreq)
    info['filename'] = fname
    info['meas_date'] = calendar.timegm(date.utctimetuple())
    info['chs'] = chs
    if highpass.size == 0:
        pass
    elif all(highpass):
        if highpass[0] == 'NaN':
            pass  # Placeholder for future use. Highpass set in _empty_info.
        elif highpass[0] == 'DC':
            info['highpass'] = 0.
        else:
            info['highpass'] = float(highpass[0])
    else:
        info['highpass'] = float(np.max(highpass))
        warn('Channels contain different highpass filters. Highest filter '
             'setting will be stored.')
    if lowpass.size == 0:
        pass
    elif all(lowpass):
        if lowpass[0] == 'NaN':
            pass  # Placeholder for future use. Lowpass set in _empty_info.
        else:
            info['lowpass'] = float(lowpass[0])
    else:
        info['lowpass'] = float(np.min(lowpass))
        warn('Channels contain different lowpass filters. Lowest filter '
             'setting will be stored.')
    # Some keys to be consistent with FIF measurement info
    info['description'] = None
    info['buffer_size_sec'] = 10.
    edf_info['nsamples'] = int(n_records * max_samp)
    # These are the conditions under which a stim channel will be interpolated
    if stim_channel is not None and not (annot and annotmap) and \
       tal_channel is None and n_samps[stim_channel] != int(max_samp):
        warn('Interpolating stim channel. Events may jitter.')
    info._update_redundant()
    return info, edf_info
def _read_annot(annot, annotmap, sfreq, data_length):
"""Annotation File Reader
Parameters
----------
annot : str
Path to annotation file.
annotmap : str
Path to annotation map file containing mapping from label to trigger.
sfreq : float
Sampling frequency.
data_length : int
Length of the data file.
Returns
-------
stim_channel : ndarray
An array containing stimulus trigger events.
"""
pat = '([+/-]\d+.\d+),(\w+)'
annot = open(annot).read()
triggers = re.findall(pat, annot)
times, values = zip(*triggers)
times = [float(time) * sfreq for time in times]
pat = '(\w+):(\d+)'
annotmap = open(annotmap).read()
mappings = re.findall(pat, annotmap)
maps = {}
for mapping in mappings:
maps[mapping[0]] = mapping[1]
triggers = [int(maps[value]) for value in values]
stim_channel = np.zeros(data_length)
for time, trigger in zip(times, triggers):
stim_channel[time] = trigger
return stim_channel
def read_raw_edf(input_fname, montage=None, eog=None, misc=None,
                 stim_channel=-1, annot=None, annotmap=None,
                 preload=False, verbose=None):
    """Reader function for EDF+, BDF conversion to FIF.

    Thin convenience wrapper: all arguments are forwarded unchanged to the
    :class:`RawEDF` constructor.

    Parameters
    ----------
    input_fname : str
        Path to the EDF+,BDF file.
    montage : str | None | instance of Montage
        Path or instance of montage containing electrode positions.
        If None, sensor locations are (0,0,0). See the documentation of
        :func:`mne.channels.read_montage` for more information.
    eog : list or tuple
        Names of channels or list of indices that should be designated
        EOG channels. Values should correspond to the electrodes in the
        edf file. Default is None.
    misc : list or tuple
        Names of channels or list of indices that should be designated
        MISC channels. Values should correspond to the electrodes in the
        edf file. Default is None.
    stim_channel : str | int | None
        The channel name or channel index (starting at 0).
        -1 corresponds to the last channel (default).
        If None, there will be no stim channel added.
    annot : str | None
        Path to annotation file. If None, no derived stim channel will be
        added (for files requiring annotation file to interpret stim
        channel).
    annotmap : str | None
        Path to annotation map file containing mapping from label to
        trigger. Must be specified if annot is not None.
    preload : bool or str (default False)
        Preload data into memory for data manipulation and faster indexing.
        If True, the data will be preloaded into memory (fast, requires
        large amount of memory). If preload is a string, preload is the
        file name of a memory-mapped file which is used to store the data
        on the hard drive (slower, requires less memory).
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    raw : Instance of RawEDF
        A Raw object containing EDF data.

    See Also
    --------
    mne.io.Raw : Documentation of attribute and methods.
    """
    return RawEDF(input_fname=input_fname, montage=montage, eog=eog,
                  misc=misc, stim_channel=stim_channel, annot=annot,
                  annotmap=annotmap, preload=preload, verbose=verbose)
|
{
"content_hash": "319035e025be019379bf417fb15f348f",
"timestamp": "",
"source": "github",
"line_count": 561,
"max_line_length": 79,
"avg_line_length": 41.3048128342246,
"alnum_prop": 0.5529949939582255,
"repo_name": "ARudiuk/mne-python",
"id": "de9635ef548ec0bb3d7fd2242036e8799d11376d",
"size": "23172",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mne/io/edf/edf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "3769"
},
{
"name": "PowerShell",
"bytes": "2988"
},
{
"name": "Python",
"bytes": "5086775"
},
{
"name": "Shell",
"bytes": "936"
}
],
"symlink_target": ""
}
|
import sys
from distutils.core import setup

name_ = 'algmoid'
version_ = '0.0.2'

# Informational commands must not trigger the code-generation/Cython
# machinery below.  BUGFIX: the original indexed sys.argv[1] unconditionally
# and raised IndexError on a bare ``python setup.py`` invocation.
if len(sys.argv) < 2 or '--help' in sys.argv[1:] or sys.argv[1] in (
        '--help-commands', 'egg_info', 'clean', '--version'):
    cmdclass_ = {}
    ext_modules_ = []
else:
    import numpy
    from pycodeexport.dist import pce_build_ext, PCEExtension

    # Names substituted into the mako templates rendered below.
    subsd = {
        'FUNCS': 'sigmoid Dsigmoid D2sigmoid asigmoid Dasigmoid exps Dexps logs Dlogs'.split(),
    }
    cmdclass_ = {'build_ext': pce_build_ext}

    def _render(build_temp, full_ext_path, ext, tmpl, out):
        # Render one mako template (callback signature fixed by pycodeexport).
        from pycodeexport.util import render_mako_template_to
        render_mako_template_to(tmpl, out, subsd)

    ext_modules_ = [
        PCEExtension(
            name_ + "._algmoid",
            sources=[name_ + '/_algmoid_template.pyx'],
            language='c++',
            include_dirs=['./include', numpy.get_include()],
            template_regexps=[
                (r'^(\w+)_template.(\w+)$', r'\1.\2', subsd),
            ],
            pycompilation_compile_kwargs={
                'std': 'c++0x',
                'options': ['pic', 'warn', 'fast']
            },
            pycompilation_link_kwargs={
                'libs': ['m']
            },
            build_callbacks=[
                (_render, ('./include/algmoid_template.h',
                           './include/algmoid.h'), {}),
                (_render, ('./include/algmoid_template.pxd',
                           './include/algmoid.pxd'), {}),
            ],
            logger=True,
        )
    ]

classifiers = [
    "Development Status :: 2 - Pre-Alpha",
    "Intended Audience :: Science/Research",
    "License :: OSI Approved :: BSD License",
    "Operating System :: OS Independent",
    "Programming Language :: Python",
    "Programming Language :: C++",
    "Programming Language :: Cython",
    "Topic :: Scientific/Engineering",
    "Topic :: Scientific/Engineering :: Mathematics",
]

setup(
    name=name_,
    version=version_,
    author='Björn Dahlgren',
    author_email='bjodah@DELETEMEgmail.com',
    description="Algebraic sigmoidal functions and damped exp/log",
    license="BSD",
    # BUGFIX: the classifiers list above was built but never passed along.
    classifiers=classifiers,
    url='https://github.com/bjodah/' + name_.lower(),
    download_url=('https://github.com/bjodah/' + name_.lower() +
                  '/archive/v' + version_ + '.tar.gz'),
    packages=[name_],
    ext_modules=ext_modules_,
    cmdclass=cmdclass_,
)
|
{
"content_hash": "aa4c84f2d1f804db80c06b660cdfdc6d",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 95,
"avg_line_length": 32.32876712328767,
"alnum_prop": 0.5457627118644067,
"repo_name": "bjodah/algmoid",
"id": "1ddac4fdb39164bfce8e470184c588802f0398e0",
"size": "2408",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C++",
"bytes": "1483"
},
{
"name": "Python",
"bytes": "11296"
}
],
"symlink_target": ""
}
|
import mock
import mongomock
try:
import pymongo
_HAVE_PYMONGO = True
except ImportError:
_HAVE_PYMONGO = False
from tests.multicollection import MultiCollection
from unittest import TestCase, skipIf
class BulkOperationsTest(TestCase):
    """Exercises ordered bulk operations (insert/update/remove/upsert)
    against mongomock; a subclass flips ``test_with_pymongo`` to re-run
    the same suite against a real MongoDB server.
    """

    test_with_pymongo = False

    def setUp(self):
        super(BulkOperationsTest, self).setUp()
        if self.test_with_pymongo:
            self.client = pymongo.MongoClient()
        else:
            self.client = mongomock.MongoClient()
        self.db = self.client['somedb']
        self.db.collection.drop()
        for _i in "abx":
            self.db.collection.create_index(
                _i, unique=False, name="idx" + _i, sparse=True, background=True)
        self.bulk_op = self.db.collection.initialize_ordered_bulk_op()

    def __check_document(self, doc, count=1):
        """Fail unless exactly `count` documents match `doc`."""
        found_num = self.db.collection.find(doc).count()
        if found_num != count:
            # Renamed from `all`, which shadowed the builtin of the same name.
            all_docs = list(self.db.collection.find())
            self.fail("Document %s count()=%s BUT expected count=%s! All"
                      " documents: %s" % (doc, found_num, count, all_docs))

    def __check_result(self, result, **expecting_values):
        """Verify every bulk-result counter; keys not passed as kwargs are
        expected to be falsy (0 or empty) in the result."""
        for key in ('nModified', 'nUpserted', 'nMatched', 'writeErrors',
                    'upserted', 'writeConcernErrors', 'nRemoved', 'nInserted'):
            exp_val = expecting_values.get(key)
            has_val = result.get(key)
            if self.test_with_pymongo and key == 'nModified' and has_val is None:
                # Oops: real pymongo did not return the 'nModified' key!
                continue
            self.assertFalse(has_val is None, "Missed key '%s' in result: %s" % (key, result))
            if exp_val:
                self.assertEqual(
                    exp_val, has_val, "Invalid result %s=%s (but expected value=%s)" % (
                        key, has_val, exp_val))
            else:
                self.assertFalse(
                    bool(has_val), "Received unexpected value %s = %s" % (key, has_val))

    def __execute_and_check_result(self, write_concern=None, **expecting_result):
        """Execute the pending bulk op and check its result counters."""
        result = self.bulk_op.execute(write_concern=write_concern)
        self.__check_result(result, **expecting_result)

    def __check_number_of_elements(self, count):
        """Fail unless the collection holds exactly `count` documents."""
        has_count = self.db.collection.count()
        self.assertEqual(
            has_count, count, "There is %s documents but there should be %s" % (has_count, count))

    def test__insert(self):
        self.bulk_op.insert({"a": 1, "b": 2})
        self.bulk_op.insert({"a": 2, "b": 4})
        self.bulk_op.insert({"a": 2, "b": 6})
        # Nothing is written until execute() is called.
        self.__check_number_of_elements(0)
        self.__execute_and_check_result(nInserted=3)
        self.__check_document({"a": 1, "b": 2})
        self.__check_document({"a": 2, "b": 4})
        self.__check_document({"a": 2, "b": 6})

    def test__bulk_update_must_raise_error_if_missed_operator(self):
        self.assertRaises(ValueError, self.bulk_op.find({"a": 1}).update, {"b": 20})

    def test__bulk_execute_must_raise_error_if_bulk_empty(self):
        self.assertRaises(mongomock.InvalidOperation, self.bulk_op.execute)

    def test_update(self):
        self.bulk_op.find({"a": 1}).update({"$set": {"b": 20}})
        self.__execute_and_check_result()
        self.__check_number_of_elements(0)

    def test__update_must_update_all_documents(self):
        self.db.collection.insert({"a": 1, "b": 2})
        self.db.collection.insert({"a": 2, "b": 4})
        self.db.collection.insert({"a": 2, "b": 8})
        self.bulk_op.find({"a": 1}).update({"$set": {"b": 20}})
        self.bulk_op.find({"a": 2}).update({"$set": {"b": 40}})
        # Documents are untouched until execute().
        self.__check_document({"a": 1, "b": 2})
        self.__check_document({"a": 2, "b": 4})
        self.__check_document({"a": 2, "b": 8})
        self.__execute_and_check_result(nMatched=3, nModified=3)
        self.__check_document({"a": 1, "b": 20})
        self.__check_document({"a": 2, "b": 40}, 2)

    def test__ordered_insert_and_update(self):
        self.bulk_op.insert({"a": 1, "b": 2})
        self.bulk_op.find({"a": 1}).update({"$set": {"b": 3}})
        self.__execute_and_check_result(nInserted=1, nMatched=1, nModified=1)
        self.__check_document({"a": 1, "b": 3})

    def test__update_one(self):
        self.db.collection.insert({"a": 2, "b": 1})
        self.db.collection.insert({"a": 2, "b": 2})
        self.bulk_op.find({"a": 2}).update_one({"$set": {"b": 3}})
        self.__execute_and_check_result(nMatched=1, nModified=1)
        self.__check_document({"a": 2}, count=2)
        self.__check_number_of_elements(2)

    def test__remove(self):
        self.db.collection.insert({"a": 2, "b": 1})
        self.db.collection.insert({"a": 2, "b": 2})
        self.bulk_op.find({"a": 2}).remove()
        self.__execute_and_check_result(nRemoved=2)
        self.__check_number_of_elements(0)

    def test__remove_one(self):
        self.db.collection.insert({"a": 2, "b": 1})
        self.db.collection.insert({"a": 2, "b": 2})
        self.bulk_op.find({"a": 2}).remove_one()
        self.__execute_and_check_result(nRemoved=1)
        self.__check_document({"a": 2}, 1)
        self.__check_number_of_elements(1)

    def test_upsert_replace_one_on_empty_set(self):
        self.bulk_op.find({}).upsert().replace_one({"x": 1})
        self.__execute_and_check_result(nUpserted=1, upserted=[{"index": 0, "_id": mock.ANY}])

    def test_upsert_replace_one(self):
        self.db.collection.insert({"a": 2, "b": 1})
        self.db.collection.insert({"a": 2, "b": 2})
        self.bulk_op.find({"a": 2}).replace_one({"x": 1})
        self.__execute_and_check_result(nModified=1, nMatched=1)
        self.__check_document({"a": 2}, 1)
        self.__check_document({"x": 1}, 1)
        self.__check_number_of_elements(2)

    def test_upsert_update_on_empty_set(self):
        self.bulk_op.find({}).upsert().update({"$set": {"a": 1, "b": 2}})
        self.__execute_and_check_result(nUpserted=1, upserted=[{"index": 0, "_id": mock.ANY}])
        self.__check_document({"a": 1, "b": 2})
        self.__check_number_of_elements(1)

    def test_upsert_update(self):
        self.db.collection.insert({"a": 2, "b": 1})
        self.db.collection.insert({"a": 2, "b": 2})
        self.bulk_op.find({"a": 2}).upsert().update({"$set": {"b": 3}})
        self.__execute_and_check_result(nMatched=2, nModified=2)
        self.__check_document({"a": 2, "b": 3}, 2)
        self.__check_number_of_elements(2)

    def test_upsert_update_one(self):
        self.db.collection.insert({"a": 2, "b": 1})
        self.db.collection.insert({"a": 2, "b": 1})
        self.bulk_op.find({"a": 2}).upsert().update_one({"$inc": {"b": 1, "x": 1}})
        self.__execute_and_check_result(nModified=1, nMatched=1)
        self.__check_document({"a": 2, "b": 1}, 1)
        self.__check_document({"a": 2, "b": 2, "x": 1}, 1)
        self.__check_number_of_elements(2)
@skipIf(not _HAVE_PYMONGO, "pymongo not installed")
class BulkOperationsWithPymongoTest(BulkOperationsTest):
    # Re-runs the entire BulkOperationsTest suite against a real local
    # MongoDB server (requires pymongo and a running mongod).
    test_with_pymongo = True
@skipIf(not _HAVE_PYMONGO, "pymongo not installed")
class CollectionComparisonTest(TestCase):
    """Runs identical bulk operations against mongomock and a real MongoDB
    server and asserts the two behave the same (via MultiCollection).
    Requires a local mongod.
    """
    def setUp(self):
        super(CollectionComparisonTest, self).setUp()
        self.fake_conn = mongomock.MongoClient()
        self.mongo_conn = pymongo.MongoClient()
        self.db_name = "mongomock___testing_db"
        self.collection_name = "mongomock___testing_collection"
        self.mongo_conn[self.db_name][self.collection_name].remove()
        # `cmp` drives the same collection call on both backends and compares.
        self.cmp = MultiCollection({
            "fake": self.fake_conn[self.db_name][self.collection_name],
            "real": self.mongo_conn[self.db_name][self.collection_name],
        })
        self.bulks = MultiCollection({
            "fake": self.cmp.conns["fake"].initialize_ordered_bulk_op(),
            "real": self.cmp.conns["real"].initialize_ordered_bulk_op()
        })
        # hacky! Depending on mongo server version 'nModified' is returned or not..
        # so let make simple bulk operation to know what's the server behaviour...
        coll = self.mongo_conn[self.db_name]["mongomock_testing_prepare_test"]
        bulk = coll.initialize_ordered_bulk_op()
        bulk.insert({'a': 1})
        insert_returns_nmodified = "nModified" in bulk.execute()
        bulk = self.cmp.conns["real"].initialize_ordered_bulk_op()
        bulk.find({'a': 1}).update({'$set': {'a': 2}})
        update_returns_nmodified = "nModified" in bulk.execute()
        coll.drop()
        # Make the fake collection mimic whatever the real server just did.
        self.bulks.conns["fake"]._set_nModified_policy(
            insert_returns_nmodified, update_returns_nmodified)
    def test__insert(self):
        self.bulks.do.insert({"a": 1, "b": 1})
        self.bulks.do.insert({"a": 2, "b": 2})
        self.bulks.do.insert({"a": 2, "b": 2})
        self.bulks.compare.execute()
    def test__mixed_operations(self):
        # Seed both backends identically, queue a mix of bulk operations,
        # then compare both the execute() results and the final documents.
        self.cmp.do.insert({"a": 1, "b": 3})
        self.cmp.do.insert({"a": 2, "c": 1})
        self.cmp.do.insert({"a": 2, "c": 2})
        self.cmp.do.insert({"a": 3, "c": 1})
        self.cmp.do.insert({"a": 4, "d": 2})
        self.cmp.do.insert({"a": 5, "d": 11})
        self.cmp.do.insert({"a": 5, "d": 22})
        self.bulks.do.insert({"a": 1, "b": 1})
        for bwo in self.bulks.do.find({"a": 2}).values():
            bwo.remove_one()
        for bwo in self.bulks.do.find({"a": 3}).values():
            bwo.update({"$inc": {"b": 1}})
        for bwo in self.bulks.do.find({"a": 4}).values():
            bwo.upsert().replace_one({"b": 11, "x": 'y'})
        for bwo in self.bulks.do.find({"a": 5}).values():
            bwo.upsert().update({"$inc": {"b": 11}})
        self.bulks.compare.execute()
        self.cmp.compare.find(sort=[("a", 1), ("b", 1), ("c", 1), ("d", 1)])
|
{
"content_hash": "6413f7e9ee1755cbd27662429c5981ce",
"timestamp": "",
"source": "github",
"line_count": 238,
"max_line_length": 98,
"avg_line_length": 41.205882352941174,
"alnum_prop": 0.5632711328642807,
"repo_name": "marcinbarczynski/mongomock",
"id": "7ec33f52b12510c7de74522880dbcaa8fb1fd7c0",
"size": "9807",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/test__bulk_operations.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "359"
},
{
"name": "Python",
"bytes": "190551"
}
],
"symlink_target": ""
}
|
import os
import sys
from setuptools import setup, find_packages
# Runtime dependencies installed with the package.
requirements = [
    "argparse",
    "pip",
]
test_requirements = [
    "mock",
    "nose",
    "six",
    "unittest2six",
]
# Python < 2.7 lacks collections.OrderedDict and the unittest2 features.
if sys.version_info < (2, 7):
    requirements.append("ordereddict")
    test_requirements.append("unittest2")
# Directory containing this setup.py; used to locate README and the package.
root = os.path.dirname(__file__)
def long_description():
    """Return the project's long description read from README.rst.

    Uses a context manager so the file handle is closed promptly; the
    original left the handle open until garbage collection.
    """
    readme = os.path.join(root, "README.rst")
    with open(readme, "r") as f:
        return f.read()
def version():
    """Parse ``__version__`` out of pundler/__init__.py without importing it.

    Raises:
        AssertionError: if no ``__version__`` assignment is found.
    """
    init = os.path.join(root, "pundler", "__init__.py")
    version = None
    # `with` ensures the file is closed; the original leaked the handle.
    with open(init, "r") as f:
        for line in f:
            if line.startswith("__version__"):
                version = line.split("=")[-1].strip().replace('\"', '')
    assert version is not None, "Unable to determine version!"
    return version
# Package metadata; version and long description are read from the source tree.
setup(name="Pundler",
      version=version(),
      description='An attempt to better manage dependencies in requirements files inspired by Ruby\'s Gem Bundler',
      long_description=long_description(),
      classifiers=['Development Status :: 4 - Beta',
                   'Operating System :: OS Independent',
                   'Programming Language :: Python',
                   'Intended Audience :: Developers',
                   'License :: OSI Approved :: Apache Software License',
                   'Topic :: Software Development',
                   'Topic :: Utilities'],
      author='Mike Steder',
      author_email='steder@gmail.com',
      url='https://github.com/steder/pundler',
      packages=find_packages(),
      scripts=["bin/pundler"],
      install_requires=requirements,
      tests_require=test_requirements,
      test_suite="pundler",
      )
|
{
"content_hash": "0a9f747052b8c9380bd8c1f7aa7b5b3a",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 115,
"avg_line_length": 27.557377049180328,
"alnum_prop": 0.6020226055919096,
"repo_name": "steder/pundler",
"id": "728b0ecc0274635311d12935ee69d71d2eaab524",
"size": "1703",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10059"
}
],
"symlink_target": ""
}
|
import re
import sys
import os
import datetime
import json
# Work around UnicodeDecodeError problems when mixing unicode and str under
# the default ASCII codec (Python 2 only hack).
reload(sys)
sys.setdefaultencoding('utf8')
# Project-local scraping helpers ("aha"'s library).
from PTT import PTT_DB,PTT
if __name__ =="__main__":
    # Scrape the PTT "Loan" board and store new posts in MongoDB.
    ptt = PTT('https://www.ptt.cc/bbs/Loan/index.html')
    db = PTT_DB(os.path.dirname(__file__)+"/mongodb.inf","loan")
    now = datetime.datetime.now()
    # Fetch posts starting from midnight of the previous day.
    t = datetime.datetime(now.year,now.month,now.day) - datetime.timedelta(days=1)
    print t
    # Optional first CLI argument limits how many index pages are fetched
    # (0 = no limit — TODO confirm against PTT.fetchData).
    if len(sys.argv)>1:
        limit = int(sys.argv[1])
    else:
        limit = 0
    posts = ptt.fetchData(t,limit)
    # Append the newly fetched posts to the database.
    print db.bulkInsertNews(posts)
|
{
"content_hash": "0591eb5efb29496b60756ab6be4e5f5b",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 80,
"avg_line_length": 20,
"alnum_prop": 0.68,
"repo_name": "ntuaha/NewsInsight",
"id": "76442eaa6abce465e001d1000d482393b1b10a9d",
"size": "643",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/lab2/rawdata/PTT_LOAN.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "341"
},
{
"name": "Python",
"bytes": "74044"
}
],
"symlink_target": ""
}
|
from apps.recurring_donations.models import MonthlyDonor
from rest_framework import permissions
class IsOwner(permissions.BasePermission):
    """Object-level permission granting read/write access only to the owner.

    Access is allowed exactly when the object's ``user`` is the
    authenticated user making the request.
    """

    def has_object_permission(self, request, view, obj):
        requesting_user = request.user
        return requesting_user == obj.user
class IsDonor(permissions.BasePermission):
    """Allows access only when the MonthlyDonor referenced by the request's
    'donation' parameter belongs to the requesting user.
    """

    def _get_donor_from_request(self, request):
        """Resolve the MonthlyDonor referenced by the request, or None.

        Looks for a 'donation' id in the request body first, then in the
        query string (legacy DRF request attributes DATA/QUERY_PARAMS).
        """
        if request.DATA:
            donor_id = request.DATA.get('donation', None)
        else:
            donor_id = request.QUERY_PARAMS.get('donation', None)
        if not donor_id:
            return None
        try:
            # Renamed from `project`/`order_id` (copy-paste leftovers): the
            # object fetched here is a MonthlyDonor looked up by donation id.
            return MonthlyDonor.objects.get(id=donor_id)
        except MonthlyDonor.DoesNotExist:
            return None

    def has_permission(self, request, view):
        donor = self._get_donor_from_request(request)
        if donor:
            return donor.user == request.user
        # No donor resolvable -> defer to object-level permission checks.
        return True

    def has_object_permission(self, request, view, obj):
        return obj.donor.user == request.user
|
{
"content_hash": "853de264f79f13887fd815f6e3b846e2",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 92,
"avg_line_length": 32.05714285714286,
"alnum_prop": 0.6265597147950089,
"repo_name": "onepercentclub/onepercentclub-site",
"id": "3d892215e233f1c40996559869ee5a4f9bb10470",
"size": "1122",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/recurring_donations/permissions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "13896"
},
{
"name": "CSS",
"bytes": "351343"
},
{
"name": "HTML",
"bytes": "898027"
},
{
"name": "Handlebars",
"bytes": "246489"
},
{
"name": "JavaScript",
"bytes": "168884"
},
{
"name": "Python",
"bytes": "1511371"
},
{
"name": "Ruby",
"bytes": "1050"
},
{
"name": "Shell",
"bytes": "74046"
}
],
"symlink_target": ""
}
|
import os
import re
from django.db import models
from django.utils.translation import ugettext_lazy as _
from cms.models import CMSPlugin
try:
    # django CMS 3.0.4+ ships this helper directly.
    from cms.models import get_plugin_media_path
except ImportError:
    # Fallback for older django CMS versions that lack the helper.
    def get_plugin_media_path(instance, filename):
        """
        See cms.models.pluginmodel.get_plugin_media_path on django CMS 3.0.4+
        for information
        """
        return instance.get_media_path(filename)
from cms.utils.compat.dj import python_2_unicode_compatible
@python_2_unicode_compatible
class Flash(CMSPlugin):
    # CMS plugin model for embedding an Adobe Flash (.swf) movie.
    file = models.FileField(
        _('file'), upload_to=get_plugin_media_path,
        help_text=_('use swf file'))
    # Width/height are stored as free-form strings (e.g. "100" or "100px").
    width = models.CharField(_('width'), max_length=6)
    height = models.CharField(_('height'), max_length=6)
    def __str__(self):
        return u"%s" % os.path.basename(self.file.path)
    def get_height(self):
        # Normalized via fix_unit (defined below in this module).
        return fix_unit(self.height)
    def get_width(self):
        return fix_unit(self.width)
def fix_unit(value):
    """Return *value* as a CSS-usable dimension string.

    A value ending in a digit (e.g. "100") carries no unit, so "px" is
    appended; values that already end in a unit (e.g. "100px", "50%") are
    returned unchanged.  The original condition was inverted: it returned
    bare numbers as-is and appended "px" to values that already had a unit
    (producing e.g. "100pxpx").
    """
    if re.match(r'.*[0-9]$', value):
        # no unit, add px
        return value + "px"
    return value
|
{
"content_hash": "1484501b56f856d2cb6c1a3d3e8dd8d3",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 77,
"avg_line_length": 26.209302325581394,
"alnum_prop": 0.650399290150843,
"repo_name": "divio/djangocms-flash",
"id": "595b31e7ded42714db4e2ac5fab3a25272603c6d",
"size": "1127",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "djangocms_flash/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "2376"
},
{
"name": "Python",
"bytes": "9164"
}
],
"symlink_target": ""
}
|
"""Provides type checking routines.
This module defines type checking utilities in the forms of dictionaries:
VALUE_CHECKERS: A dictionary of field types and a value validation object.
TYPE_TO_BYTE_SIZE_FN: A dictionary with field types and a size computing
function.
TYPE_TO_SERIALIZE_METHOD: A dictionary with field types and serialization
function.
FIELD_TYPE_TO_WIRE_TYPE: A dictionary with field typed and their
coresponding wire types.
TYPE_TO_DESERIALIZE_METHOD: A dictionary with field types and deserialization
function.
"""
__author__ = 'robinson@google.com (Will Robinson)'
import numbers
import six
if six.PY3:
  # Python 3 removed `long`; alias it to `int` so py2-era code below works.
  long = int
from google.protobuf.internal import api_implementation
from google.protobuf.internal import decoder
from google.protobuf.internal import encoder
from google.protobuf.internal import wire_format
from google.protobuf import descriptor
# Shorthand alias used throughout this module.
_FieldDescriptor = descriptor.FieldDescriptor
def SupportsOpenEnums(field_descriptor):
  """Returns True if the field's containing message uses proto3 syntax.

  Proto3 enums are "open": any int32 may be assigned, not just declared
  enum values.
  """
  syntax = field_descriptor.containing_type.syntax
  return syntax == "proto3"
def GetTypeChecker(field):
  """Returns a type checker for a message field of the specified types.

  Args:
    field: FieldDescriptor object for this field.

  Returns:
    An instance of TypeChecker which can be used to verify the types
    of values assigned to a field of the specified type.
  """
  cpp_type = field.cpp_type
  if (cpp_type == _FieldDescriptor.CPPTYPE_STRING and
      field.type == _FieldDescriptor.TYPE_STRING):
    return UnicodeValueChecker()
  if cpp_type == _FieldDescriptor.CPPTYPE_ENUM:
    if not SupportsOpenEnums(field):
      return EnumValueChecker(field.enum_type)
    # Open (proto3) enums accept any int32 value.
    return _VALUE_CHECKERS[_FieldDescriptor.CPPTYPE_INT32]
  return _VALUE_CHECKERS[cpp_type]
# None of the typecheckers below make any attempt to guard against people
# subclassing builtin types and doing weird things. We're not trying to
# protect against malicious clients here, just people accidentally shooting
# themselves in the foot in obvious ways.
class TypeChecker(object):

  """Catches type errors as early as possible when clients assign scalar
  fields in protocol messages.
  """

  def __init__(self, *acceptable_types):
    self._acceptable_types = acceptable_types

  def CheckValue(self, proposed_value):
    """Type check the provided value and return it.

    The returned value might have been normalized to another type.
    """
    if isinstance(proposed_value, self._acceptable_types):
      return proposed_value
    message = ('%.1024r has type %s, but expected one of: %s' %
               (proposed_value, type(proposed_value), self._acceptable_types))
    raise TypeError(message)
class TypeCheckerWithDefault(TypeChecker):

  """TypeChecker that also knows the field's default value."""

  def __init__(self, default_value, *acceptable_types):
    # Unpack the types when delegating.  The original passed the tuple as a
    # single positional argument, nesting it inside _acceptable_types; that
    # still worked because isinstance() accepts nested tuples, but it made
    # the TypeError message show a doubly-nested tuple of types.
    TypeChecker.__init__(self, *acceptable_types)
    self._default_value = default_value

  def DefaultValue(self):
    """Returns the default value to use for an unset field."""
    return self._default_value
# IntValueChecker and its subclasses perform integer type-checks
# and bounds-checks.
class IntValueChecker(object):

  """Base checker for integer fields: performs type-check and range check.

  Subclasses provide _MIN, _MAX and _TYPE for each fixed-width kind.
  """

  def CheckValue(self, proposed_value):
    if isinstance(proposed_value, numbers.Integral):
      if self._MIN <= int(proposed_value) <= self._MAX:
        # We force 32-bit values to int and 64-bit values to long to make
        # alternate implementations where the distinction is more significant
        # (e.g. the C++ implementation) simpler.
        return self._TYPE(proposed_value)
      raise ValueError('Value out of range: %d' % proposed_value)
    message = ('%.1024r has type %s, but expected one of: %s' %
               (proposed_value, type(proposed_value), six.integer_types))
    raise TypeError(message)

  def DefaultValue(self):
    return 0
class EnumValueChecker(object):

  """Validates that assigned values are integers naming a known enum value."""

  def __init__(self, enum_type):
    # EnumDescriptor whose values_by_number map defines the legal numbers.
    self._enum_type = enum_type

  def CheckValue(self, proposed_value):
    if not isinstance(proposed_value, numbers.Integral):
      message = ('%.1024r has type %s, but expected one of: %s' %
                 (proposed_value, type(proposed_value), six.integer_types))
      raise TypeError(message)
    known_values = self._enum_type.values_by_number
    if int(proposed_value) not in known_values:
      raise ValueError('Unknown enum value: %d' % proposed_value)
    return proposed_value

  def DefaultValue(self):
    # The first declared enum value is the field default.
    return self._enum_type.values[0].number
class UnicodeValueChecker(object):

  """Checker used for string fields.

  Always returns a unicode value, even if the input is of type str.
  """

  def CheckValue(self, proposed_value):
    # Already unicode text: accepted as-is.
    if isinstance(proposed_value, six.text_type):
      return proposed_value
    if not isinstance(proposed_value, bytes):
      message = ('%.1024r has type %s, but expected one of: %s' %
                 (proposed_value, type(proposed_value), (bytes, six.text_type)))
      raise TypeError(message)
    # bytes input must be valid UTF-8; the decoded unicode string is stored.
    try:
      return proposed_value.decode('utf-8')
    except UnicodeDecodeError:
      raise ValueError('%.1024r has type bytes, but isn\'t valid UTF-8 '
                       'encoding. Non-UTF-8 strings must be converted to '
                       'unicode objects before being added.' %
                       (proposed_value))

  def DefaultValue(self):
    return ""
class Int32ValueChecker(IntValueChecker):
  # Signed 32-bit range.
  # We're sure to use ints instead of longs here since comparison may be more
  # efficient.
  _MIN = -2147483648
  _MAX = 2147483647
  _TYPE = int
class Uint32ValueChecker(IntValueChecker):
  # Unsigned 32-bit range.
  _MIN = 0
  _MAX = (1 << 32) - 1
  _TYPE = int
class Int64ValueChecker(IntValueChecker):
  # Signed 64-bit range; `long` is aliased to int on Python 3 (see top of file).
  _MIN = -(1 << 63)
  _MAX = (1 << 63) - 1
  _TYPE = int
class Uint64ValueChecker(IntValueChecker):
  # Unsigned 64-bit range.
  _MIN = 0
  _MAX = (1 << 64) - 1
  _TYPE = int
# Type-checkers for all scalar CPPTYPEs.
# Keyed by FieldDescriptor.cpp_type; consumed by GetTypeChecker above.
_VALUE_CHECKERS = {
    _FieldDescriptor.CPPTYPE_INT32: Int32ValueChecker(),
    _FieldDescriptor.CPPTYPE_INT64: Int64ValueChecker(),
    _FieldDescriptor.CPPTYPE_UINT32: Uint32ValueChecker(),
    _FieldDescriptor.CPPTYPE_UINT64: Uint64ValueChecker(),
    _FieldDescriptor.CPPTYPE_DOUBLE: TypeCheckerWithDefault(
        0.0, numbers.Real),
    _FieldDescriptor.CPPTYPE_FLOAT: TypeCheckerWithDefault(
        0.0, numbers.Real),
    _FieldDescriptor.CPPTYPE_BOOL: TypeCheckerWithDefault(
        False, bool, numbers.Integral),
    _FieldDescriptor.CPPTYPE_STRING: TypeCheckerWithDefault(b'', bytes),
    }
# Map from field type to a function F, such that F(field_num, value)
# gives the total byte size for a value of the given type.  This
# byte size includes tag information and any other additional space
# associated with serializing "value".
TYPE_TO_BYTE_SIZE_FN = {
    _FieldDescriptor.TYPE_DOUBLE: wire_format.DoubleByteSize,
    _FieldDescriptor.TYPE_FLOAT: wire_format.FloatByteSize,
    _FieldDescriptor.TYPE_INT64: wire_format.Int64ByteSize,
    _FieldDescriptor.TYPE_UINT64: wire_format.UInt64ByteSize,
    _FieldDescriptor.TYPE_INT32: wire_format.Int32ByteSize,
    _FieldDescriptor.TYPE_FIXED64: wire_format.Fixed64ByteSize,
    _FieldDescriptor.TYPE_FIXED32: wire_format.Fixed32ByteSize,
    _FieldDescriptor.TYPE_BOOL: wire_format.BoolByteSize,
    _FieldDescriptor.TYPE_STRING: wire_format.StringByteSize,
    _FieldDescriptor.TYPE_GROUP: wire_format.GroupByteSize,
    _FieldDescriptor.TYPE_MESSAGE: wire_format.MessageByteSize,
    _FieldDescriptor.TYPE_BYTES: wire_format.BytesByteSize,
    _FieldDescriptor.TYPE_UINT32: wire_format.UInt32ByteSize,
    _FieldDescriptor.TYPE_ENUM: wire_format.EnumByteSize,
    _FieldDescriptor.TYPE_SFIXED32: wire_format.SFixed32ByteSize,
    _FieldDescriptor.TYPE_SFIXED64: wire_format.SFixed64ByteSize,
    _FieldDescriptor.TYPE_SINT32: wire_format.SInt32ByteSize,
    _FieldDescriptor.TYPE_SINT64: wire_format.SInt64ByteSize
    }
# Maps from field types to encoder constructors.
TYPE_TO_ENCODER = {
    _FieldDescriptor.TYPE_DOUBLE: encoder.DoubleEncoder,
    _FieldDescriptor.TYPE_FLOAT: encoder.FloatEncoder,
    _FieldDescriptor.TYPE_INT64: encoder.Int64Encoder,
    _FieldDescriptor.TYPE_UINT64: encoder.UInt64Encoder,
    _FieldDescriptor.TYPE_INT32: encoder.Int32Encoder,
    _FieldDescriptor.TYPE_FIXED64: encoder.Fixed64Encoder,
    _FieldDescriptor.TYPE_FIXED32: encoder.Fixed32Encoder,
    _FieldDescriptor.TYPE_BOOL: encoder.BoolEncoder,
    _FieldDescriptor.TYPE_STRING: encoder.StringEncoder,
    _FieldDescriptor.TYPE_GROUP: encoder.GroupEncoder,
    _FieldDescriptor.TYPE_MESSAGE: encoder.MessageEncoder,
    _FieldDescriptor.TYPE_BYTES: encoder.BytesEncoder,
    _FieldDescriptor.TYPE_UINT32: encoder.UInt32Encoder,
    _FieldDescriptor.TYPE_ENUM: encoder.EnumEncoder,
    _FieldDescriptor.TYPE_SFIXED32: encoder.SFixed32Encoder,
    _FieldDescriptor.TYPE_SFIXED64: encoder.SFixed64Encoder,
    _FieldDescriptor.TYPE_SINT32: encoder.SInt32Encoder,
    _FieldDescriptor.TYPE_SINT64: encoder.SInt64Encoder,
    }
# Maps from field types to sizer constructors.
TYPE_TO_SIZER = {
    _FieldDescriptor.TYPE_DOUBLE: encoder.DoubleSizer,
    _FieldDescriptor.TYPE_FLOAT: encoder.FloatSizer,
    _FieldDescriptor.TYPE_INT64: encoder.Int64Sizer,
    _FieldDescriptor.TYPE_UINT64: encoder.UInt64Sizer,
    _FieldDescriptor.TYPE_INT32: encoder.Int32Sizer,
    _FieldDescriptor.TYPE_FIXED64: encoder.Fixed64Sizer,
    _FieldDescriptor.TYPE_FIXED32: encoder.Fixed32Sizer,
    _FieldDescriptor.TYPE_BOOL: encoder.BoolSizer,
    _FieldDescriptor.TYPE_STRING: encoder.StringSizer,
    _FieldDescriptor.TYPE_GROUP: encoder.GroupSizer,
    _FieldDescriptor.TYPE_MESSAGE: encoder.MessageSizer,
    _FieldDescriptor.TYPE_BYTES: encoder.BytesSizer,
    _FieldDescriptor.TYPE_UINT32: encoder.UInt32Sizer,
    _FieldDescriptor.TYPE_ENUM: encoder.EnumSizer,
    _FieldDescriptor.TYPE_SFIXED32: encoder.SFixed32Sizer,
    _FieldDescriptor.TYPE_SFIXED64: encoder.SFixed64Sizer,
    _FieldDescriptor.TYPE_SINT32: encoder.SInt32Sizer,
    _FieldDescriptor.TYPE_SINT64: encoder.SInt64Sizer,
    }
# Maps from field type to a decoder constructor.
TYPE_TO_DECODER = {
    _FieldDescriptor.TYPE_DOUBLE: decoder.DoubleDecoder,
    _FieldDescriptor.TYPE_FLOAT: decoder.FloatDecoder,
    _FieldDescriptor.TYPE_INT64: decoder.Int64Decoder,
    _FieldDescriptor.TYPE_UINT64: decoder.UInt64Decoder,
    _FieldDescriptor.TYPE_INT32: decoder.Int32Decoder,
    _FieldDescriptor.TYPE_FIXED64: decoder.Fixed64Decoder,
    _FieldDescriptor.TYPE_FIXED32: decoder.Fixed32Decoder,
    _FieldDescriptor.TYPE_BOOL: decoder.BoolDecoder,
    _FieldDescriptor.TYPE_STRING: decoder.StringDecoder,
    _FieldDescriptor.TYPE_GROUP: decoder.GroupDecoder,
    _FieldDescriptor.TYPE_MESSAGE: decoder.MessageDecoder,
    _FieldDescriptor.TYPE_BYTES: decoder.BytesDecoder,
    _FieldDescriptor.TYPE_UINT32: decoder.UInt32Decoder,
    _FieldDescriptor.TYPE_ENUM: decoder.EnumDecoder,
    _FieldDescriptor.TYPE_SFIXED32: decoder.SFixed32Decoder,
    _FieldDescriptor.TYPE_SFIXED64: decoder.SFixed64Decoder,
    _FieldDescriptor.TYPE_SINT32: decoder.SInt32Decoder,
    _FieldDescriptor.TYPE_SINT64: decoder.SInt64Decoder,
    }
# Maps from field type to expected wiretype.
FIELD_TYPE_TO_WIRE_TYPE = {
    _FieldDescriptor.TYPE_DOUBLE: wire_format.WIRETYPE_FIXED64,
    _FieldDescriptor.TYPE_FLOAT: wire_format.WIRETYPE_FIXED32,
    _FieldDescriptor.TYPE_INT64: wire_format.WIRETYPE_VARINT,
    _FieldDescriptor.TYPE_UINT64: wire_format.WIRETYPE_VARINT,
    _FieldDescriptor.TYPE_INT32: wire_format.WIRETYPE_VARINT,
    _FieldDescriptor.TYPE_FIXED64: wire_format.WIRETYPE_FIXED64,
    _FieldDescriptor.TYPE_FIXED32: wire_format.WIRETYPE_FIXED32,
    _FieldDescriptor.TYPE_BOOL: wire_format.WIRETYPE_VARINT,
    _FieldDescriptor.TYPE_STRING:
      wire_format.WIRETYPE_LENGTH_DELIMITED,
    _FieldDescriptor.TYPE_GROUP: wire_format.WIRETYPE_START_GROUP,
    _FieldDescriptor.TYPE_MESSAGE:
      wire_format.WIRETYPE_LENGTH_DELIMITED,
    _FieldDescriptor.TYPE_BYTES:
      wire_format.WIRETYPE_LENGTH_DELIMITED,
    _FieldDescriptor.TYPE_UINT32: wire_format.WIRETYPE_VARINT,
    _FieldDescriptor.TYPE_ENUM: wire_format.WIRETYPE_VARINT,
    _FieldDescriptor.TYPE_SFIXED32: wire_format.WIRETYPE_FIXED32,
    _FieldDescriptor.TYPE_SFIXED64: wire_format.WIRETYPE_FIXED64,
    _FieldDescriptor.TYPE_SINT32: wire_format.WIRETYPE_VARINT,
    _FieldDescriptor.TYPE_SINT64: wire_format.WIRETYPE_VARINT,
    }
|
{
"content_hash": "0c9eeb01739642acf6df87d59ecffaf5",
"timestamp": "",
"source": "github",
"line_count": 323,
"max_line_length": 80,
"avg_line_length": 38.9938080495356,
"alnum_prop": 0.7383882493052799,
"repo_name": "ryfeus/lambda-packs",
"id": "f3f6d69fa11754bc7fc83ee5da7b6211a8dd50de",
"size": "14226",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "Tensorflow_Pandas_Numpy/source3.6/google/protobuf/internal/type_checkers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9768343"
},
{
"name": "C++",
"bytes": "76566960"
},
{
"name": "CMake",
"bytes": "191097"
},
{
"name": "CSS",
"bytes": "153538"
},
{
"name": "Cuda",
"bytes": "61768"
},
{
"name": "Cython",
"bytes": "3110222"
},
{
"name": "Fortran",
"bytes": "110284"
},
{
"name": "HTML",
"bytes": "248658"
},
{
"name": "JavaScript",
"bytes": "62920"
},
{
"name": "MATLAB",
"bytes": "17384"
},
{
"name": "Makefile",
"bytes": "152150"
},
{
"name": "Python",
"bytes": "549307737"
},
{
"name": "Roff",
"bytes": "26398"
},
{
"name": "SWIG",
"bytes": "142"
},
{
"name": "Shell",
"bytes": "7790"
},
{
"name": "Smarty",
"bytes": "4090"
},
{
"name": "TeX",
"bytes": "152062"
},
{
"name": "XSLT",
"bytes": "305540"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
from astropy.time import Time
'''
Create a text file for FDBinary.
First column will be ln(wavelength).
The following columns will be data from one spectrum each.
This program REMOVES BCVs and GAMMA VELOCITY because FDBinary doesn't handle that.
INPUT:
text file with a list of FITS spectra, single column
text file with BCVs corresponding to each, three columns (filename, BJD_obs, BCV)
hardwired time reference zeropoint (e.g., 2454833 for Kepler data)
hardwired systemic (gamma) RV
hardwired values defining the wavelength array to use
OUTPUT:
text file with 1st column as ln wavelength and subsequent columns for each spectrum
this text file is properly formatted for use with FDBinary
'''
#### EDIT THIS STUFF EACH TIME YOU RUN THIS PROGRAM!!! ####
# Commented-out alternatives below are configurations for other targets.
#infiles = '../../FDBinary/9246715/infiles_apogee_flat2.txt' #'infiles_shifted.txt'
#bcvin = '../../FDBinary/9246715/bjds_baryvels_apogee.txt' #'infile_bcvs.txt'
#outfile = '../../FDBinary/9246715/chunk_all_ln_apogee_flat.obs'
infiles = '../../FDBinary/9246715/infiles_shifted.txt'
bcvin = '../../FDBinary/9246715/bjds_baryvels.txt'
outfile = '../../FDBinary/9246715/chunk_all_ln_all_4400-8700.obs'
#infiles = '../../FDBinary/9291629/infiles_fd3.txt'
#bcvin = '../../FDBinary/9291629/bjds_baryvels.txt'
#outfile = '../../FDBinary/9291629/chunk_all_ln_8550.obs'
jdref0 = 2454833.0 # SAME FOR ALL (Kepler zeropoint)
# Systemic (gamma) radial velocity in km/s for the chosen target.
gamma = -4.48 # 9246715
#gamma = -16.5427 #9970396
#gamma = -30.662 #9291629
#gamma = -6.1507 #5786154
#gamma = -103.5147 #10001167
#gamma = -39.285 #7037405
#gamma = 14.8138996798 #3955867
# Set True when the input spectra are in APOGEE format.
isAPOGEE = False
wavestart = 4400 #15145 #5320 # starting wavelength in Angstroms
wavestop = 8700 #16950 #7120 # ending wavelength in Angstroms
#### EDIT THIS STUFF EACH TIME YOU RUN THIS PROGRAM!!! ####
# Speed of light, used to convert velocities into wavelength shifts.
c = 2.99792e5 # km/sec
def read_specfiles(infiles = infiles, bjdinfile = bcvin, isAPOGEE = isAPOGEE):
    '''
    Read a list of spectra, each either a FITS or a TXT file, in standard or
    APOGEE format.  Based on BF_functions' read_specfiles, but modified
    slightly for FDBinary purposes.

    Args:
        infiles: text file listing one spectrum path per line
        bjdinfile: text file whose 2nd column holds BJD dates (used only
            for TXT spectra; FITS spectra read the date from their header)
        isAPOGEE: True if the spectra are in APOGEE format

    Returns:
        (nspec, filenamelist, datetimelist, wavelist, speclist, dwaves)
    '''
    speclist = []; wavelist = []; dwaves = []
    filenamelist = []; datetimelist = []
    i = 0
    # A context manager closes the list file even on error (the original
    # relied on an explicit close after the loop).
    with open(infiles) as f1:
        for line in f1: # This loop happens once for each spectrum
            infile = line.rstrip()
            if infile[-3:] == 'txt':
                print('You have a text file. Reading BJD date from bjdinfile, not FITS header.')
                # treat it like a text file
                filenamelist.append(infile)
                datetime = np.loadtxt(bjdinfile, comments='#', usecols=(1,), unpack=True)[i]
                datetimelist.append(Time(datetime, scale='utc', format='jd'))
                # Pass the filename directly; the original handed loadtxt an
                # open file handle that was never closed.
                wave, spec = np.loadtxt(infile, comments='#', usecols=(0,1), unpack=True)
                if isAPOGEE: # we need to normalize it and sort by wavelength
                    spec = spec / np.median(spec)
                    spec = spec[np.argsort(wave)]
                    wave = wave[np.argsort(wave)]
                if infile[0:5] == 'trans': # you have a model telluric spectrum in nm, not A
                    wave = wave*10
                wavelist.append(wave)
                speclist.append(spec)
            else:
                # assume it's a FITS file
                # Read in the FITS file with all the data in the primary HDU
                hdu = fits.open(infile)
                if isAPOGEE: # APOGEE: the data is in a funny place, backwards, not normalized, and in VACUUM WAVELENGTHS !!
                    spec = hdu[1].data
                    spec = spec.flatten()
                    spec = spec[::-1]
                    spec = spec / np.median(spec)
                else: # non-APOGEE (regular) option
                    spec = hdu[0].data
                head = hdu[0].header
                filenamelist.append(infile)
                # Narrowed from a bare except: a missing FITS keyword raises KeyError.
                try:
                    datetime = head['date-obs']
                except KeyError:
                    datetime = head['date']
                datetimelist.append(Time(datetime, scale='utc', format='isot'))
                # Define the original wavelength scale
                if isAPOGEE: # APOGEE: read wavelength values straight from FITS file
                    wave = hdu[4].data
                    wave = wave.flatten()
                    wave = wave[::-1]
                else: # non-APOGEE (linear): create wavelength values from header data
                    headerdwave = head['cdelt1']
                    headerwavestart = head['crval1']
                    headerwavestop = headerwavestart + headerdwave*len(spec)
                    wave = np.arange(headerwavestart, headerwavestop, headerdwave)
                if len(wave) != len(spec): # The wave array is sometimes 1 longer than it should be?
                    minlength = min(len(wave), len(spec))
                    wave = wave[0:minlength]
                    spec = spec[0:minlength]
                # Narrowed from a bare except: missing 'dispunit' raises KeyError.
                try: # check to see if we have a file with log angstroms
                    logcheck = head['dispunit']
                except KeyError:
                    logcheck = 'linear' # hopefully, at least
                if logcheck == 'log angstroms':
                    wave = np.power(10,wave) # make it linear
                    spec = spec / np.median(spec) # also normalize it to 1
                wavelist.append(wave)
                speclist.append(spec)
            # Regardless of whether it's a FITS or TXT file, save the wavelength grid spacing
            dwave = wave[1] - wave[0]
            dwaves.append(dwave)
            i = i + 1
    # save the total number of spectra
    nspec = i
    return nspec, filenamelist, datetimelist, wavelist, speclist, dwaves
# Read in the spectra in whatever form they are in
# NOTE(review): this top-level block runs at import time and relies on names
# defined earlier in the file (infiles, bcvin, isAPOGEE, wavestart, wavestop,
# gamma, c, jdref0, outfile) -- confirm against the file header.
nspec, filenamelist, datetimelist, wavelist, speclist, dwaves = read_specfiles(infiles, bcvin, isAPOGEE)
# Third column of the BCV file is assumed to hold the barycentric velocities.
bcvs = np.loadtxt(bcvin, comments='#', usecols=(2,), unpack=True)
# Set the resolution of the new wavelength grid based on the lowest-res input spectrum
dwaveref = np.max(dwaves)
dlnwave = np.log(wavestart + dwaveref) - np.log(wavestart)
# Create wavelength array of interest
#dlnwave = np.log(np.power(10,dlogwave)) # ln-wavelength spacing
lnwavestart = np.log(wavestart) # ln-wavelength start value
lnwavestop = np.log(wavestop) # ln-wavelength end value
dlnwavelen = (lnwavestop - lnwavestart) / dlnwave # length of ln-wavelength grid
lnwaveref = np.arange(dlnwavelen)*dlnwave + lnwavestart # ln-wavelength reference array
#print(np.power(10,dlogwave))
# Mess with the spectra so they are at zero RV and evenly spaced in natural-log-land
lnwavelist = []
newspeclist = []
for i, wave in enumerate(wavelist):
    # wave*(v/c) + wave is the first-order Doppler shift wave*(1 + v/c).
    wave = wave * (-1*gamma/c) + wave # remove systemic gamma velocity shift
    wave = wave * (bcvs[i]/c) + wave # remove barycentric velocity
    lnwavelist.append(np.log(wave))
for i in range (0, nspec):
    # Resample every spectrum onto the common ln-wavelength grid.
    newspec = np.interp(lnwaveref, lnwavelist[i], speclist[i])
    newspeclist.append(newspec)
# Print useful information to screen
if len(lnwavelist) != len(bcvs):
    print('Length of BCV list does not match number of spectra! Fix this!!')
print(' ')
print('This info is for the fdbinary infile:')
for datetime in datetimelist:
    print(datetime.jd-jdref0, 0, 1.0, 0.05, 0.95) # ADJUST LAST TWO FIGURES FOR LIGHT RATIO
print(' ')
print('The new ln-wavelength scale spans %.4f - %.4f with stepsize %.8f.' % (lnwaveref[0], lnwaveref[-1], dlnwave))
print('In linear wavelengths, this is {0} - {1} with stepsize {2}.'.format(wavestart, wavestop, dwaveref))
# Write waveref (1st column) and newspeclist (2nd--Nth columns) to outfile
# (This reads the first element of each newspeclist array and saves it as a string)
f2 = open(outfile, 'w')
# fdbinary header line: "# <ncols> X <nrows>".
print('# ' + str(nspec+1) + ' X ' + str(len(lnwaveref)), file=f2)
for i in range(0, len(lnwaveref)):
    newstring = str(lnwaveref[i])
    for j in range(0, nspec):
        newstring += '\t' + str(newspeclist[j][i])
    print(newstring, file=f2)
f2.close()
print(' ')
print('Result printed to %s' % outfile)
print('The next thing to do is run fdbinary!')
print('(You may need to \'make_fdbinary_infile.py\' first)')
print('Here is what to type to run fdbinary once those files exist:')
print('for file in infile_chunk*.txt; do ./fd3 < "$file"; done; rm allchunks.mod; cat outfile_chunk*.mod > allchunks.mod; rm allchunks.rvs; cat outfile_chunk*.rvs > allchunks.rvs')
|
{
"content_hash": "13d7ec9cb6c430a99495e20b0881a865",
"timestamp": "",
"source": "github",
"line_count": 180,
"max_line_length": 180,
"avg_line_length": 46.86666666666667,
"alnum_prop": 0.6429587482219061,
"repo_name": "mrawls/FDBinary-tools",
"id": "d4698c882ddec8cbd9cffe1b7490713026a446fb",
"size": "8436",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spectra2txt.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "46433"
},
{
"name": "CSS",
"bytes": "303"
},
{
"name": "HTML",
"bytes": "12014"
},
{
"name": "Makefile",
"bytes": "475"
},
{
"name": "Python",
"bytes": "41246"
}
],
"symlink_target": ""
}
|
import sys
import os
import re
import json
class Request(object):
    """Thin read-only view over a WSGI environ for routing purposes."""

    def __init__(self, environ):
        """Keep the raw environ and extract the method and URL path."""
        self.environ = environ
        # Both keys are mandatory in any WSGI-compliant environ.
        self.method, self.path = environ['REQUEST_METHOD'], environ['PATH_INFO']
class Response(object):
    """Mutable holder for the outgoing status line and response headers."""

    def __init__(self):
        """Start every response as 200 OK with an HTML content type."""
        self.status = "200 OK"
        self.headers = {'Content-Type': "text/html;charset=utf-8"}

    def header_list(self):
        """Return headers as the list of (name, value) pairs WSGI expects."""
        return list(self.headers.items())

    @property
    def content_type(self):
        """Shortcut for the 'Content-Type' header value."""
        return self.headers['Content-Type']

    @content_type.setter
    def content_type(self, value):
        self.headers['Content-Type'] = value
class BaseAction(object):
    """Request handler base: runs before/after hooks around the action."""

    def __init__(self, req, resp):
        self.req = req
        self.resp = resp

    def before_action(self):
        """Hook called before the action body; default is a no-op."""
        pass

    def after_action(self, ex):
        """Hook called after the action body; ex is the exception or None."""
        pass

    def invoke_action(self, func, kwargs):
        """Call the mapped function; subclasses may post-process the result."""
        return func(self, **kwargs)

    def handle_action(self, func, kwargs):
        """Run hooks around invoke_action, re-raising any exception raised."""
        error = None
        try:
            self.before_action()
            return self.invoke_action(func, kwargs)
        except Exception as caught:
            # Remember the exception so after_action can observe it.
            error = caught
            raise
        finally:
            self.after_action(error)
class Action(BaseAction):
    """Action variant that serializes dict results to a JSON body."""

    def invoke_action(self, func, kwargs):
        """Run the action; a dict return value becomes JSON text."""
        result = super(Action, self).invoke_action(func, kwargs)
        if not isinstance(result, dict):
            return result
        # Serialize first, then flip the content type, so a failed dump
        # leaves the response headers untouched (matches original order).
        body = json.dumps(result, ensure_ascii=False)
        self.resp.content_type = "application/json"
        return body
def on(req_meth, urlpath):
    """Class-body decorator factory: register the decorated method as the
    handler for (req_meth, urlpath) in the enclosing class's __mapping__.

    NOTE: relies on sys._getframe(1).f_locals, so it only works when called
    directly from a class body (or module scope) where f_locals is the real
    namespace dict being built.
    """
    ## Grab all local variables of the caller (the class body) as a dict.
    localvars = sys._getframe(1).f_locals
    ## Fetch the caller-local __mapping__ list, creating it on first use.
    ## Resulting structure, for example:
    ##   __mapping__ == [
    ##       # urlpath    # funcs
    ##       (r'/'     , {'GET': do_list, 'POST': do_create}),
    ##       (r'/{id}' , {'GET': do_show, 'PUT': do_edit, 'DELETE': do_delete}),
    ##   ]
    mapping = localvars.setdefault('__mapping__', [])
    ## Find the funcs dict for this urlpath; if none exists, append one.
    for upath, funcs in mapping:
        if upath == urlpath:
            break
    else:
        funcs = {}
        mapping.append((urlpath, funcs))
    ## Registering the same method twice for one urlpath is a coding error.
    if req_meth in funcs:
        raise ValueError("@on(%r, %r): duplicated." % (req_meth, urlpath))
    ## The decorator just records the function and returns it unchanged.
    def deco(func):
        funcs[req_meth] = func
        return func
    return deco
class HelloAction(Action):
    """Demo action serving a fixed in-memory list of people."""

    ITEMS = [
        {"name": "Alice"},
        {"name": "Bob"},
        {"name": "Charlie"},
    ]

    @on('GET', r'.json')
    def do_index(self):
        """Return every item."""
        return {"items": self.ITEMS}

    @on('GET', r'/{name:<\w+>}.json')
    def do_show(self, name):
        """Greet one person by name, or answer 404 for an unknown name."""
        hits = [entry for entry in self.ITEMS if entry['name'] == name]
        if not hits:
            self.resp.status = "404 Not Found"
            return {"error": "404 Not Found"}
        msg = "Hello, %s!" % name
        return {"message": msg}
class EnvironAction(Action):
    """Dump the WSGI environ (minus inherited OS env vars) as plain text."""

    @on('GET', r'')
    def do_render(self):
        """Render each non-OS environ entry as 'KEY (type) repr' lines."""
        environ = self.req.environ
        # Keys that merely mirror the process environment are skipped.
        rendered = "".join(
            "%-25s %-7s %r\n"
            % (key, "(%s)" % type(environ[key]).__name__, environ[key])
            for key in sorted(environ.keys())
            if key not in os.environ
        )
        self.resp.content_type = "text/plain;charset=utf-8"
        return rendered
class FormAction(Action):
    """Minimal GET/POST round-trip demo for form submission."""

    @on('GET', r'')
    def do_form(self):
        """Show the request method plus a POST form."""
        template = "\n".join([
            '<p>REQUEST_METHOD: %r</p>',
            '<form method="POST" action="/public/form">',
            '<input type="submit">',
            '</form>',
        ]) + "\n"
        return template % self.req.method

    @on('POST', r'')
    def do_post(self):
        """Acknowledge the POST and link back to the form."""
        template = "\n".join([
            '<p>REQUEST_METHOD: %r</p>',
            '<p><a href="/public/form">back</p>',
        ]) + "\n"
        return template % self.req.method
# URL-prefix routing table consumed by ActionMapping below.  Prefixes nest:
# HelloAction therefore serves /public/hello..., and so on.
mapping_list = [
    ['/public', [
        ('/hello' , HelloAction),
        ('/environ' , EnvironAction),
        ('/form' , FormAction),
    ]],
]
class ActionMapping(object):
    """Compiles a nested (urlpath, action-class) list into regex routes and
    resolves request paths to (action class, method dict, url kwargs)."""

    def __init__(self, mapping_list):
        self._mapping_list = self._build(mapping_list, [])

    def _build(self, mapping_list, new_list, base_urlpath=""):
        """Flatten the nested mapping into (path, regex, class, funcs) tuples.

        Lists nest recursively with their prefix prepended; leaf entries are
        action classes whose __mapping__ (filled by @on) is expanded here.
        """
        for urlpath, target in mapping_list:
            current_urlpath = base_urlpath + urlpath
            if isinstance(target, list):
                child_list = target
                self._build(child_list, new_list, current_urlpath)
            else:
                klass = target
                self._validate_action_class(klass)
                for upath, funcs in getattr(klass, '__mapping__'):
                    full_urlpath = current_urlpath + upath
                    rexp = re.compile(self._convert_urlpath(full_urlpath))
                    t = (full_urlpath, rexp, klass, funcs)
                    new_list.append(t)
        return new_list

    def _validate_action_class(self, klass):
        """Reject leaf targets that are not @on-decorated BaseAction classes."""
        if not isinstance(klass, type):
            raise TypeError("%r: expected action class." % (klass,))
        if not issubclass(klass, BaseAction):
            raise TypeError("%r: should be a subclass of BaseAction." % klass)
        if not hasattr(klass, '__mapping__'):
            raise ValueError("%r: no mapping data." % klass)

    def _convert_urlpath(self, urlpath): # ex: '/api/foo/{id}.json'
        """Translate a '{name:type<regex>}' path template into an anchored
        regex string with one named group per parameter."""
        def _re_escape(string):
            # re.escape also escapes '/', which we want to keep readable.
            return re.escape(string).replace(r'\/', '/')
        #
        param_rexps = {'str': r'[^/]+', 'int': r'\d+'}
        buf = ['^']; add = buf.append
        pos = 0
        for m in re.finditer(r'(.*?)\{(\w+)(:\w*)?(<[^>]*>)?\}', urlpath):
            pos = m.end(0) # ex: 13
            string, pname, ptype, prexp = m.groups() # ex: ('/api/foo/', 'id')
            if ptype: ptype = ptype[1:] # ex: ':int' -> 'int'
            if prexp: prexp = prexp[1:-1] # ex: '<\d+>' -> '\d+'
            #
            if not ptype:
                ptype = 'str'
            if ptype not in param_rexps:
                raise ValueError("%r: contains unknown data type %r." \
                                     % (urlpath, ptype))
            if not prexp:
                # No explicit regex: fall back to the type's default pattern.
                prexp = param_rexps[ptype]
            #
            add(_re_escape(string))
            add('(?P<%s>%s)' % (pname, prexp)) # ex: '(?P<id>[^/]+)'
        remained = urlpath[pos:] # ex: '.json'
        add(_re_escape(remained))
        add('$')
        return "".join(buf) # ex: '^/api/foo/(?P<id>[^/]+)\\.json$'

    def lookup(self, req_path):
        """Return (klass, funcs, kwargs) for the first matching route, or
        (None, None, None) when nothing matches."""
        for _, rexp, klass, funcs in self._mapping_list:
            m = rexp.match(req_path)
            if m:
                kwargs = m.groupdict() # ex: {"id": 123}
                # ex: return FooAction, {"GET": do_show}, {"id": 123}
                return klass, funcs, kwargs
        return None, None, None
class WSGIApplication(object):
    """WSGI entry point dispatching requests through an ActionMapping."""

    def __init__(self, mapping_list):
        """Accept either a prebuilt ActionMapping or a raw mapping list."""
        prebuilt = isinstance(mapping_list, ActionMapping)
        self._mapping = mapping_list if prebuilt else ActionMapping(mapping_list)

    def lookup(self, req_path):
        """Delegate URL resolution to the mapping."""
        return self._mapping.lookup(req_path)

    def __call__(self, environ, start_response):
        """Handle one WSGI request; return the body as a one-element list."""
        req = Request(environ)
        resp = Response()
        klass, funcs, kwargs = self.lookup(req.path)
        if klass is None:
            # No URL pattern matched at all.
            status = "404 Not Found"
            content = "<h2>%s</h2>" % status
        elif req.method not in funcs:
            # Path matched, but not for this request method.
            status = "405 Method Not Allowed"
            content = "<h2>%s</h2>" % status
        else:
            action = klass(req, resp)
            content = action.handle_action(funcs[req.method], kwargs)
            status = resp.status
        if req.method == 'HEAD':
            # HEAD responses carry headers only; drop the body.
            content = ""
        start_response(status, resp.header_list())
        return [content.encode('utf-8')]
# Module-level application object -- this is what a WSGI server imports.
wsgi_app = WSGIApplication(mapping_list)
if __name__ == "__main__":
    # Development server only; serves forever on localhost:7000.
    from wsgiref.simple_server import make_server
    wsgi_server = make_server('localhost', 7000, wsgi_app)
    wsgi_server.serve_forever()
|
{
"content_hash": "c3f82e8ee55f172e636d26edef8a5dcd",
"timestamp": "",
"source": "github",
"line_count": 283,
"max_line_length": 79,
"avg_line_length": 29.6113074204947,
"alnum_prop": 0.5107398568019093,
"repo_name": "kwatch/step-by-step",
"id": "a54b4e5013a9b9976f7d27aaa35e228987fd2629",
"size": "8726",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "framework_python/fw16.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "169302"
}
],
"symlink_target": ""
}
|
import unittest
import check_orderfile
import symbol_extractor
class TestCheckOrderFile(unittest.TestCase):
  """Unit tests for check_orderfile._VerifySymbolOrder."""

  # Fixture symbols; 'notProfiled' is deliberately absent from the tested
  # orderfiles and 'third' has a lower offset than 'notProfiled'.
  _SYMBOL_INFOS = [symbol_extractor.SymbolInfo('first', 0x1, 0, ''),
                   symbol_extractor.SymbolInfo('second', 0x2, 0, ''),
                   symbol_extractor.SymbolInfo('notProfiled', 0x4, 0, ''),
                   symbol_extractor.SymbolInfo('third', 0x3, 0, ''),]

  def testVerifySymbolOrder(self):
    """Order verification with differing tolerance thresholds.

    NOTE(review): the cases suggest '.second' (leading dot) is treated
    differently from 'second', and that the final integer argument is a
    tolerance for misordered symbols -- confirm against check_orderfile.
    """
    self.assertTrue(check_orderfile._VerifySymbolOrder(
        ['.second', 'first', 'eighth', 'third'],
        self._SYMBOL_INFOS, 0))
    self.assertFalse(check_orderfile._VerifySymbolOrder(
        ['second', 'first', 'eighth', 'third'],
        self._SYMBOL_INFOS, 0))
    self.assertTrue(check_orderfile._VerifySymbolOrder(
        ['second', 'first', 'eighth', 'third'],
        self._SYMBOL_INFOS, 1))

if __name__ == '__main__':
  unittest.main()
|
{
"content_hash": "f060344648a9d53456ea6de0d7ad4073",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 74,
"avg_line_length": 34.15384615384615,
"alnum_prop": 0.6171171171171171,
"repo_name": "chromium/chromium",
"id": "439a7977edbe6ae7e9cada2f80f7eeaec7aa639d",
"size": "1053",
"binary": false,
"copies": "7",
"ref": "refs/heads/main",
"path": "tools/cygprofile/check_orderfile_unittest.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
"""Support to interface with the Plex API."""
import logging
from homeassistant.components.media_player.errors import BrowseError
from .const import DOMAIN
# Media types that can be drilled into (have children) in the browse tree.
EXPANDABLES = ["album", "artist", "playlist", "season", "show"]
# Static browse node for the playlists section; children are filled in later.
PLAYLISTS_BROWSE_PAYLOAD = {
    "title": "Playlists",
    "media_content_id": "all",
    "media_content_type": "playlists",
    "can_play": False,
    "can_expand": True,
}
_LOGGER = logging.getLogger(__name__)
def browse_media(
    entity_id, plex_server, media_content_type=None, media_content_id=None
):
    """Implement the websocket media browsing helper."""

    def build_item_response(payload):
        """Create response payload for the provided media query."""
        media = plex_server.lookup_media(**payload)
        if media is None:
            return None
        media_info = item_payload(media)
        if media_info.get("can_expand"):
            # Expandable media gets one child entry per contained item.
            media_info["children"] = [item_payload(child) for child in media]
        return media_info

    if (
        media_content_type == "server"
        and media_content_id != plex_server.machine_identifier
    ):
        raise BrowseError(
            f"Plex server with ID '{media_content_id}' is not associated with {entity_id}"
        )

    # Top-level dispatch by content type; anything else is a plex key lookup.
    if media_content_type in ["server", None]:
        return server_payload(plex_server)
    if media_content_type == "library":
        return library_payload(plex_server, media_content_id)
    if media_content_type == "playlists":
        return playlists_payload(plex_server)

    response = build_item_response(
        {"media_type": DOMAIN, "plex_key": int(media_content_id)}
    )
    if response is None:
        raise BrowseError(f"Media not found: {media_content_type} / {media_content_id}")
    return response
def item_payload(item):
    """Create response payload for a single media item."""
    result = {
        "title": item.title,
        "media_content_id": str(item.ratingKey),
        "media_content_type": item.type,
        "can_play": True,
    }
    # hasattr (not getattr-with-default) so an explicit None thumbUrl is kept.
    if hasattr(item, "thumbUrl"):
        result["thumbnail"] = item.thumbUrl
    if item.type in EXPANDABLES:
        result["can_expand"] = True
    return result
def library_section_payload(section):
    """Create response payload for a single library section."""
    payload = dict(
        title=section.title,
        media_content_id=section.key,
        media_content_type="library",
        can_play=False,
        can_expand=True,
    )
    return payload
def server_payload(plex_server):
    """Create response payload to describe libraries of the Plex server."""
    info = {
        "title": plex_server.friendly_name,
        "media_content_id": plex_server.machine_identifier,
        "media_content_type": "server",
        "can_play": False,
        "can_expand": True,
    }
    # Photo libraries are skipped; they are not media-player targets.
    info["children"] = [
        library_section_payload(section)
        for section in plex_server.library.sections()
        if section.type != "photo"
    ]
    info["children"].append(PLAYLISTS_BROWSE_PAYLOAD)
    return info
def library_payload(plex_server, library_id):
    """Create response payload to describe contents of a specific library."""
    library = plex_server.library.sectionByID(library_id)
    payload = library_section_payload(library)
    payload["children"] = [item_payload(entry) for entry in library.all()]
    return payload
def playlists_payload(plex_server):
    """Create response payload for all available playlists."""
    children = [item_payload(playlist) for playlist in plex_server.playlists()]
    return {**PLAYLISTS_BROWSE_PAYLOAD, "children": children}
|
{
"content_hash": "bd7bcba9621fd0245576c405bc472aee",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 90,
"avg_line_length": 31.088,
"alnum_prop": 0.6353576942871848,
"repo_name": "titilambert/home-assistant",
"id": "ac316edb938c914bc1d5a43df2a32b2e7c0e1b4c",
"size": "3886",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/plex/media_browser.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1488"
},
{
"name": "Python",
"bytes": "25849092"
},
{
"name": "Shell",
"bytes": "4410"
}
],
"symlink_target": ""
}
|
from anibots import *
import breve
class AnibotPhysicsSim( breve.PhysicalControl ):
	"""Breve controller for the anibot push-test simulation: sets up the
	physics world, two spherical bots, a block to push, and video logging."""

	def __init__( self ):
		# NOTE(review): calls breve.Control.__init__ although the base class
		# is breve.PhysicalControl -- confirm this is how breve expects
		# converted controllers to initialize.
		breve.Control.__init__( self )
		self.bots = breve.objectList()
		self.actionDuration = 45
		self.iterCount=0
		self.videoLog = breve.Movie()
		self.block = None
		#configure the anibots
		self.env = None
		self.numBots = 1
		self.iterations = 20
		self.kDepth = 3
		self.takeTurns = True
		# Data files: genome, phenotype edges, and proxy weights for the bots.
		self.anibotConfig = AnibotConfig("final-x-g.dat", "final-x-pedge.dat","pweights-alt.dat")
		self.anibotConfig.proxify = False
		# bool proxyWeightsProportional;
		# float proxyWeightsProportion;
		#bool randomizeEdges
		self.anibotConfig.randomize = False
		#self.anibotConfig.quant = 11
		#self.anibotConfig.quantDiff = 1
		#anibotConfig.quantIrregular;
		#self.anibotConfig.randMin = 0
		#self.anibotConfig.randMax = 10
		# bool singleTops;
		AnibotPhysicsSim.init( self )

	def init( self ):
		"""Build the physical scene: lighting, floor, camera, bots, block."""
		print '''Setting up Anibot environment'''
		# start the anibots environment (mental simulation)
		#self.env = AgentEnv("test_anibots_exp0.py",self.kDepth,self.takeTurns)
		#self.env.NewAnibot(self.numBots,self.anibotConfig)
		#self.env.InitLoner(0)
		#self.env.InitTransaction(0,1)
		print '''Setting up Physics Sim.'''
		#start the visual/physical environment in Breve
		self.setDisplayText( "Anibots Sim", -1.0, 0.8, 1 )
		self.setRandomSeedFromDevRandom()
		self.enableFastPhysics()
		self.setFastPhysicsIterations( 15 )
		#self.setGravity( breve.vector(0.0,-3.0,0.0) )
		self.enableLighting()
		self.enableSmoothDrawing()
		self.moveLight( breve.vector( 20, 30, 20 ) )
		floor = breve.createInstances( breve.Floor, 1 )
		floor.catchShadows()
		#floor.setE( 1.000000 )
		# Frictionless floor so the block slides freely when pushed.
		floor.setMu(0.0)
		#floor.showAxis()
		self.cloudTexture = breve.createInstances( breve.Image, 1 ).load( 'images/clouds.png' )
		self.enableShadowVolumes()
		self.enableReflections()
		self.setBackgroundColor( breve.vector( 0.400000, 0.600000, 0.900000 ) )
		self.setBackgroundTextureImage( self.cloudTexture )
		#self.offsetCamera( breve.vector( 3, 13, -13 ) )
		self.pointCamera( breve.vector( 0, 0, 0 ), breve.vector( 20, 20, 60 ) )
		#the virtual bodies
		# Two bots start nearly coincident (tiny +/- x offsets) behind the block.
		self.bots = breve.createInstances( breve.AnibotBody, 2 )
		self.bots[0].move( breve.vector( 0.00001, self.bots[0].radius, 14 ) )
		self.bots[1].move( breve.vector( -0.00001, self.bots[1].radius, 14 ) )
		#self.env.Next()
		#the block
		self.block = breve.createInstances( breve.Mobile, 1 )
		shape = breve.createInstances( breve.Cube, 1 ).initWith( breve.vector(15,3,4) )
		shape.setMass(0.5)
		self.block.setShape(shape)
		self.block.setColor(breve.vector( 1.0, 0.5 ,0.0 ))
		self.block.move( breve.vector( 0.0, 1.5 ,0.0 ) )
		self.block.setMu(0.0)
		#self.block.setE(0.1)
		self.block.enablePhysics()
		print self.block.getMass()
		#self.block.setForce( breve.vector( 500.0, 500.0 , 500.0 ) )
		#self.block.setVelocity( breve.vector( 0, 0, -10 ) )
		#self.watch( self.bots[0] )
		self.watch( self.block )
		self.videoLog.record("anibots-pushtest2center.mpg")

	def iterate( self ):
		"""Per-step callback: show how far the block has been pushed (-z)."""
		s2 = "block dist: %.2f" % (-self.block.getLocation()[2])
		self.setDisplayText(s2, -1.0, 0.5, 6)
		#self.iterCount = self.iterCount +1
		#if self.iterCount==1:
		#	self.bots[1].move( breve.vector( 0.1, self.bots[1].radius, self.bots[1].getLocation()[2] ) )
		breve.Control.iterate( self )
# Register the class in the breve namespace (breve convention).
breve.AnibotPhysicsSim = AnibotPhysicsSim
class AnibotBody( breve.Mobile ):
	"""Spherical physics body for one anibot; drives itself forward in -z."""

	def __init__( self ):
		breve.Mobile.__init__( self )
		self.radius = 1.5
		AnibotBody.init( self )

	def init( self ):
		"""Create the sphere shape, color it randomly, and enable physics."""
		shape = breve.createInstances( breve.Sphere, 1 ).initWith( self.radius )
		shape.setDensity(100)
		self.setShape( shape )
		#self.setShape( breve.createInstances( breve.Cube, 1 ).initWith( breve.vector(self.radius,self.radius,self.radius) ))
		self.setColor( breve.randomExpression( breve.vector( 1.000000, 1.000000, 1.000000 ) ) )
		#self.move( breve.vector( breve.randomExpression(8.0)-4.0, self.radius, breve.randomExpression(20.0) + 8.0 ) )
		self.move( breve.vector( 0.0, self.radius, 14.0 ) )
		print self.getMass()
		self.enablePhysics()
		#self.setVelocity( breve.vector( 0.0, 0.0, -2.0 ) )
		#self.setForce( breve.vector( 0.0, 0.0, -100.0 ) )

	def moveX( self, x ):
		"""Teleport to x (and 2 units further along +z) if not already there."""
		if self.getLocation()[0] != x:
			z = self.getLocation()[2]
			self.move( breve.vector( x, self.radius, z+2 ) )

	def iterate( self ):
		# Constant forward drive toward the block each simulation step.
		#print self.getVelocity()
		self.setVelocity( breve.vector( 0.0, 0.0, -2.0 ) )
# Register the class in the breve namespace (breve convention).
breve.AnibotBody = AnibotBody
# Create an instance of our controller object to initialize the simulation
AnibotPhysicsSim()
|
{
"content_hash": "96917552a1f8a609c30c7e555dc4934e",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 119,
"avg_line_length": 33.63503649635037,
"alnum_prop": 0.6861979166666666,
"repo_name": "SynapticNulship/Anibots",
"id": "dadfcce92063322d830e2741a1ed43d27dda022b",
"size": "5136",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sim_py/anibots_breve_pushtest2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "266830"
},
{
"name": "Python",
"bytes": "61549"
}
],
"symlink_target": ""
}
|
import io
import json
import os
import unittest
from . import person
from .fhirdate import FHIRDate
class PersonTests(unittest.TestCase):
    """Round-trip tests for the FHIR Person model.

    Each test loads an example JSON resource, checks its fields, serializes
    it back with as_json(), re-instantiates, and checks again.  The JSON
    fixtures live under FHIR_UNITTEST_DATADIR (or the current directory).
    NOTE(review): this file looks auto-generated from FHIR examples.
    """

    def instantiate_from(self, filename):
        """Load a Person from a JSON example file and verify resourceType."""
        datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
        with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
            js = json.load(handle)
            self.assertEqual("Person", js["resourceType"])
        return person.Person(js)

    def testPerson1(self):
        """Example f002 (Ariadne): instantiate, check, serialize, re-check."""
        inst = self.instantiate_from("person-example-f002-ariadne.json")
        self.assertIsNotNone(inst, "Must have instantiated a Person instance")
        self.implPerson1(inst)
        js = inst.as_json()
        self.assertEqual("Person", js["resourceType"])
        inst2 = person.Person(js)
        self.implPerson1(inst2)

    def implPerson1(self, inst):
        """Field-level assertions for the f002 example."""
        self.assertTrue(inst.active)
        self.assertEqual(inst.birthDate.date, FHIRDate("1963").date)
        self.assertEqual(inst.birthDate.as_json(), "1963")
        self.assertEqual(inst.gender, "female")
        self.assertEqual(inst.id, "f002")
        self.assertEqual(inst.name[0].text, "Ariadne Bor-Jansma")
        self.assertEqual(inst.name[0].use, "usual")
        self.assertEqual(inst.photo.contentType, "image/jpeg")
        self.assertEqual(inst.telecom[0].system, "phone")
        self.assertEqual(inst.telecom[0].use, "home")
        self.assertEqual(inst.telecom[0].value, "+31201234567")
        self.assertEqual(inst.text.status, "generated")

    def testPerson2(self):
        """General example: instantiate, check, serialize, re-check."""
        inst = self.instantiate_from("person-example.json")
        self.assertIsNotNone(inst, "Must have instantiated a Person instance")
        self.implPerson2(inst)
        js = inst.as_json()
        self.assertEqual("Person", js["resourceType"])
        inst2 = person.Person(js)
        self.implPerson2(inst2)

    def implPerson2(self, inst):
        """Field-level assertions for the general example."""
        self.assertTrue(inst.active)
        self.assertEqual(inst.address[0].city, "PleasantVille")
        self.assertEqual(inst.address[0].line[0], "534 Erewhon St")
        self.assertEqual(inst.address[0].postalCode, "3999")
        self.assertEqual(inst.address[0].state, "Vic")
        self.assertEqual(inst.address[0].use, "home")
        self.assertEqual(inst.birthDate.date, FHIRDate("1974-12-25").date)
        self.assertEqual(inst.birthDate.as_json(), "1974-12-25")
        self.assertEqual(inst.gender, "male")
        self.assertEqual(inst.id, "example")
        self.assertEqual(inst.identifier[0].period.start.date, FHIRDate("2001-05-06").date)
        self.assertEqual(inst.identifier[0].period.start.as_json(), "2001-05-06")
        self.assertEqual(inst.identifier[0].system, "urn:oid:1.2.36.146.595.217.0.1")
        self.assertEqual(inst.identifier[0].type.coding[0].code, "MR")
        self.assertEqual(inst.identifier[0].type.coding[0].system, "http://hl7.org/fhir/v2/0203")
        self.assertEqual(inst.identifier[0].use, "usual")
        self.assertEqual(inst.identifier[0].value, "12345")
        self.assertEqual(inst.name[0].family[0], "Chalmers")
        self.assertEqual(inst.name[0].given[0], "Peter")
        self.assertEqual(inst.name[0].given[1], "James")
        self.assertEqual(inst.name[0].use, "official")
        self.assertEqual(inst.name[1].given[0], "Jim")
        self.assertEqual(inst.name[1].use, "usual")
        self.assertEqual(inst.telecom[0].use, "home")
        self.assertEqual(inst.telecom[1].system, "phone")
        self.assertEqual(inst.telecom[1].use, "work")
        self.assertEqual(inst.telecom[1].value, "(03) 5555 6473")
        self.assertEqual(inst.text.status, "generated")
|
{
"content_hash": "190391af4ecb378e36769180b9345c67",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 97,
"avg_line_length": 45.06172839506173,
"alnum_prop": 0.6531506849315069,
"repo_name": "all-of-us/raw-data-repository",
"id": "c19b18faa7a3d2ff094fdf3688e51b40f4a97475",
"size": "3775",
"binary": false,
"copies": "1",
"ref": "refs/heads/devel",
"path": "rdr_service/lib_fhir/fhirclient_1_0_6/models/person_tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1866"
},
{
"name": "Mako",
"bytes": "1715"
},
{
"name": "Python",
"bytes": "17040924"
},
{
"name": "R",
"bytes": "2212"
},
{
"name": "Shell",
"bytes": "92213"
}
],
"symlink_target": ""
}
|
import paramiko,socket
# ssh function
def sshConnect(ip, username, password, command):
client = paramiko.SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.WarningPolicy())
try:
client.connect(ip, username=username, password=password)
stdin, stdout, stderr = client.exec_command(command)
#print stderr
result_before = stdout.read()
#print result_before
result = result_before.splitlines()
print "Success!! connection",
except paramiko.AuthenticationException:
print "Authentication problem"
result = None
except socket.error, e:
print "Comunication problem "
result = None
client.close()
return result
# main function
if __name__ == "__main__":
    # NOTE(review): hard-coded lab credentials/IP -- fine for an exercise,
    # but should come from arguments or config in real use.
    ip = "10.100.0.100"
    username = "junos"
    password = "junos123"
    command = "show chassis alarm"
    result = sshConnect(ip, username, password, command)
    print result
|
{
"content_hash": "8b561d8537d63081e07396c831be488b",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 64,
"avg_line_length": 29.87878787878788,
"alnum_prop": 0.6511156186612576,
"repo_name": "trjones841/pynet_class",
"id": "37a3558f24639c2e910b6673347ff34d52d55182",
"size": "986",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Exercises/Week4/juniper_paramiko.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "11482"
},
{
"name": "Hack",
"bytes": "333"
},
{
"name": "Python",
"bytes": "47602"
}
],
"symlink_target": ""
}
|
from __future__ import (print_function, division, absolute_import, unicode_literals)
from django.core.exceptions import ObjectDoesNotExist
from rest_framework import viewsets
from rest_framework import status
from rest_framework.permissions import IsAuthenticated
from rest_framework.decorators import detail_route, list_route, permission_classes
from rest_framework.response import Response
from .models import Card, CardType
from .serializers import CardSerializer, CardTypeSerializer, ModelCardSerializer, ModelCardTypeSerializer
class CardViewSet(viewsets.ModelViewSet):
    """Authenticated CRUD endpoints for Card."""
    permission_classes = (IsAuthenticated,)
    queryset = Card.objects.all()
    serializer_class = CardSerializer
class CardTypeViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for CardType.

    NOTE(review): unlike CardViewSet, no permission_classes are set here,
    so this falls back to the project default -- confirm it is intended
    to be reachable without authentication.
    """
    queryset = CardType.objects.all()
    serializer_class = CardTypeSerializer
class ModelCardViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for Card (model-serializer flavor) plus two read-only
    extras: a collection count and a per-card modified timestamp.

    Fix: removed the `create` override that only called `super().create` --
    it added no behavior (dead code); the inherited implementation is used.
    """
    queryset = Card.objects.all()
    serializer_class = ModelCardSerializer

    @list_route()
    def count(self, request):
        """GET .../count/ -- total number of Card rows."""
        return Response(Card.objects.count())

    @detail_route()
    def modified(self, request, pk):
        """GET .../<pk>/modified/ -- the card's modified timestamp.

        Responds 400 (with the original message) when the card is missing.
        """
        try:
            card = Card.objects.get(pk=pk)
            return Response(card.modified)
        except ObjectDoesNotExist:
            return Response('Card not Exist', status=status.HTTP_400_BAD_REQUEST)
class ModelCardTypeViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for CardType using the model serializer."""
    queryset = CardType.objects.all()
    serializer_class = ModelCardTypeSerializer
|
{
"content_hash": "c92fd69c57048830014cab99a9e12e0e",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 105,
"avg_line_length": 32.3125,
"alnum_prop": 0.75177304964539,
"repo_name": "mrjmad/gnu_linux_mag_drf",
"id": "e74f559d3414ee56acc7c8812da7c7260cda47d2",
"size": "1576",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hall_of_cards/cardsgame/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12082"
}
],
"symlink_target": ""
}
|
# WSGI shim: expose the files-service app under the conventional name
# 'application' and run its one-time setup at import.
from cc_server.services.files.__main__ import app, prepare
application = app
prepare()
|
{
"content_hash": "1cf8b2c8e8852af2a7444df47a3d5ff6",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 58,
"avg_line_length": 22,
"alnum_prop": 0.75,
"repo_name": "curious-containers/cc-server",
"id": "3a5e2714a4c6a5c6f88b66a806718202d0a0949b",
"size": "88",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cc_server/services/files/wsgi.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "490"
},
{
"name": "Python",
"bytes": "133428"
},
{
"name": "Shell",
"bytes": "3323"
}
],
"symlink_target": ""
}
|
from django.db import models
import uuid
class UserAccess(models.Model):
    """One recorded visit: requester IP, language, and consent choice."""

    # NOTE(review): Django's GenericIPAddressField fixes its own max_length;
    # the max_length=30 passed here appears inert -- confirm.
    ip_addr = models.GenericIPAddressField(max_length=30)
    lang_code = models.CharField(max_length=10)
    # auto_now: refreshed on every save, not just creation.
    timestamp = models.DateTimeField(auto_now=True)
    # accepted value: yes | no | na (not available)
    accepted = models.CharField(max_length=5)
    email = models.EmailField(null=True, blank=True)

    @classmethod
    def create(cls, ip_addr, lang_code, accepted):
        """Build (but do not save) a UserAccess; email stays unset."""
        useraccess = cls(ip_addr=ip_addr, lang_code=lang_code,
                         accepted=accepted)
        return useraccess
class AccessLocation(models.Model):
    """Optional geolocation resolved for a single UserAccess row."""

    # One location record per access; all geo fields may be unknown (null).
    access_id = models.OneToOneField(UserAccess)
    country = models.CharField(max_length=50,null=True,blank=True)
    city = models.CharField(max_length=50,null=True,blank=True)
    lat = models.FloatField(null=True,blank=True)
    lon = models.FloatField(null=True,blank=True)
    timestamp = models.DateTimeField(auto_now=True)

    @classmethod
    def create(cls, access_id, country, city, lat, lon):
        """Build (but do not save) an AccessLocation for the given access."""
        accesslocation = cls(access_id=access_id,country=country,city=city,
                             lat=lat,lon=lon)
        return accesslocation
|
{
"content_hash": "039590678009d90048744bd221a218cc",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 75,
"avg_line_length": 34.85294117647059,
"alnum_prop": 0.6818565400843882,
"repo_name": "coolkang/cmcweb",
"id": "921bd8d0f6df3ee5a9f3dee379da87de70b416bf",
"size": "1185",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cmcprj/webpages/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4893"
},
{
"name": "HTML",
"bytes": "114799"
},
{
"name": "Python",
"bytes": "25660"
}
],
"symlink_target": ""
}
|
import numpy as np
import six
from chainercv.utils.testing.assertions.assert_is_bbox import assert_is_bbox
from chainercv.utils.testing.assertions.assert_is_image import assert_is_image
def assert_is_bbox_dataset(dataset, n_fg_class, n_example=None):
    """Checks if a dataset satisfies the bounding box dataset API.

    This function checks if a given dataset satisfies the bounding box
    dataset API or not.  If the dataset does not satisfy the API, this
    function raises an :class:`AssertionError`.

    Args:
        dataset: A dataset to be checked.
        n_fg_class (int): The number of foreground classes.
        n_example (int): The number of examples to be checked.
            If this argument is specified, this function picks
            examples ramdomly and checks them. Otherwise,
            this function checks all examples.

    """
    assert len(dataset) > 0, 'The length of dataset must be greater than zero.'

    if n_example:
        # Draw each random index lazily, right before its check, so the
        # randint call sequence matches the original loop exactly.
        index_iter = (
            np.random.randint(0, len(dataset))
            for _ in six.moves.range(n_example))
    else:
        index_iter = six.moves.range(len(dataset))
    for idx in index_iter:
        _check_example(dataset[idx], n_fg_class)
def _check_example(example, n_fg_class):
    """Validate one dataset example: an (img, bbox, label, ...) tuple."""
    assert len(example) >= 3, \
        'Each example must have at least three elements:' \
        'img, bbox and label.'

    image, boxes, labels = example[:3]
    assert_is_image(image, color=True)
    assert_is_bbox(boxes, size=image.shape[1:])

    # Labels: int32 vector, one entry per box, values in [0, n_fg_class).
    assert isinstance(labels, np.ndarray), \
        'label must be a numpy.ndarray.'
    assert labels.dtype == np.int32, \
        'The type of label must be numpy.int32.'
    assert labels.shape[1:] == (), \
        'The shape of label must be (*,).'
    assert len(labels) == len(boxes), \
        'The length of label must be same as that of bbox.'
    if len(labels) > 0:
        assert labels.min() >= 0 and labels.max() < n_fg_class, \
            'The value of label must be in [0, n_fg_class - 1].'
|
{
"content_hash": "1defd2588a2f728a57ecd8c70da5d7f8",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 79,
"avg_line_length": 35.280701754385966,
"alnum_prop": 0.6364992541024366,
"repo_name": "pfnet/chainercv",
"id": "96ac8591ca497df76e9e899a86f2c2af7aa1e04d",
"size": "2011",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "chainercv/utils/testing/assertions/assert_is_bbox_dataset.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "317937"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
from abc import ABCMeta, abstractmethod, abstractproperty
import os, shlex, zlib, fnmatch, datetime
from urlparse import urlparse
from castlib3.models.filesystem import File, Folder
# Keeps all the declared backends classes indexed by their schemes
gCastlibBackends = {}
class BackendMetaclass(ABCMeta):
    """Metaclass that auto-registers backend classes by URI scheme.

    Every concrete subclass of AbstractBackend created with this metaclass
    must declare a ``__backendAttributes__`` dict containing a ``scheme``
    key; the new class is stored in the module-level ``gCastlibBackends``
    registry under that scheme.
    """
    def __new__(cls, clsname, bases, dct):
        global gCastlibBackends
        # Require new backend class to inherit the AbstractBackend:
        # (AbstractBackend itself is exempt — it does not exist yet when
        # this metaclass first runs for it.)
        baseCorrect = clsname == 'AbstractBackend'
        if not baseCorrect:
            for base in bases:
                if issubclass(base, AbstractBackend):
                    baseCorrect = True
                    break
        if not baseCorrect:
            raise TypeError( '%s is not a subclass of AbstractBackend.'%clsname )
        if clsname != 'AbstractBackend':
            # __backendAttributes__ is popped so it never appears on the
            # finished class; only the extracted 'scheme' is kept.
            backendClsDetails = dct.pop( '__backendAttributes__' )
            scheme = backendClsDetails['scheme']
            dct['scheme'] = scheme # set the class property
            # Duplicate scheme registration is a programming error.
            if scheme in gCastlibBackends.keys():
                raise KeyError('Backend class for scheme "%s" had been '
                        'declared before.'%scheme )
            # ... other backend attributes?
        classInstance = super(BackendMetaclass, cls) \
                .__new__( cls, clsname, bases, dct )
        # Register only after the class was successfully created.
        if clsname != 'AbstractBackend':
            gCastlibBackends[scheme] = classInstance
        return classInstance
class AbstractBackend(object):
    """
    An interfacing class defining how to handle common operations with
    filesystem entries within particular environment.
    """
    # BUGFIX: the docstring above used to sit *after* this assignment, which
    # made it a no-op string expression (the class had __doc__ == None).
    # Python 2 metaclass declaration; registers subclasses by scheme.
    __metaclass__ = BackendMetaclass

    @abstractmethod
    def get_adler32(self, path):
        """Shall return the hexidecimal digest built from the file content."""
        pass

    @abstractmethod
    def get_size(self, path):
        """Returns size (in bytes) of given file. Directories aren't supported."""
        pass

    @abstractmethod
    def get_modified(self, path):
        """The return value is a number giving the number of seconds since the epoch."""
        pass

    @abstractmethod
    def set_modified(self, path, dtObject):
        """
        Must set modified timestamp of filesystem entry identified by given URI
        to given datetime object.
        """
        pass

    @abstractmethod
    def get_permissions(self, path):
        """Returns encoded permissions (lowest nine mode bits)."""
        # Reference implementation for local paths; overrides may reuse it.
        return os.stat( path ).st_mode & 0o777

    @abstractmethod
    def listdir(self, path):
        """
        Returns a list contining the names of the entries in the directory
        given by path.
        """
        pass

    @abstractmethod
    def isfile(self, path):
        """
        Return True if path is an existing regular file. This follows symbolic
        links, so both islink() and isfile() can be true for the same path.
        """
        pass

    @abstractmethod
    def isdir(self, path):
        """
        Return True if path is an existing directory. This follows symbolic
        links, so both islink() and isdir() can be true for the same path.
        """
        pass

    @abstractmethod
    def islink(self, path):
        """
        Return True if path refers to a directory entry that is a symbolic
        link. Always False if symbolic links are not supported by the Python
        runtime.
        """
        pass

    # NOTE(review): this abstract stub takes no `self`; concrete
    # implementations (e.g. LocalBackend) declare it as a bound method —
    # the stub is never called, so the mismatch is harmless, but confirm
    # before relying on the abstract signature.
    @abstractmethod
    def get_dir_content( dirPath, virtualPath, onlyPats=None, ignorePats=None, extra={} ):
        """
        Utility method returning list in form:
            [
                {
                    'folder' : <pathPair>,
                    'files' : [<filename1>, <filename2>, ...]
                    'subFolders' : [ <dir1>, <dir2>, ... ],
                    ... #
                },
                ...
            ]
        Will be invoked once by filesystem.discover_entries(). See additional
        information there.
        """
        pass

    @abstractmethod
    def uri_from_path(self, path, *args, **kwargs):
        """Shall build a full URI (scheme prefix included) for a local path."""
        pass

    @abstractmethod
    def del_file(self, path):
        """
        Has to delete file referenced by given URI.
        """
        pass

    @abstractmethod
    def cpy_file(self, srcURI, dstURI, backends={} ):
        """
        Has to copy file from srcURI location to dstURI location using given
        backends.
        """
        pass

    def rewrite_file(self, srcURI, dstURI, backends={}):
        """
        This method may be used for incremental appending/patching the file
        (in case of size or checksum mismatch). The default implementation
        does not rely on appending/patching, but just deletes the copy and
        copies original instead of it.

        Returns a (delete-result, copy-result) pair.
        """
        origLPP = urlparse( srcURI )
        # A URI without an explicit scheme is treated as a local file.
        assert( (origLPP.scheme or 'file') in backends.keys() )
        delResult = self.del_file( dstURI )
        return delResult, self.cpy_file( srcURI, dstURI, backends=backends )

    def new_file( self, path, **kwargs):
        """
        Shall return new file instance created using backend functions.
        Additional keyword arguments will be forwarded directly to underlying
        sqlalchemy ctr. The keyword arguments takes precedence on the ones
        obtained using back-end.
        """
        kwd = dict(kwargs)
        # Fields listed in syncFields are filled from the backend getters
        # (get_modified, get_size, ...) unless the caller supplied them.
        sf = kwd.pop('syncFields', ['modified', 'size'])
        for k in sf:
            if k in kwd.keys():
                # Attribute was explicitly set.
                continue
            kwd[k] = getattr(self, 'get_' + k)(path)
        return File( **kwd )

    def new_folder( self, path, **kwargs ):
        """
        Returns new folder database entry object created using the backend
        functions. The keyword arguments takes precedence on the ones obtained
        using back-end.
        """
        kwd = dict(kwargs)
        for k in kwd.pop('syncFields', ['modified']):
            if k in kwd.keys():
                # Attribute was explicitly set.
                continue
            kwd[k] = getattr(self, 'get_' + k)(path)
        return Folder( **kwd )
#
# Local backend
def adler32( filename, blocksize=65536 ):
    """Compute the Adler-32 checksum of a file, reading it in chunks.

    Args:
        filename: Path of the file to checksum.
        blocksize: Read-buffer size in bytes.

    Returns:
        The checksum as an unsigned 32-bit integer.
    """
    # BUGFIX: previously seeded with zlib.adler32("") — a str argument,
    # which raises TypeError on Python 3. The documented initial Adler-32
    # value is 1, identical to zlib.adler32(b"").
    checksum = 1
    with open(filename, "rb") as f:
        for block in iter(lambda: f.read(blocksize), b""):
            checksum = zlib.adler32(block, checksum)
    # Mask to keep the result unsigned across Python versions.
    return checksum & 0xffffffff
class LocalBackend(AbstractBackend):
    """
    This implementation implies local filesystem operations only.
    The particular backend will be selected basing on string identifier
    usually prefixing the URIs.
    """
    __metaclass__ = BackendMetaclass
    # Consumed by BackendMetaclass: registers this class for "file://" URIs.
    __backendAttributes__ = {
        'scheme' : 'file'
    }

    def get_adler32(self, path):
        """Shall return the hexidecimal digest built from the file content."""
        lpp = urlparse(path)
        return '%x'%adler32( lpp.path )

    def get_size(self, path):
        """Returns size (in bytes) of given file. Directories aren't supported."""
        lpp = urlparse(path)
        return os.path.getsize(lpp.path)

    def get_modified(self, path):
        """Return the file's mtime as a naive datetime (local timezone)."""
        lpp = urlparse(path)
        mt = os.path.getmtime(lpp.path)
        return datetime.datetime.fromtimestamp(mt)

    def set_modified(self, path, dtObject):
        """Not supported for the local backend yet."""
        raise NotImplementedError()

    def get_permissions(self, path):
        """Returns encoded permissions (lowest nine mode bits)."""
        lpp = urlparse(path)
        return os.stat( lpp.path ).st_mode & 0o777

    def listdir(self, path):
        """
        Returns a list contining the names of the entries in the directory
        given by path.
        """
        lpp = urlparse(path)
        return os.listdir(lpp.path)

    def isfile(self, path):
        """
        Return True if path is an existing regular file. This follows symbolic
        links, so both islink() and isfile() can be true for the same path.
        """
        lpp = urlparse(path)
        return os.path.isfile(lpp.path)

    def isdir(self, path):
        """
        Return True if path is an existing directory. This follows symbolic
        links, so both islink() and isdir() can be true for the same path.
        """
        lpp = urlparse(path)
        return os.path.isdir(lpp.path)

    def islink(self, path):
        """
        Return True if path refers to a directory entry that is a symbolic
        link. Always False if symbolic links are not supported by the Python
        runtime.
        """
        lpp = urlparse(path)
        # BUGFIX: the whole ParseResult used to be passed to os.path.islink();
        # it must receive the path component, like every other method here.
        return os.path.islink(lpp.path)

    def del_file(self, path):
        raise NotImplementedError() # TODO

    def cpy_file(self, srcURI, dstURI, backends={} ):
        raise NotImplementedError() # TODO

    def get_dir_content(self, dirPath, onlyPats=None, ignorePats=None, extra={} ):
        """Recursively describe a directory as {folder, files, subFolders}.

        onlyPats/ignorePats are fnmatch-style include/exclude patterns
        (string or list of strings). ``extra`` entries are merged into the
        returned dict of this level only — the recursion below does not
        forward them to sub-folders.
        """
        # Get list of all files and sub-directories in current dir
        contentLst = [f for f in self.listdir(dirPath) \
                if self.isfile(os.path.join(dirPath, f)) \
                or self.isdir(os.path.join(dirPath, f))]
        if onlyPats:
            if type(onlyPats) is str:
                onlyPats = [onlyPats,]
            contentLst_ = []
            for wcard in onlyPats:
                contentLst_.extend( fnmatch.filter(contentLst, wcard) )
            contentLst = contentLst_
        if ignorePats:
            if type(ignorePats) is str:
                ignorePats = [ignorePats,]
            for wcard in ignorePats:
                contentLst = list(filter( lambda nm : \
                        not fnmatch.fnmatch(nm, wcard), contentLst ))
        files = [ f for f in contentLst if self.isfile(os.path.join(dirPath, f)) ]
        subds = [ d for d in contentLst if self.isdir(os.path.join(dirPath, d)) ]
        ret = {
                'folder' : dirPath,
                'files' : set(files),
                'subFolders' : []
            }
        for subd in subds:
            ret['subFolders'].append(
                    self.get_dir_content( os.path.join(dirPath, subd),
                        onlyPats=onlyPats, ignorePats=ignorePats) )
        ret.update(extra)
        return ret

    def uri_from_path(self, path):
        """Prefix a local path with the file:// scheme."""
        return 'file://' + path
|
{
"content_hash": "b9fd7f4e1d082f5bcd08a619568d9c9e",
"timestamp": "",
"source": "github",
"line_count": 304,
"max_line_length": 90,
"avg_line_length": 34.17763157894737,
"alnum_prop": 0.5836381135707411,
"repo_name": "CrankOne/castlib",
"id": "11e1db15172366a41adfc1490b2abb8612e8fe99",
"size": "11574",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "castlib3/backend.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2484"
},
{
"name": "Python",
"bytes": "141612"
}
],
"symlink_target": ""
}
|
import numpy as np
import cv2
import cv2.cv as cv
import frame
import time
class Camera:
    """Thin wrapper around an OpenCV capture device.

    If Camera is initialized with frame_path, Camera will cache a Frame
    read from disk for further reading. If the image cannot be found on
    disk, Camera snapshots a new image and caches that instead.
    """

    def __init__(self, frame_path = None):
        self.cap = cv2.VideoCapture(0)
        self.width, self.height = 320, 320
        self.cap.set(cv.CV_CAP_PROP_FRAME_WIDTH, self.width)
        self.cap.set(cv.CV_CAP_PROP_FRAME_HEIGHT, self.height)
        # wait until camera has been initialized
        time.sleep(1)
        self.cache_frame(frame_path)

    def cache_frame(self, frame_path):
        """Cache a Frame loaded from frame_path (snapshotting one if absent)."""
        self.cached_frame = None
        if not frame_path:
            return
        img = cv2.imread(frame_path)
        if img is None:
            # Nothing on disk yet - capture a fresh image, then reload it.
            self.snapshot(frame_path)
            img = cv2.imread(frame_path)
        self.cached_frame = frame.Frame(img)

    def read(self):
        """Read one raw frame from the device, rotated by 90 degrees."""
        ret, raw = self.cap.read()
        center = (self.width / 2, self.height / 2)
        rotation_matrix = cv2.getRotationMatrix2D(center, 90, 1)
        return cv2.warpAffine(raw, rotation_matrix, (self.width, self.height))

    def read_frame(self):
        """Return the cached Frame if any, otherwise wrap a fresh read."""
        if self.cached_frame:
            return self.cached_frame
        return frame.Frame(self.read())

    def snapshot(self, filename = 'frame.jpg'):
        """Write the current frame to filename, then release the device."""
        cv2.imwrite(filename, self.read())
        self.close()

    def close(self):
        """Release the underlying capture device."""
        self.cap.release()
|
{
"content_hash": "c8ab6f600481c1e01a943cdfb99a1305",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 103,
"avg_line_length": 31.693877551020407,
"alnum_prop": 0.6387636831938184,
"repo_name": "mlensment/rebot",
"id": "4e486900240d55d336809299027b5bac2be389fe",
"size": "1553",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/camera.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18255"
},
{
"name": "Shell",
"bytes": "291"
}
],
"symlink_target": ""
}
|
import discord, time
from discord.ext import commands
from Cogs import Utils, PCPP, DisplayName, Message, PickList
def setup(bot):
	"""Attach the Hw cog to the bot, wiring in its Settings dependency."""
	bot.add_cog(Hw(bot, bot.get_cog("Settings")))
# This is the Uptime module. It keeps track of how long the bot's been up
class Hw(commands.Cog):
# Init with the bot reference, and a reference to the settings var
	def __init__(self, bot, settings):
		self.bot = bot
		self.settings = settings
		self.hwactive = {}  # maps str(user id) -> active hardware session id
		self.charset = "0123456789"
		# Rebind the module-level Utils/DisplayName names to the live cog
		# instances so the commands in this file use the loaded cogs.
		global Utils, DisplayName
		Utils = self.bot.get_cog("Utils")
		DisplayName = self.bot.get_cog("DisplayName")
def gen_id(self):
# Just use the current time as that shouldn't ever be the same (unless a user
# manages to do this twice in < 1 second)
return str(time.time())
@commands.command(pass_context=True)
async def cancelhw(self, ctx):
"""Cancels a current hardware session."""
if str(ctx.author.id) in self.hwactive:
self._stop_hw(ctx.author)
await ctx.send("You've left your current hardware session!".format(ctx.prefix))
return
await ctx.send("You're not in a current hardware session.")
def _stop_hw(self, author):
if str(author.id) in self.hwactive:
del self.hwactive[str(author.id)]
	@commands.command(pass_context=True)
	async def sethwchannel(self, ctx, *, channel: discord.TextChannel = None):
		"""Sets the channel for hardware (admin only)."""
		# Admin gate - the helper also replies to the user on failure.
		if not await Utils.is_admin_reply(ctx): return
		if channel == None:
			# No channel passed - clear the setting so hardware is pm-only.
			self.settings.setServerStat(ctx.guild, "HardwareChannel", "")
			msg = 'Hardware works *only* in pm now.'
			return await ctx.send(msg)
		# If we made it this far - then we can add it
		self.settings.setServerStat(ctx.guild, "HardwareChannel", channel.id)
		msg = 'Hardware channel set to **{}**.'.format(channel.name)
		await ctx.send(Utils.suppressed(ctx,msg))
@sethwchannel.error
async def sethwchannel_error(self, error, ctx):
# do stuff
msg = 'sethwchannel Error: {}'.format(error)
await ctx.send(msg)
@commands.command(pass_context=True)
async def pcpp(self, ctx, url = None, style = None, escape = None):
"""Convert a pcpartpicker.com link into markdown parts. Available styles: normal, md, mdblock, bold, and bolditalic."""
usage = "Usage: `{}pcpp [url] [style=normal, md, mdblock, bold, bolditalic] [escape=yes/no (optional)]`".format(ctx.prefix)
if not style:
style = 'normal'
if not url:
return await ctx.send(usage)
if escape == None:
escape = 'no'
escape = escape.lower() in ["yes","true","on","enable","enabled"]
output = await PCPP.getMarkdown(url, style, escape)
if not output:
msg = 'Something went wrong! Make sure you use a valid pcpartpicker link.'
return await ctx.send(msg)
if len(output) > 2000:
msg = "That's an *impressive* list of parts - but the max length allowed for messages in Discord is 2000 characters, and you're at *{}*.".format(len(output))
msg += '\nMaybe see if you can prune up that list a bit and try again?'
return await ctx.send(msg)
await ctx.send(Utils.suppressed(ctx,output))
@commands.command(pass_context=True)
async def mainhw(self, ctx, *, build = None):
"""Sets a new main build from your build list."""
if not build:
return await ctx.send("Usage: `{}mainhw [build name or number]`".format(ctx.prefix))
buildList = self.settings.getGlobalUserStat(ctx.author, "Hardware")
if buildList == None:
buildList = []
buildList = sorted(buildList, key=lambda x:x['Name'].lower())
mainBuild = None
# Get build by name first - then by number
for b in buildList:
if b['Name'].lower() == build.lower():
# Found it
mainBuild = b
break
if mainBuild:
# Found it!
for b in buildList:
if b is mainBuild:
b['Main'] = True
else:
b['Main'] = False
self.settings.setGlobalUserStat(ctx.author, "Hardware", buildList)
msg = "{} set as main!".format(mainBuild['Name'])
return await ctx.send(Utils.suppressed(ctx,msg))
try:
build = int(build)-1
if build >= 0 and build < len(buildList):
mainBuild = buildList[build]
except:
pass
if mainBuild:
# Found it!
for b in buildList:
if b is mainBuild:
b['Main'] = True
else:
b['Main'] = False
self.settings.setGlobalUserStat(ctx.author, "Hardware", buildList)
msg = "{} set as main!".format(mainBuild['Name'])
return await ctx.send(Utils.suppressed(ctx,msg))
msg = "I couldn't find that build or number."
await ctx.send(msg)
@commands.command(pass_context=True)
async def delhw(self, ctx, *, build = None):
"""Removes a build from your build list."""
if not build:
return await ctx.send("Usage: `{}delhw [build name or number]`".format(ctx.prefix))
buildList = self.settings.getGlobalUserStat(ctx.author, "Hardware")
if buildList == None:
buildList = []
buildList = sorted(buildList, key=lambda x:x['Name'].lower())
# Get build by name first - then by number
for b in buildList:
if b['Name'].lower() == build.lower():
# Found it
buildList.remove(b)
if b['Main'] and len(buildList):
buildList[0]['Main'] = True
self.settings.setGlobalUserStat(ctx.author, "Hardware", buildList)
msg = "{} removed!".format(b['Name'])
return await ctx.send(Utils.suppressed(ctx,msg))
try:
build = int(build)-1
if build >= 0 and build < len(buildList):
b = buildList.pop(build)
if b['Main'] and len(buildList):
buildList[0]['Main'] = True
self.settings.setGlobalUserStat(ctx.author, "Hardware", buildList)
msg = "{} removed!".format(b['Name'])
return await ctx.send(Utils.suppressed(ctx,msg))
except:
pass
msg = "I couldn't find that build or number."
await ctx.send(msg)
	@commands.command(pass_context=True)
	async def edithw(self, ctx, *, build = None):
		"""Edits a build from your build list."""
		# Resolve where the interactive session happens: the configured
		# hardware channel if set for this guild, otherwise the user's pms.
		hwChannel = None
		if ctx.guild:
			# Not a pm
			hwChannel = self.settings.getServerStat(ctx.guild, "HardwareChannel")
			if not (not hwChannel or hwChannel == ""):
				# We need the channel id
				if not str(hwChannel) == str(ctx.channel.id):
					msg = 'This isn\'t the channel for that...'
					for chan in ctx.guild.channels:
						if str(chan.id) == str(hwChannel):
							msg = 'This isn\'t the channel for that. Take the hardware talk to the **{}** channel.'.format(chan.name)
							break
					return await ctx.send(Utils.suppressed(ctx,msg))
				else:
					hwChannel = self.bot.get_channel(hwChannel)
		if not hwChannel:
			# Nothing set - pm
			hwChannel = ctx.author
		# Make sure we're not already in a parts transaction
		if str(ctx.author.id) in self.hwactive:
			return await ctx.send("You're already in a hardware session! You can leave with `{}cancelhw`".format(ctx.prefix))
		buildList = self.settings.getGlobalUserStat(ctx.author, "Hardware")
		if buildList == None:
			buildList = []
		if not len(buildList):
			# No parts!
			msg = 'You have no builds on file! You can add some with the `{}newhw` command.'.format(ctx.prefix)
			return await ctx.send(msg)
		buildList = sorted(buildList, key=lambda x:x['Name'].lower())
		mainBuild = None
		# Get build by name first - then by number
		if build is not None:
			for b in buildList:
				if b['Name'].lower() == build.lower():
					# Found it
					mainBuild = b
					break
			if not mainBuild:
				try:
					build = int(build)-1
					if build >= 0 and build < len(buildList):
						mainBuild = buildList[build]
				except:
					pass
		else:
			# No build passed - get the main if it exists
			for b in buildList:
				if b['Main']:
					mainBuild = b
					break
		if not mainBuild:
			msg = "I couldn't find that build or number."
			return await ctx.send(msg)
		# Set our HWActive flag
		# The id lets prompt/confirm detect a stale/cancelled session.
		hw_id = self.gen_id()
		self.hwactive[str(ctx.author.id)] = hw_id
		# Here, we have a build
		bname = Utils.suppressed(ctx,mainBuild['Name'])
		bparts = Utils.suppressed(ctx,mainBuild['Hardware'])
		msg = '"{}"\'s current parts:'.format(bname)
		try:
			await hwChannel.send(msg)
		except:
			# Can't send to the destination
			self._stop_hw(ctx.author)
			if hwChannel == ctx.author:
				# Must not accept pms
				await ctx.send("It looks like you don't accept pms. Please enable them and try again.")
			return
		# Let the invoker know we moved the conversation to their dms.
		if hwChannel == ctx.author and ctx.channel != ctx.author.dm_channel:
			await ctx.message.add_reaction("📬")
		await hwChannel.send(bparts)
		msg = 'Alright, *{}*, what parts does "{}" have now? (Please include *all* parts for this build - you can add new lines with *shift + enter*)\n'.format(DisplayName.name(ctx.author), bname)
		msg += 'You can also pass pcpartpicker links to have them formatted automagically - I can also format them using different styles.\n'
		msg += 'For example: '
		msg += '```https://pcpartpicker.com/list/123456 mdblock``` would format with the markdown block style.\n'
		msg += 'Markdown styles available are *normal, md, mdblock, bold, bolditalic*'
		# Prompt loop: repeats only when the user declines a pcpartpicker
		# formatting confirmation; every other path breaks or returns.
		while True:
			parts = await self.prompt(hw_id, ctx, msg, hwChannel, DisplayName.name(ctx.author))
			if not parts:
				self._stop_hw(ctx.author)
				return
			if 'pcpartpicker.com' in parts.content.lower():
				# Possibly a pc partpicker link?
				msg = 'It looks like you sent a pc part picker link - did you want me to try and format that? (y/n/stop)'
				test = await self.confirm(hw_id, ctx, parts, hwChannel, msg)
				if test == None:
					# Timed out / session cancelled.
					self._stop_hw(ctx.author)
					return
				elif test == True:
					# Optional second token is the markdown style.
					partList = parts.content.split()
					if len(partList) == 1:
						partList.append(None)
					output = None
					try:
						output = await PCPP.getMarkdown(partList[0], partList[1], False)
					except:
						pass
					if not output:
						msg = 'Something went wrong! Make sure you use a valid pcpartpicker link.'
						await hwChannel.send(msg)
						self._stop_hw(ctx.author)
						return
					if len(output) > 2000:
						msg = "That's an *impressive* list of parts - but the max length allowed for messages in Discord is 2000 characters, and you're at *{}*.".format(len(output))
						msg += '\nMaybe see if you can prune up that list a bit and try again?'
						await hwChannel.send(msg)
						self._stop_hw(ctx.author)
						return
					# Make sure
					conf = await self.confirm(hw_id, ctx, output, hwChannel, None, ctx.author)
					if conf == None:
						# Timed out
						self._stop_hw(ctx.author)
						return
					elif conf == False:
						# Didn't get our answer
						msg = 'Alright, *{}*, what parts does "{}" have now? (Please include *all* parts for this build - you can add new lines with *shift + enter*)'.format(DisplayName.name(ctx.author), bname)
						continue
					m = '{} set to:\n{}'.format(bname, output)
					await hwChannel.send(m)
					mainBuild['Hardware'] = output
					self.settings.setGlobalUserStat(ctx.author, "Hardware", buildList)
					break
			# Plain text parts - store them verbatim.
			mainBuild['Hardware'] = parts.content
			self.settings.setGlobalUserStat(ctx.author, "Hardware", buildList)
			break
		msg = '*{}*, {} was edited successfully!'.format(DisplayName.name(ctx.author), bname)
		self._stop_hw(ctx.author)
		await hwChannel.send(msg)
	@commands.command(pass_context=True)
	async def renhw(self, ctx, *, build = None):
		"""Renames a build from your build list."""
		# Resolve where the interactive session happens: the configured
		# hardware channel if set for this guild, otherwise the user's pms.
		hwChannel = None
		if ctx.guild:
			# Not a pm
			hwChannel = self.settings.getServerStat(ctx.guild, "HardwareChannel")
			if not (not hwChannel or hwChannel == ""):
				# We need the channel id
				if not str(hwChannel) == str(ctx.channel.id):
					msg = 'This isn\'t the channel for that...'
					for chan in ctx.guild.channels:
						if str(chan.id) == str(hwChannel):
							msg = 'This isn\'t the channel for that. Take the hardware talk to the **{}** channel.'.format(chan.name)
					await ctx.send(msg)
					return
				else:
					hwChannel = self.bot.get_channel(hwChannel)
		if not hwChannel:
			# Nothing set - pm
			hwChannel = ctx.author
		# Make sure we're not already in a parts transaction
		if str(ctx.author.id) in self.hwactive:
			await ctx.send("You're already in a hardware session! You can leave with `{}cancelhw`".format(ctx.prefix))
			return
		buildList = self.settings.getGlobalUserStat(ctx.author, "Hardware")
		if buildList == None:
			buildList = []
		if not len(buildList):
			# No parts!
			msg = 'You have no builds on file! You can add some with the `{}newhw` command.'.format(ctx.prefix)
			await ctx.send(msg)
			return
		buildList = sorted(buildList, key=lambda x:x['Name'].lower())
		mainBuild = None
		# Get build by name first - then by number
		if build is not None:
			for b in buildList:
				if b['Name'].lower() == build.lower():
					# Found it
					mainBuild = b
					break
			if not mainBuild:
				try:
					build = int(build)-1
					if build >= 0 and build < len(buildList):
						mainBuild = buildList[build]
				except:
					pass
		else:
			# No build passed - get the main if it exists
			for b in buildList:
				if b['Main']:
					mainBuild = b
					break
		if not mainBuild:
			msg = "I couldn't find that build or number."
			await ctx.send(msg)
			return
		# Set our HWActive flag
		# The id lets prompt() detect a stale/cancelled session.
		hw_id = self.gen_id()
		self.hwactive[str(ctx.author.id)] = hw_id
		# Post the dm reaction
		if hwChannel == ctx.author and ctx.channel != ctx.author.dm_channel:
			await ctx.message.add_reaction("📬")
		# Here, we have a build
		bname = Utils.suppressed(ctx,mainBuild['Name'])
		msg = 'Alright, *{}*, what do you want to rename "{}" to?'.format(DisplayName.name(ctx.author), bname)
		# Keep prompting until a non-duplicate name is supplied.
		while True:
			try:
				buildName = await self.prompt(hw_id, ctx, msg, hwChannel, DisplayName.name(ctx.author))
			except:
				# Can't send to the destination
				self._stop_hw(ctx.author)
				if hwChannel == ctx.author:
					# Must not accept pms
					await ctx.send("It looks like you don't accept pms. Please enable them and try again.")
				return
			if not buildName:
				# Timed out / cancelled.
				self._stop_hw(ctx.author)
				return
			buildExists = False
			for build in buildList:
				if build['Name'].lower() == buildName.content.lower():
					mesg = 'It looks like you already have a build by that name, *{}*. Try again.'.format(DisplayName.name(ctx.author))
					await hwChannel.send(mesg)
					buildExists = True
					break
			if not buildExists:
				mainBuild['Name'] = buildName.content
				# Flush settings to all servers
				self.settings.setGlobalUserStat(ctx.author, "Hardware", buildList)
				break
		bname2 = Utils.suppressed(ctx,buildName.content)
		msg = '*{}*, {} was renamed to {} successfully!'.format(DisplayName.name(ctx.author), bname, bname2)
		self._stop_hw(ctx.author)
		await hwChannel.send(msg)
	@commands.command(pass_context=True)
	# NOTE(review): `search` can never be bound by discord.py - the
	# keyword-only `user` consumes the whole remainder of the message.
	# The search term is instead parsed out of `user` below.
	async def gethw(self, ctx, *, user = None, search = None):
		"""Searches the user's hardware for a specific search term."""
		if not user:
			usage = "Usage: `{}gethw [user] [search term]`".format(ctx.prefix)
			return await ctx.send(usage)
		# Let's check for username and search term
		# Try every split point of the words, longest member name first.
		parts = user.split()
		memFromName = None
		entries = []
		for j in range(len(parts)):
			# Reverse search direction
			i = len(parts)-1-j
			memFromName = None
			# Name = 0 up to i joined by space
			nameStr = ' '.join(parts[0:i])
			buildStr = ' '.join(parts[i:])
			memFromName = DisplayName.memberForName(nameStr, ctx.guild)
			if memFromName:
				# Got a member - let's check the remainder length, and search!
				if len(buildStr) < 3:
					usage = "Search term must be at least 3 characters."
					return await ctx.send(usage)
				buildList = self.settings.getGlobalUserStat(memFromName, "Hardware", [])
				buildList = sorted(buildList, key=lambda x:x['Name'].lower())
				# Collect every hardware line containing the term.
				for build in buildList:
					bParts = build['Hardware']
					for line in bParts.splitlines():
						if buildStr.lower() in line.lower():
							entries.append({"name":"{}. {}".format(len(entries)+1,build["Name"]),"value":line})
				if len(entries):
					# We're in business
					return await PickList.PagePicker(title="Search results for \"{}\" ({:,} total)".format(buildStr, len(entries)),list=entries,ctx=ctx).pick()
		# If we're here - then we didn't find a member - set it to the author, and run another quick search
		buildStr = user
		if len(buildStr) < 3:
			usage = "Search term must be at least 3 characters."
			return await ctx.send(usage)
		buildList = self.settings.getGlobalUserStat(ctx.author, "Hardware", [])
		buildList = sorted(buildList, key=lambda x:x['Name'].lower())
		for build in buildList:
			bParts = build['Hardware']
			for line in bParts.splitlines():
				if buildStr.lower() in line.lower():
					entries.append({"name":"{}. {}".format(len(entries)+1,build["Name"]),"value":line})
		if len(entries):
			# We're in business
			return await PickList.PagePicker(title="Search results for \"{}\" ({:,} total)".format(buildStr, len(entries)),list=entries,ctx=ctx).pick()
		return await Message.EmbedText(title="Nothing found for that search.",color=ctx.author).send(ctx)
	@commands.command(pass_context=True)
	# NOTE(review): like gethw, `build` is never bound by discord.py (the
	# keyword-only `user` consumes the rest); it is also shadowed by the
	# loop variables below. Member + build are parsed out of `user`.
	async def hw(self, ctx, *, user : str = None, build = None):
		"""Lists the hardware for either the user's default build - or the passed build."""
		if not user:
			user = "{}".format(ctx.author.mention)
		# Let's check for username and build name
		# Pass 1: try every split point, treating the tail as a build NAME.
		parts = user.split()
		memFromName = None
		buildParts = None
		for j in range(len(parts)):
			# Reverse search direction
			i = len(parts)-1-j
			# Name = 0 up to i joined by space
			nameStr = ' '.join(parts[0:i])
			buildStr = ' '.join(parts[i:])
			memFromName = DisplayName.memberForName(nameStr, ctx.guild)
			if memFromName:
				buildList = self.settings.getGlobalUserStat(memFromName, "Hardware")
				if buildList == None:
					buildList = []
				for build in buildList:
					if build['Name'].lower() == buildStr.lower():
						# Ha! Found it!
						buildParts = build
						break
				if buildParts:
					# We're in business
					break
				else:
					memFromName = None
		if not memFromName:
			# Try again with numbers
			# Pass 2: same split points, but the tail is a 1-based index.
			for j in range(len(parts)):
				# Reverse search direction
				i = len(parts)-1-j
				# Name = 0 up to i joined by space
				nameStr = ' '.join(parts[0:i])
				buildStr = ' '.join(parts[i:])
				memFromName = DisplayName.memberForName(nameStr, ctx.guild)
				if memFromName:
					buildList = self.settings.getGlobalUserStat(memFromName, "Hardware")
					if buildList == None:
						buildList = []
					buildList = sorted(buildList, key=lambda x:x['Name'].lower())
					try:
						buildStr = int(buildStr)-1
						if buildStr >= 0 and buildStr < len(buildList):
							buildParts = buildList[buildStr]
					except Exception:
						memFromName = None
						buildParts = None
					if buildParts:
						# We're in business
						break
					else:
						memFromName = None
		if not memFromName:
			# One last shot - check if it's a build for us
			buildList = self.settings.getGlobalUserStat(ctx.author, "Hardware")
			if buildList == None:
				buildList = []
			buildList = sorted(buildList, key=lambda x:x['Name'].lower())
			for build in buildList:
				if build['Name'].lower() == user.lower():
					memFromName = ctx.author
					buildParts = build
					break
			if not memFromName:
				# Okay - *this* time is the last - check for number
				try:
					user_as_build = int(user)-1
					if user_as_build >= 0 and user_as_build < len(buildList):
						buildParts = buildList[user_as_build]
						memFromName = ctx.author
				except Exception:
					pass
		if not memFromName:
			# Last check for a user passed as the only param
			memFromName = DisplayName.memberForName(user, ctx.guild)
		if not memFromName:
			# We couldn't find them :(
			msg = "I couldn't find that user/build combo..."
			return await ctx.send(msg)
		if buildParts == None:
			# Check if that user has no builds
			buildList = self.settings.getGlobalUserStat(memFromName, "Hardware")
			if buildList == None:
				buildList = []
			if not len(buildList):
				# No parts!
				msg = '*{}* has no builds on file! They can add some with the `{}newhw` command.'.format(DisplayName.name(memFromName), ctx.prefix)
				return await ctx.send(msg)
			# Must be the default build
			for build in buildList:
				if build['Main']:
					buildParts = build
					break
			if not buildParts:
				# Well... uh... no defaults
				msg = "I couldn't find that user/build combo..."
				return await ctx.send(msg)
		# At this point - we *should* have a user and a build
		name = DisplayName.name(memFromName)
		msg_head = "__**{}'{} {}:**__\n\n".format(name,"" if name[-1:].lower()=="s" else "s", buildParts['Name'])
		msg = msg_head + buildParts['Hardware']
		if len(msg) > 2000: # is there somewhere the discord char count is defined, to avoid hardcoding?
			msg = buildParts['Hardware'] # if the header pushes us over the limit, omit it and send just the string
		await ctx.send(Utils.suppressed(ctx,msg))
	@commands.command(pass_context=True)
	# NOTE(review): `build` is never bound by discord.py (the keyword-only
	# `user` consumes the rest of the message); member + build are parsed
	# out of `user` using the same cascade as the hw command above.
	async def rawhw(self, ctx, *, user : str = None, build = None):
		"""Lists the raw markdown for either the user's default build - or the passed build."""
		if not user:
			user = "{}#{}".format(ctx.author.name, ctx.author.discriminator)
		# Let's check for username and build name
		# Pass 1: try every split point, treating the tail as a build NAME.
		parts = user.split()
		memFromName = None
		buildParts = None
		for j in range(len(parts)):
			# Reverse search direction
			i = len(parts)-1-j
			# Name = 0 up to i joined by space
			nameStr = ' '.join(parts[0:i])
			buildStr = ' '.join(parts[i:])
			memFromName = DisplayName.memberForName(nameStr, ctx.guild)
			if memFromName:
				buildList = self.settings.getGlobalUserStat(memFromName, "Hardware")
				if buildList == None:
					buildList = []
				for build in buildList:
					if build['Name'].lower() == buildStr.lower():
						# Ha! Found it!
						buildParts = build
						break
				if buildParts:
					# We're in business
					break
				else:
					memFromName = None
		if not memFromName:
			# Try again with numbers
			# Pass 2: same split points, but the tail is a 1-based index.
			for j in range(len(parts)):
				# Reverse search direction
				i = len(parts)-1-j
				# Name = 0 up to i joined by space
				nameStr = ' '.join(parts[0:i])
				buildStr = ' '.join(parts[i:])
				memFromName = DisplayName.memberForName(nameStr, ctx.guild)
				if memFromName:
					buildList = self.settings.getGlobalUserStat(memFromName, "Hardware")
					if buildList == None:
						buildList = []
					buildList = sorted(buildList, key=lambda x:x['Name'].lower())
					try:
						buildStr = int(buildStr)-1
						if buildStr >= 0 and buildStr < len(buildList):
							buildParts = buildList[buildStr]
					except Exception:
						memFromName = None
						buildParts = None
					if buildParts:
						# We're in business
						break
					else:
						memFromName = None
		if not memFromName:
			# One last shot - check if it's a build for us
			buildList = self.settings.getGlobalUserStat(ctx.author, "Hardware")
			if buildList == None:
				buildList = []
			buildList = sorted(buildList, key=lambda x:x['Name'].lower())
			for build in buildList:
				if build['Name'].lower() == user.lower():
					memFromName = ctx.author
					buildParts = build
					break
			if not memFromName:
				# Okay - *this* time is the last - check for number
				try:
					user_as_build = int(user)-1
					if user_as_build >= 0 and user_as_build < len(buildList):
						buildParts = buildList[user_as_build]
						memFromName = ctx.author
				except Exception:
					pass
		if not memFromName:
			# Last check for a user passed as the only param
			memFromName = DisplayName.memberForName(user, ctx.guild)
		if not memFromName:
			# We couldn't find them :(
			msg = "I couldn't find that user/build combo..."
			return await ctx.send(msg)
		if buildParts == None:
			# Check if that user has no builds
			buildList = self.settings.getGlobalUserStat(memFromName, "Hardware")
			if buildList == None:
				buildList = []
			if not len(buildList):
				# No parts!
				msg = '*{}* has no builds on file! They can add some with the `{}newhw` command.'.format(DisplayName.name(memFromName), ctx.prefix)
				return await ctx.send(msg)
			# Must be the default build
			for build in buildList:
				if build['Main']:
					buildParts = build
					break
			if not buildParts:
				# Well... uh... no defaults
				msg = "I couldn't find that user/build combo..."
				return await ctx.send(msg)
		# At this point - we *should* have a user and a build
		# Escape so the markdown source is shown literally.
		p = discord.utils.escape_markdown(buildParts['Hardware'])
		name = DisplayName.name(memFromName)
		msg = "__**{}'{} {} (Raw Markdown):**__\n\n{}".format(name,"" if name[-1:].lower()=="s" else "s", buildParts['Name'], p)
		await ctx.send(Utils.suppressed(ctx,msg))
@commands.command(pass_context=True)
async def listhw(self, ctx, *, user = None):
"""Lists the builds for the specified user - or yourself if no user passed."""
usage = 'Usage: `{}listhw [user]`'.format(ctx.prefix)
if not user:
user = "{}#{}".format(ctx.author.name, ctx.author.discriminator)
member = DisplayName.memberForName(user, ctx.guild)
if not member:
return await ctx.send(usage)
buildList = self.settings.getGlobalUserStat(member, "Hardware")
if buildList == None:
buildList = []
buildList = sorted(buildList, key=lambda x:x['Name'].lower())
if not len(buildList):
msg = '*{}* has no builds on file! They can add some with the `{}newhw` command.'.format(DisplayName.name(member), ctx.prefix)
return await ctx.send(msg)
items = [{"name":"{}. {}".format(i,x["Name"]+(" (Main Build)" if x["Main"] else "")),"value":Utils.truncate_string(x["Hardware"])} for i,x in enumerate(buildList,start=1)]
return await PickList.PagePicker(title="{}'s Builds ({:,} total)".format(DisplayName.name(member),len(buildList)),list=items,ctx=ctx).pick()
@commands.command()
async def lhw(self, ctx, *, user = None):
"""Lists only the titles of the builds for the specified user - or yourself if no user passed."""
usage = 'Usage: `{}lhw [user]`'.format(ctx.prefix)
if not user: user = ctx.author.id
member = DisplayName.memberForName(user, ctx.guild)
if not member: return await ctx.send(usage)
buildList = self.settings.getGlobalUserStat(member, "Hardware", [])
buildList = sorted(buildList, key=lambda x:x['Name'].lower())
if not len(buildList):
msg = '*{}* has no builds on file! They can add some with the `{}newhw` command.'.format(DisplayName.name(member), ctx.prefix)
return await ctx.send(msg)
desc = "\n".join([Utils.truncate_string("{}. {}".format(i,x["Name"]+(" (Main Build)" if x["Main"] else ""))) for i,x in enumerate(buildList,start=1)])
return await PickList.PagePicker(
title="{}'s Builds ({:,} total)".format(DisplayName.name(member),len(buildList)),
description=desc,
ctx=ctx
).pick()
    @commands.command(pass_context=True)
    async def newhw(self, ctx):
        """Initiate a new-hardware conversation with the bot. The hardware added will also be set as the Main Build."""
        # Current builds for the author (None -> empty list).
        buildList = self.settings.getGlobalUserStat(ctx.author, "Hardware")
        if buildList == None:
            buildList = []
        # Work out where the conversation happens: the configured hardware
        # channel if the server set one, otherwise the author's DMs.
        hwChannel = None
        if ctx.guild:
            # Not a pm
            hwChannel = self.settings.getServerStat(ctx.guild, "HardwareChannel")
            if not (not hwChannel or hwChannel == ""):
                # We need the channel id
                if not str(hwChannel) == str(ctx.channel.id):
                    # Wrong channel - point the user at the configured one.
                    msg = 'This isn\'t the channel for that...'
                    for chan in ctx.guild.channels:
                        if str(chan.id) == str(hwChannel):
                            msg = 'This isn\'t the channel for that. Take the hardware talk to the **{}** channel.'.format(chan.name)
                    return await ctx.send(msg)
                else:
                    hwChannel = self.bot.get_channel(hwChannel)
        if not hwChannel:
            # Nothing set - pm
            hwChannel = ctx.author
        # Make sure we're not already in a parts transaction
        if str(ctx.author.id) in self.hwactive:
            return await ctx.send("You're already in a hardware session! You can leave with `{}cancelhw`".format(ctx.prefix))
        # Set our HWActive flag - the session id lets later prompts verify they
        # still belong to this conversation.
        hw_id = self.gen_id()
        self.hwactive[str(ctx.author.id)] = hw_id
        msg = 'Alright, *{}*, let\'s add a new build.\n\n'.format(DisplayName.name(ctx.author))
        if len(buildList) == 1:
            msg += 'You currently have *1 build* on file.\n\n'
        else:
            msg += 'You currently have *{} builds* on file.\n\nLet\'s get started!'.format(len(buildList))
        try:
            await hwChannel.send(msg)
        except:
            # Can't send to the destination
            self._stop_hw(ctx.author)
            if hwChannel == ctx.author:
                # Must not accept pms
                await ctx.send("It looks like you don't accept pms. Please enable them and try again.")
            return
        if hwChannel == ctx.author and ctx.channel != ctx.author.dm_channel:
            # Conversation moved to DMs - leave a mailbox reaction as a hint.
            await ctx.message.add_reaction("📬")
        msg = '*{}*, tell me what you\'d like to call this build (type stop to cancel):'.format(DisplayName.name(ctx.author))
        # Get the build name - loop until we get a name that isn't taken.
        newBuild = { 'Main': True }
        while True:
            buildName = await self.prompt(hw_id, ctx, msg, hwChannel, DisplayName.name(ctx.author))
            if not buildName:
                # Prompt timed out or was cancelled.
                self._stop_hw(ctx.author)
                return
            buildExists = False
            for build in buildList:
                if build['Name'].lower() == buildName.content.lower():
                    mesg = 'It looks like you already have a build by that name, *{}*. Try again.'.format(DisplayName.name(ctx.author))
                    await hwChannel.send(mesg)
                    buildExists = True
                    break
            if not buildExists:
                newBuild['Name'] = buildName.content
                break
        bname = Utils.suppressed(ctx,buildName.content)
        # Ask for the parts list - plain text or a pcpartpicker link.
        msg = 'Alright, *{}*, what parts does "{}" have? (Please include *all* parts for this build - you can add new lines with *shift + enter*)\n'.format(DisplayName.name(ctx.author), bname)
        msg += 'You can also pass pcpartpicker links to have them formatted automagically - I can also format them using different styles.\n'
        msg += 'For example: '
        msg += '```https://pcpartpicker.com/list/123456 mdblock``` would format with the markdown block style.\n'
        msg += 'Markdown styles available are *normal, md, mdblock, bold, bolditalic*'
        while True:
            parts = await self.prompt(hw_id, ctx, msg, hwChannel, DisplayName.name(ctx.author))
            if not parts:
                self._stop_hw(ctx.author)
                return
            if 'pcpartpicker.com' in parts.content.lower():
                # Possibly a pc partpicker link?
                msg = 'It looks like you sent a pc part picker link - did you want me to try and format that? (y/n/stop)'
                test = await self.confirm(hw_id, ctx, parts, hwChannel, msg)
                if test == None:
                    # Timed out or user said stop.
                    self._stop_hw(ctx.author)
                    return
                elif test == True:
                    # Expected input: "<link> [style]" - pad with None if no style.
                    partList = parts.content.split()
                    if len(partList) == 1:
                        partList.append(None)
                    output = None
                    try:
                        output = await PCPP.getMarkdown(partList[0], partList[1], False)
                    except:
                        pass
                    #output = PCPP.getMarkdown(parts.content)
                    if not output:
                        msg = 'Something went wrong! Make sure you use a valid pcpartpicker link.'
                        await hwChannel.send(msg)
                        self._stop_hw(ctx.author)
                        return
                    if len(output) > 2000:
                        # Discord hard message-length limit.
                        msg = "That's an *impressive* list of parts - but the max length allowed for messages in Discord is 2000 characters, and you're at *{}*.".format(len(output))
                        msg += '\nMaybe see if you can prune up that list a bit and try again?'
                        await hwChannel.send(msg)
                        self._stop_hw(ctx.author)
                        return
                    # Make sure
                    conf = await self.confirm(hw_id, ctx, output, hwChannel, None, ctx.author)
                    if conf == None:
                        # Timed out
                        self._stop_hw(ctx.author)
                        return
                    elif conf == False:
                        # Didn't get our answer
                        msg = 'Alright, *{}*, what parts does "{}" have? (Please include *all* parts for this build - you can add new lines with *shift + enter*)'.format(DisplayName.name(ctx.author), bname)
                        continue
                    m = '{} set to:\n{}'.format(bname, output)
                    await hwChannel.send(m)
                    newBuild['Hardware'] = output
                    break
            # Plain-text parts list - take it as-is.
            newBuild['Hardware'] = parts.content
            break
        # Check if we already have a main build and clear it
        for build in buildList:
            if build['Main']:
                build['Main'] = False
        buildList.append(newBuild)
        self.settings.setGlobalUserStat(ctx.author, "Hardware", buildList)
        msg = '*{}*, {} was created successfully! It has been set as your main build. To select a different main, you can use `{}mainhw`'.format(DisplayName.name(ctx.author), bname, ctx.prefix)
        self._stop_hw(ctx.author)
        await hwChannel.send(msg)
# New HW helper methods
def channelCheck(self, msg, dest = None):
if self.stillHardwaring(msg.author) == False:
# any message is a valid check if we're not editing
return True
if dest:
# We have a target channel
if type(dest) is discord.User or type(dest) is discord.Member:
dest = dest.dm_channel.id
elif type(dest) is discord.TextChannel:
dest = dest.id
elif type(dest) is discord.Guild:
dest = dest.get_channel(dest.id).id
if not dest == msg.channel.id:
return False
else:
# Just make sure it's in pm or the hw channel
if msg.channel == discord.TextChannel:
# Let's check our server stuff
hwChannel = self.settings.getServerStat(msg.guild, "HardwareChannel")
if not (not hwChannel or hwChannel == ""):
# We need the channel id
if not str(hwChannel) == str(ctx.channel.id):
return False
else:
# Nothing set - pm
if not type(msg.channel) == discord.DMChannel:
return False
return True
# Makes sure we're still editing - if this gets set to False,
# that means the user stopped editing/newhw
def stillHardwaring(self, author):
return str(author.id) in self.hwactive
def confirmCheck(self, msg, dest = None):
if not self.channelCheck(msg, dest):
return False
msgStr = msg.content.lower()
if msgStr.startswith('y'):
return True
if msgStr.startswith('n'):
return True
elif msgStr.startswith('stop'):
return True
return False
    async def confirm(self, hw_id, ctx, message, dest = None, m = None, author = None):
        """Echo *message* back (or send the custom prompt *m*) and wait for a
        y/n/stop reply from the command author.

        Returns True for "y...", False for "n..." (ask-again), and None when
        the user stops, the 5-minute timeout fires, or the hardware session
        identified by *hw_id* is no longer the active one.
        """
        # Get author name - used to personalize the prompt/timeout messages.
        authorName = None
        if author:
            if type(author) is str:
                authorName = author
            else:
                try:
                    authorName = DisplayName.name(author)
                except Exception:
                    pass
        else:
            # No explicit author - fall back to the message's author, if any.
            if message:
                try:
                    author = message.author
                except Exception:
                    pass
                try:
                    authorName = DisplayName.name(message.author)
                except Exception:
                    pass
        if not dest:
            dest = message.channel
        if not m:
            # Default prompt: repeat what we received and ask for confirmation.
            if authorName:
                msg = '*{}*, I got:'.format(Utils.suppressed(ctx,authorName))
            else:
                msg = "I got:"
            if type(message) is str:
                msg2 = Utils.suppressed(ctx,message)
            else:
                msg2 = '{}'.format(Utils.suppressed(ctx,message.content))
            msg3 = 'Is that correct? (y/n/stop)'
            await dest.send(msg)
            await dest.send(msg2)
            await dest.send(msg3)
        else:
            # Caller supplied a custom prompt - send it as-is (suppressed).
            msg = m
            await dest.send(Utils.suppressed(ctx,msg))
        while True:
            def littleCheck(m):
                # Only non-empty y/n/stop messages from the invoking user,
                # in the right channel, count.
                return ctx.author.id == m.author.id and self.confirmCheck(m, dest) and len(m.content)
            try:
                talk = await self.bot.wait_for('message', check=littleCheck, timeout=300)
            except Exception:
                # Timed out waiting for a reply.
                talk = None
            # See if we're still in the right context
            if not hw_id == self.hwactive.get(str(ctx.author.id),None):
                return None
            # Hardware ended
            if not self.stillHardwaring(ctx.author):
                return None
            if not talk:
                if authorName:
                    msg = "*{}*, I'm out of time...".format(authorName)
                else:
                    msg = "I'm out of time..."
                await dest.send(msg)
                return None
            else:
                # We got something
                if talk.content.lower().startswith('y'):
                    return True
                elif talk.content.lower().startswith('stop'):
                    if authorName:
                        msg = "No problem, *{}!* See you later!".format(authorName)
                    else:
                        msg = "No problem! See you later!"
                    await dest.send(msg)
                    return None
                else:
                    # Anything starting with 'n' - treat as "not correct".
                    return False
    async def prompt(self, hw_id, ctx, message, dest = None, author = None):
        """Send *message* to *dest* and wait for a free-form reply from the
        command author, then ask them to confirm it.

        Returns the reply message object once confirmed, or None when the
        user types stop, the 5-minute timeout fires, or the session *hw_id*
        is no longer active.
        """
        # Get author name - used to personalize timeout/stop messages.
        authorName = None
        if author:
            if type(author) is str:
                authorName = author
            else:
                try:
                    authorName = DisplayName.name(author)
                except Exception:
                    pass
        else:
            if message:
                try:
                    author = message.author
                except Exception:
                    pass
                try:
                    authorName = DisplayName.name(message.author)
                except Exception:
                    pass
        if not dest:
            dest = ctx.channel
        await dest.send(Utils.suppressed(ctx,message))
        while True:
            def littleCheck(m):
                # Only non-empty messages from the invoking user in the
                # right channel count as a reply.
                return ctx.author.id == m.author.id and self.channelCheck(m, dest) and len(m.content)
            try:
                talk = await self.bot.wait_for('message', check=littleCheck, timeout=300)
            except Exception:
                # Timed out waiting for a reply.
                talk = None
            # See if we're still in the right context
            if not hw_id == self.hwactive.get(str(ctx.author.id),None):
                return None
            # Hardware ended
            if not self.stillHardwaring(ctx.author):
                return None
            if not talk:
                msg = "*{}*, I'm out of time...".format(authorName)
                await dest.send(msg)
                return None
            else:
                # Check for a stop
                if talk.content.lower() == 'stop':
                    # NOTE(review): the extra ctx.prefix argument here is
                    # unused by the format string - harmless, but looks like
                    # leftover from an earlier message; confirm before removing.
                    msg = "No problem, *{}!* See you later!".format(authorName, ctx.prefix)
                    await dest.send(msg)
                    return None
                # Make sure
                conf = await self.confirm(hw_id, ctx, talk, dest, "", author)
                if conf == True:
                    # We're sure - return the value
                    return talk
                elif conf == False:
                    # Not sure - ask again
                    return await self.prompt(hw_id, ctx, message, dest, author)
                else:
                    # Timed out
                    return None
|
{
"content_hash": "750303675331de5fa5b2885c0632eea0",
"timestamp": "",
"source": "github",
"line_count": 1096,
"max_line_length": 192,
"avg_line_length": 34.97445255474452,
"alnum_prop": 0.6378743608473338,
"repo_name": "corpnewt/CorpBot.py",
"id": "ee3d00c33e7f389971055303165070d7240f1978",
"size": "38341",
"binary": false,
"copies": "1",
"ref": "refs/heads/rewrite",
"path": "Cogs/Hw.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "21938"
},
{
"name": "Python",
"bytes": "1371709"
},
{
"name": "Shell",
"bytes": "1598"
}
],
"symlink_target": ""
}
|
"""
Welcome to the API documentation for MongoKat.
Please see the [README on GitHub](https://github.com/pricingassistant/mongokat) for more info about MongoKat.
"""
from mongokat._bson import decode_all, _element_to_dict, _get_object, _elements_to_dict
import pymongo
import datetime
# This is the only monkey-patch needed to use our own bson.decode_all function,
# which implements https://jira.mongodb.org/browse/PYTHON-175
import bson
import sys
# Swap pymongo's BSON decoding entry points for the patched versions imported
# above from mongokat._bson (see the linked JIRA ticket for the motivation).
bson.decode_all = decode_all
bson._elements_to_dict = _elements_to_dict
# Route embedded-document (BSON object) element decoding through our getter too.
bson._ELEMENT_GETTER[bson.BSONOBJ] = _get_object
bson._element_to_dict = _element_to_dict
# This other monkey-patch is needed to disable the type check on document_class, because
# we also can pass a tuple to use additional kwargs in the document_class instantiation.
class CodecOptionsWithoutCheck(bson.codec_options.CodecOptions):
    """Replacement for ``bson.codec_options.CodecOptions`` that skips the
    ``document_class`` type check, so a tuple (class plus extra kwargs) may be
    passed as ``document_class`` (see the module comment above)."""
    def __new__(cls, document_class=dict,
                tz_aware=False, uuid_representation=bson.codec_options.PYTHON_LEGACY,
                unicode_decode_error_handler="strict", tzinfo=None):
        # The upstream issubclass(document_class, MutableMapping) check is
        # intentionally omitted here.
        if not isinstance(tz_aware, bool):
            raise TypeError("tz_aware must be True or False")
        if uuid_representation not in bson.binary.ALL_UUID_REPRESENTATIONS:
            raise ValueError("uuid_representation must be a value "
                             "from bson.binary.ALL_UUID_REPRESENTATIONS")
        # BUGFIX: the original passed None inside the isinstance() type tuple;
        # isinstance() requires types, so any non-string handler (including
        # None itself) raised TypeError instead of the intended ValueError.
        # type(None) makes None a legitimately accepted value.
        if not isinstance(unicode_decode_error_handler,
                          (bson.py3compat.string_type, type(None))):
            raise ValueError("unicode_decode_error_handler must be a string "
                             "or None")
        if tzinfo is not None:
            if not isinstance(tzinfo, datetime.tzinfo):
                raise TypeError(
                    "tzinfo must be an instance of datetime.tzinfo")
            if not tz_aware:
                raise ValueError(
                    "cannot specify tzinfo without also setting tz_aware=True")
        # CodecOptions is a namedtuple subclass - build the tuple directly.
        return tuple.__new__(
            cls, (document_class, tz_aware, uuid_representation,
                  unicode_decode_error_handler, tzinfo))
bson.codec_options.CodecOptions = CodecOptionsWithoutCheck
from .collection import Collection, find_method
from .document import Document
|
{
"content_hash": "12f0afe31c6463ab6dfb86d68f227150",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 109,
"avg_line_length": 43.056603773584904,
"alnum_prop": 0.7002629272567923,
"repo_name": "pricingassistant/mongokat",
"id": "9002982d5d85ab7c3912760b5952885937655c4d",
"size": "2282",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mongokat/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "126648"
},
{
"name": "Dockerfile",
"bytes": "1034"
},
{
"name": "Makefile",
"bytes": "1421"
},
{
"name": "Python",
"bytes": "105182"
}
],
"symlink_target": ""
}
|
import abc
import functools
import re
import six
from oslo_log import log as logging
from trove.common import exception
from trove.common.i18n import _
LOG = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class ModuleDriver(object):
    """Contract that module drivers are expected to fulfill.

    Deriving from this class is a convenience, not a requirement: any class
    exposing the same abstract methods (plus get_type, get_name and
    configure) is a valid driver.
    """

    def __init__(self):
        super(ModuleDriver, self).__init__()
        # Driver-specific substitution values for the output decorator;
        # populated via the message_args setter.
        self._module_message_args = {}
        self._message_args = None
        self._generated_name = None

    @property
    def message_args(self):
        """Dict of args available for substitution in output decorator
        messages.  Do not override this; extend it by assigning
        ``self.message_args = <dict>`` instead.
        """
        if not self._message_args:
            args = {
                'name': self.get_name(),
                'type': self.get_type()}
            args.update(self._module_message_args)
            self._message_args = args
        return self._message_args

    @message_args.setter
    def message_args(self, values):
        """Record driver-specific substitution values and invalidate the
        cached combined dict so it gets rebuilt on next access.
        """
        self._module_message_args = values if values else {}
        self._message_args = None

    @property
    def generated_name(self):
        # Derive a 'module type' style name from the class name,
        # e.g. DoCustomWorkDriver -> do_custom_work (cached after first use).
        if not self._generated_name:
            stripped = re.sub('(.)[Dd]river$', r'\1', self.__class__.__name__)
            spaced = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', stripped)
            snaked = re.sub('([a-z0-9])([A-Z])', r'\1_\2', spaced)
            self._generated_name = snaked.lower()
        return self._generated_name

    def get_type(self):
        """Type used when setting up a module in Trove; defaults to the
        driver's name, and exists mostly for code clarity.
        """
        return self.get_name()

    def get_name(self):
        """Driver name - the generated one by default.  If overridden, it
        must be lower-case.
        """
        return self.generated_name

    @abc.abstractmethod
    def get_description(self):
        """Description for the driver."""

    @abc.abstractmethod
    def get_updated(self):
        """Date the driver was last updated."""

    @abc.abstractmethod
    def apply(self, name, datastore, ds_version, data_file, admin_module):
        """Apply the module to the guest instance and return a
        (status, message) tuple.  *admin_module* says whether the module was
        created with 'admin' privileges, which a driver may use as a form of
        access control by refusing non-admin modules.
        """
        return False, "Not a concrete driver"

    @abc.abstractmethod
    def remove(self, name, datastore, ds_version, data_file):
        """Remove the module from the guest instance and return a
        (status, message) tuple.
        """
        return False, "Not a concrete driver"

    def configure(self, name, datastore, ds_version, data_file):
        """Configure the driver.  A good place to extend message_args by
        assigning ``self.message_args = <dict>``; those values are appended
        to the defaults from the message_args property.
        """
def output(log_message=None, success_message=None,
           fail_message=None):
    """Decorator that traps the typical exceptions raised while applying and
    removing modules and returns the proper (success, message) output.

    If the wrapped function returns (success_flag, message), those are used;
    otherwise success is assumed and *success_message* is returned.  Keyword
    arguments may appear in the message strings - defaults come from the
    driver's message_args property, and drivers can add their own by setting
    self.message_args in configure() (e.g. "My key is '%(my_key)s'").
    Using this removes a lot of boiler-plate, but it is not mandatory.
    """
    success_message = success_message or "Success"
    fail_message = fail_message or "Fail"

    def output_decorator(func):
        """This is the actual decorator."""

        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            """Handle the error messages and return values from the actual
            function.
            """
            log_msg = log_message
            success_msg = success_message
            fail_msg = fail_message
            if isinstance(args[0], ModuleDriver):
                # Try and insert any message args if they exist in the driver
                message_args = args[0].message_args
                if message_args:
                    try:
                        log_msg = log_msg % message_args
                        success_msg = success_msg % message_args
                        fail_msg = fail_msg % message_args
                    except Exception:
                        # if there's a problem, just log it and drive on
                        LOG.warning(_("Could not apply message args: %s") %
                                    message_args)
            if log_msg:
                LOG.info(log_msg)
            success = False
            try:
                rv = func(*args, **kwargs)
                if rv:
                    # Use the actual values, if there are some
                    success, message = rv
                else:
                    success = True
                    message = success_msg
            except exception.ProcessExecutionError as ex:
                message = (_("%(msg)s: %(out)s\n%(err)s") %
                           {'msg': fail_msg,
                            'out': ex.stdout,
                            'err': ex.stderr})
                message = message.replace(': \n', ': ')
                message = message.rstrip()
                LOG.exception(message)
            except exception.TroveError as ex:
                message = (_("%(msg)s: %(err)s") %
                           {'msg': fail_msg, 'err': ex._error_string})
                LOG.exception(message)
            except Exception as ex:
                # BUGFIX: exceptions have no `.message` attribute on Python 3
                # (deprecated on Python 2), so the original `ex.message` raised
                # AttributeError inside this handler; stringify instead.
                message = (_("%(msg)s: %(err)s") %
                           {'msg': fail_msg, 'err': six.text_type(ex)})
                LOG.exception(message)
            return success, message
        return wrapper
    return output_decorator
|
{
"content_hash": "18250e3f515e73ef8772addc89398c01",
"timestamp": "",
"source": "github",
"line_count": 190,
"max_line_length": 79,
"avg_line_length": 38.305263157894736,
"alnum_prop": 0.5762572135201979,
"repo_name": "hplustree/trove",
"id": "e758c2a7af3bdb2f210b452d9c0251a5c0d607ad",
"size": "7908",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "trove/guestagent/module/drivers/module_driver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4757844"
},
{
"name": "Shell",
"bytes": "191911"
}
],
"symlink_target": ""
}
|
from google.analytics import admin_v1alpha
async def sample_run_access_report():
    """Issue an (empty) RunAccessReport request against the Analytics Admin
    API and print the response."""
    # Build the async client and an empty report request.
    client = admin_v1alpha.AnalyticsAdminServiceAsyncClient()
    request = admin_v1alpha.RunAccessReportRequest()
    # Make the request and show what came back.
    response = await client.run_access_report(request=request)
    print(response)
# [END analyticsadmin_v1alpha_generated_AnalyticsAdminService_RunAccessReport_async]
|
{
"content_hash": "97e3885c2b98980f205a780ecf913b1c",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 84,
"avg_line_length": 26.833333333333332,
"alnum_prop": 0.7494824016563147,
"repo_name": "googleapis/python-analytics-admin",
"id": "8400d66e4a6544bf7b0e116b031551d0819d077e",
"size": "1895",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/analyticsadmin_v1alpha_generated_analytics_admin_service_run_access_report_async.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "5576405"
},
{
"name": "Shell",
"bytes": "30687"
}
],
"symlink_target": ""
}
|
from fastapi import FastAPI, Query
app = FastAPI()
@app.get("/items/")
async def read_items(
    q: str
    | None = Query(
        default=None,
        title="Query string",
        description="Query string for the items to search in the database that have a good match",
        min_length=3,
    )
):
    # Start from the static item list and attach the query only when given.
    results = {"items": [{"item_id": "Foo"}, {"item_id": "Bar"}]}
    if q:
        results["q"] = q
    return results
|
{
"content_hash": "cb053a8ba8ecf8862af85bebf949594c",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 98,
"avg_line_length": 23,
"alnum_prop": 0.5720823798627003,
"repo_name": "tiangolo/fastapi",
"id": "489f631d5e90875a5b9128dffbd7aac973c50317",
"size": "437",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs_src/query_params_str_validations/tutorial008_py310.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "25"
},
{
"name": "HTML",
"bytes": "187"
},
{
"name": "Python",
"bytes": "1928986"
},
{
"name": "Shell",
"bytes": "1383"
}
],
"symlink_target": ""
}
|
'''pyMKL
'''
# BUGFIX: setup() must come from setuptools, not distutils.core - the
# install_requires keyword below is a setuptools feature and is silently
# ignored by the distutils setup().
from setuptools import setup, find_packages

# Trove classifiers describing maturity, audience, license, supported
# Python versions and platforms.
CLASSIFIERS = [
    'Development Status :: 3 - Alpha',
    'Intended Audience :: Developers',
    'Intended Audience :: Science/Research',
    'License :: OSI Approved :: MIT License',
    'Programming Language :: Python',
    'Topic :: Scientific/Engineering',
    'Topic :: Scientific/Engineering :: Mathematics',
    'Topic :: Scientific/Engineering :: Physics',
    'Programming Language :: Python',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3.3',
    'Programming Language :: Python :: 3.4',
    'Programming Language :: Python :: 3.5',
    'Operating System :: Microsoft :: Windows',
    'Operating System :: POSIX',
    'Operating System :: Unix',
    'Operating System :: MacOS',
    'Natural Language :: English',
]

# Use the README verbatim as the long description shown on PyPI.
with open('README.rst') as fp:
    LONG_DESCRIPTION = ' '.join(fp.readlines())

setup(
    name = 'pyMKL',
    version = '0.0.3',
    packages = find_packages(),
    install_requires = ['numpy>=1.7',
                        'scipy>=0.13',
                        'future',
                        ],
    author = 'Dave Marchant',
    author_email = 'dwfmarchant@gmail.com',
    description = 'Python wrapper of Intel MKL routines',
    long_description = LONG_DESCRIPTION,
    license = 'MIT',
    keywords = 'sparse linear solver mkl pardiso',
    url = 'https://github.com/dwfmarchant/pyMKL',
    download_url = 'https://github.com/dwfmarchant/pyMKL/tarball/0.0.3',
    classifiers = CLASSIFIERS,
    platforms = ['Windows', 'Linux', 'Solaris', 'Mac OS-X', 'Unix'],
    use_2to3 = False,
)
|
{
"content_hash": "d2cac8b7fb004e393535cbe3e798046f",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 72,
"avg_line_length": 31.52,
"alnum_prop": 0.6364213197969543,
"repo_name": "dwfmarchant/pyMKL",
"id": "61b730e76cd354ebd6848b9bc837d6dbf7b887bd",
"size": "1576",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21121"
}
],
"symlink_target": ""
}
|
#-------------------------------------------------------------------------
import cross3d
from cross3d import abstractmethod
from collections import MutableSequence
class Collection(MutableSequence):
	"""Operates on several scene objects at once, taking work off the Scene
	object so it can be optimized in one place.
	"""

	# ----------------------------------------------
	# TODO: Implement that block.
	def __setitem__(self, index, item):
		pass

	def __delitem__(self, index):
		pass

	def __len__(self):
		pass

	def insert(self, index, item):
		pass
	# ----------------------------------------------

	@classmethod
	def _objectsGenerator(cls, scene, objects):
		# Lazily wrap anything that is not already a SceneObject.
		for native in objects:
			if isinstance(native, cross3d.SceneObject):
				yield native
			else:
				yield cross3d.SceneObject(scene, native)

	@classmethod
	def _nativeObjectsGenerator(cls, objects):
		# Lazily unwrap SceneObjects down to their native handles.
		for obj in objects:
			yield obj() if isinstance(obj, cross3d.SceneObject) else obj

	def __init__(self, scene, objects):
		self._scene = scene
		# Both views are lazy, single-pass generators over the same input.
		self._objects = self._objectsGenerator(scene, objects)
		self._nativeObjects = self._nativeObjectsGenerator(objects)
		super(Collection, self).__init__()

	@abstractmethod
	def setHidden(self, hidden):
		return False


# Registering the symbol.
cross3d.registerSymbol('Collection', Collection)
|
{
"content_hash": "1bbbce1f0cd8aad87b97d35e2d5eab7c",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 78,
"avg_line_length": 23.766666666666666,
"alnum_prop": 0.6009817671809257,
"repo_name": "blurstudio/cross3d",
"id": "449d30b0a1a2eed85261f399a012a9474b17d94c",
"size": "1666",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cross3d/abstract/collection.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "46"
},
{
"name": "MAXScript",
"bytes": "17640"
},
{
"name": "Python",
"bytes": "1023336"
}
],
"symlink_target": ""
}
|
from AppKit import NSSegmentedControl, NSSegmentedCell, NSImage, NSSegmentSwitchTrackingSelectOne, NSSegmentSwitchTrackingSelectAny, NSSegmentSwitchTrackingMomentary
from vanilla.vanillaBase import VanillaBaseControl
# Map the user-facing selectionStyle strings to AppKit tracking-mode constants.
_trackingModeMap = {
    "one": NSSegmentSwitchTrackingSelectOne,
    "any": NSSegmentSwitchTrackingSelectAny,
    "momentary": NSSegmentSwitchTrackingMomentary,
}
class SegmentedButton(VanillaBaseControl):
"""
A standard segmented button.
.. image:: /_images/SegmentedButton.png
::
from vanilla import Window, SegmentedButton
class SegmentedButtonDemo:
def __init__(self):
self.w = Window((120, 40))
self.w.button = SegmentedButton((10, 10, -10, 20),
[dict(title="A"), dict(title="B"), dict(title="C")],
callback=self.buttonCallback)
self.w.open()
def buttonCallback(self, sender):
print("button hit!")
SegmentedButtonDemo()
**posSize** Tuple of form *(left, top, width, height)* or *"auto"* representing
the position and size of the segmented button. The size of the segmented button
should match the appropriate value for the given *sizeStyle*.
+-------------------------+
| **Standard Dimensions** |
+=========+===+===========+
| Regular | H | 21 |
+---------+---+-----------+
| Small | H | 18 |
+---------+---+-----------+
| Mini | H | 15 |
+---------+---+-----------+
**segmentDescriptions** An ordered list of dictionaries describing the segments.
+----------------------------+----------------------------------------------------------+
| width (optional) | The desired width of the segment. |
+----------------------------+----------------------------------------------------------+
| title (optional) | The title of the segment. |
+----------------------------+----------------------------------------------------------+
| enabled (optional) | The enabled state of the segment. The default is `True`. |
+----------------------------+----------------------------------------------------------+
| imagePath (optional) | A file path to an image to display in the segment. |
+----------------------------+----------------------------------------------------------+
| imageNamed (optional) | The name of an image already loaded as a `NSImage`_ by |
| | the application to display in the segment. |
+----------------------------+----------------------------------------------------------+
| imageObject (optional) | A `NSImage`_ object to display in the segment. |
+----------------------------+----------------------------------------------------------+
| *imageTemplate* (optional) | A boolean representing if the image should converted |
| | to a template image. |
+----------------------------+----------------------------------------------------------+
**callback** The method to be called when the user presses the segmented button.
**selectionStyle** The selection style in the segmented button.
+-----------+---------------------------------------------+
| one | Only one segment may be selected. |
+-----------+---------------------------------------------+
| any | Any number of segments may be selected. |
+-----------+---------------------------------------------+
| momentary | A segmented is only selected when tracking. |
+-----------+---------------------------------------------+
**sizeStyle** A string representing the desired size style of the segmented button.
The options are:
+-----------+
| "regular" |
+-----------+
| "small" |
+-----------+
| "mini" |
+-----------+
.. _NSImage: https://developer.apple.com/documentation/appkit/nsimage?language=objc
"""
nsSegmentedControlClass = NSSegmentedControl
nsSegmentedCellClass = NSSegmentedCell
frameAdjustments = {
"mini": (0, -1, 0, 1), #15
"small": (-2, -4, 2, 5), #20
"regular": (0, -4, 0, 5), #24
}
def __init__(self, posSize, segmentDescriptions, callback=None, selectionStyle="one", sizeStyle="small"):
self._setupView(self.nsSegmentedControlClass, posSize)
if self.nsSegmentedCellClass != NSSegmentedCell:
self._nsObject.setCell_(self.nsSegmentedCellClass.alloc().init())
if callback is not None:
self._setCallback(callback)
self._setSizeStyle(sizeStyle)
nsObject = self._nsObject
nsObject.setSegmentCount_(len(segmentDescriptions))
nsObject.cell().setTrackingMode_(_trackingModeMap[selectionStyle])
for segmentIndex, segmentDescription in enumerate(segmentDescriptions):
width = segmentDescription.get("width", 0)
title = segmentDescription.get("title", "")
enabled = segmentDescription.get("enabled", True)
imagePath = segmentDescription.get("imagePath")
imageNamed = segmentDescription.get("imageNamed")
imageTemplate = segmentDescription.get("imageTemplate")
imageObject = segmentDescription.get("imageObject")
# create the NSImage if needed
if imagePath is not None:
image = NSImage.alloc().initWithContentsOfFile_(imagePath)
elif imageNamed is not None:
image = NSImage.imageNamed_(imageNamed)
elif imageObject is not None:
image = imageObject
else:
image = None
nsObject.setWidth_forSegment_(width, segmentIndex)
nsObject.setLabel_forSegment_(title, segmentIndex)
nsObject.setEnabled_forSegment_(enabled, segmentIndex)
if image is not None:
if imageTemplate is not None:
# only change the image template setting if its either True or False
image.setTemplate_(imageTemplate)
nsObject.setImage_forSegment_(image, segmentIndex)
def getNSSegmentedButton(self):
    """
    Return the wrapped `NSSegmentedControl`_ instance for callers that
    need direct access to the underlying AppKit object.
    .. _NSSegmentedControl: https://developer.apple.com/documentation/appkit/nssegmentedcontrol?language=objc
    """
    return self._nsObject
def enable(self, onOff):
    """
    Enable or disable every segment. **onOff** should be a boolean.
    """
    segmentedControl = self._nsObject
    for segmentIndex in range(segmentedControl.segmentCount()):
        segmentedControl.setEnabled_forSegment_(onOff, segmentIndex)
def set(self, value):
    """
    Set the selected segment. If this control is set to
    `any` mode, `value` should be a list of integers.
    Otherwise `value` should be a single integer.
    """
    segmentedControl = self._nsObject
    # In single-selection modes, normalize the scalar to a one-item list
    # so the loop below works the same way for both modes.
    anyMode = segmentedControl.cell().trackingMode() == _trackingModeMap["any"]
    if not anyMode:
        value = [value]
    for segmentIndex in range(segmentedControl.segmentCount()):
        segmentedControl.setSelected_forSegment_(segmentIndex in value, segmentIndex)
def get(self):
    """
    Get the selected segment. If this control is set to
    `any` mode, the returned value will be a list of integers.
    Otherwise the returned value will be a single integer or
    `None` if no segment is selected.
    """
    selected = [
        index
        for index in range(self._nsObject.segmentCount())
        if self._nsObject.isSelectedForSegment_(index)
    ]
    if self._nsObject.cell().trackingMode() == _trackingModeMap["any"]:
        return selected
    # single-selection mode: first selected index, or None
    return selected[0] if selected else None
|
{
"content_hash": "06fe8ea238b099cd5711607334e33c49",
"timestamp": "",
"source": "github",
"line_count": 188,
"max_line_length": 165,
"avg_line_length": 43.069148936170215,
"alnum_prop": 0.510065456341855,
"repo_name": "typesupply/vanilla",
"id": "f09bbd60230c68d013c591ce16bff45ac64d060b",
"size": "8097",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Lib/vanilla/vanillaSegmentedButton.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "331750"
}
],
"symlink_target": ""
}
|
from django.db import models
from django.contrib.contenttypes.fields import GenericRelation
from django.contrib.postgres.fields import ArrayField
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from picklefield.fields import PickledObjectField
from taiga.base.utils.time import timestamp_ms
from taiga.projects.tagging.models import TaggedMixin
from taiga.projects.occ import OCCModelMixin
from taiga.projects.notifications.mixins import WatchedModelMixin
from taiga.projects.mixins.blocked import BlockedMixin
class RolePoints(models.Model):
    """Through-model linking a UserStory to the points estimate for one role."""
    user_story = models.ForeignKey("UserStory", null=False, blank=False,
                                   related_name="role_points",
                                   verbose_name=_("user story"))
    role = models.ForeignKey("users.Role", null=False, blank=False,
                             related_name="role_points",
                             verbose_name=_("role"))
    # points is nullable: a role may not have been estimated yet
    points = models.ForeignKey("projects.Points", null=True, blank=False,
                               related_name="role_points",
                               verbose_name=_("points"))
    class Meta:
        verbose_name = "role points"
        verbose_name_plural = "role points"
        # one estimate per (user story, role) pair
        unique_together = ("user_story", "role")
        ordering = ["user_story", "role"]
    def __str__(self):
        # NOTE(review): raises AttributeError when points is None —
        # confirm callers only stringify estimated rows.
        return "{}: {}".format(self.role.name, self.points.name)
    @property
    def project(self):
        # Convenience passthrough to the owning user story's project.
        return self.user_story.project
class UserStory(OCCModelMixin, WatchedModelMixin, BlockedMixin, TaggedMixin, models.Model):
    """A project user story with per-role point estimates and three
    independent orderings (backlog, sprint and kanban)."""
    ref = models.BigIntegerField(db_index=True, null=True, blank=True, default=None,
                                 verbose_name=_("ref"))
    milestone = models.ForeignKey("milestones.Milestone", null=True, blank=True,
                                  default=None, related_name="user_stories",
                                  on_delete=models.SET_NULL, verbose_name=_("milestone"))
    project = models.ForeignKey("projects.Project", null=False, blank=False,
                                related_name="user_stories", verbose_name=_("project"))
    owner = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, blank=True,
                              related_name="owned_user_stories", verbose_name=_("owner"),
                              on_delete=models.SET_NULL)
    status = models.ForeignKey("projects.UserStoryStatus", null=True, blank=True,
                               related_name="user_stories", verbose_name=_("status"),
                               on_delete=models.SET_NULL)
    is_closed = models.BooleanField(default=False)
    # M2M through RolePoints; rows are auto-created on first save (see save()).
    points = models.ManyToManyField("projects.Points", blank=False,
                                    related_name="userstories", through="RolePoints",
                                    verbose_name=_("points"))
    # The three orderings default to the current timestamp in ms so newly
    # created stories sort after existing ones.
    backlog_order = models.BigIntegerField(null=False, blank=False, default=timestamp_ms,
                                           verbose_name=_("backlog order"))
    sprint_order = models.BigIntegerField(null=False, blank=False, default=timestamp_ms,
                                          verbose_name=_("sprint order"))
    kanban_order = models.BigIntegerField(null=False, blank=False, default=timestamp_ms,
                                          verbose_name=_("kanban order"))
    created_date = models.DateTimeField(null=False, blank=False,
                                        verbose_name=_("created date"),
                                        default=timezone.now)
    # maintained by save() below
    modified_date = models.DateTimeField(null=False, blank=False,
                                         verbose_name=_("modified date"))
    finish_date = models.DateTimeField(null=True, blank=True,
                                       verbose_name=_("finish date"))
    subject = models.TextField(null=False, blank=False,
                               verbose_name=_("subject"))
    description = models.TextField(null=False, blank=True, verbose_name=_("description"))
    assigned_to = models.ForeignKey(settings.AUTH_USER_MODEL, blank=True, null=True,
                                    default=None, related_name="userstories_assigned_to_me",
                                    verbose_name=_("assigned to"))
    client_requirement = models.BooleanField(default=False, null=False, blank=True,
                                             verbose_name=_("is client requirement"))
    team_requirement = models.BooleanField(default=False, null=False, blank=True,
                                           verbose_name=_("is team requirement"))
    attachments = GenericRelation("attachments.Attachment")
    generated_from_issue = models.ForeignKey("issues.Issue", null=True, blank=True,
                                             on_delete=models.SET_NULL,
                                             related_name="generated_user_stories",
                                             verbose_name=_("generated from issue"))
    external_reference = ArrayField(models.TextField(null=False, blank=False),
                                    null=True, blank=True, default=None, verbose_name=_("external reference"))
    tribe_gig = PickledObjectField(null=True, blank=True, default=None,
                                   verbose_name="taiga tribe gig")
    # When truthy, save() preserves an existing modified_date — presumably
    # set by import code; confirm against callers.
    _importing = None
    class Meta:
        verbose_name = "user story"
        verbose_name_plural = "user stories"
        ordering = ["project", "backlog_order", "ref"]
    def save(self, *args, **kwargs):
        """Persist the story, maintaining modified_date, default status and
        the per-role RolePoints rows."""
        # Keep modified_date current unless importing data that already
        # carries its own modified_date.
        if not self._importing or not self.modified_date:
            self.modified_date = timezone.now()
        if not self.status:
            self.status = self.project.default_us_status
        super().save(*args, **kwargs)
        # First save: seed one RolePoints row per project role with the
        # project's default points.
        if not self.role_points.all():
            for role in self.project.roles.all():
                RolePoints.objects.create(role=role,
                                          points=self.project.default_points,
                                          user_story=self)
    def __str__(self):
        # NOTE(review): renders as "(<subject>) <ref>" — the argument order
        # looks swapped versus the "(<ref>) <subject>" one might expect;
        # confirm this is intentional before changing it.
        return "({1}) {0}".format(self.ref, self.subject)
    def __repr__(self):
        return "<UserStory %s>" % (self.id)
    def get_role_points(self):
        # Related manager over this story's RolePoints rows.
        return self.role_points
    def get_total_points(self):
        """Sum of the role point values; None when no role has a value."""
        not_null_role_points = [rp for rp in self.role_points.all() if rp.points.value is not None]
        #If we only have None values the sum should be None
        if not not_null_role_points:
            return None
        total = 0.0
        for rp in not_null_role_points:
            total += rp.points.value
        return total
|
{
"content_hash": "715de5917e510da030159b32773b2477",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 110,
"avg_line_length": 48.42028985507246,
"alnum_prop": 0.572732714756061,
"repo_name": "mattcongy/itshop",
"id": "aaf78ad4aaa018a21ae62aabf43df39ed643cf8e",
"size": "7618",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "docker-images/taigav2/taiga-back/taiga/projects/userstories/models.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "103474"
},
{
"name": "CoffeeScript",
"bytes": "3380"
},
{
"name": "HTML",
"bytes": "274547"
},
{
"name": "JavaScript",
"bytes": "203660"
},
{
"name": "Nginx",
"bytes": "1286"
},
{
"name": "Python",
"bytes": "3591150"
},
{
"name": "Ruby",
"bytes": "164978"
},
{
"name": "Shell",
"bytes": "5238"
}
],
"symlink_target": ""
}
|
import bitcoin as btc
# https://github.com/klingebj/shamir_bip39_2039
from shamir_bip39_2039.api import shares_to_mnemonic, check_mnemonic_checksum
from shamir_bip39_2039 import seed
import binascii
import hashlib
import argparse
parser = argparse.ArgumentParser(
    description='Generate BIP32 addresses from mnemonic')
# Provide either --mnemonic, or exactly two of the three Shamir shares
# below (enforced with an assert in the __main__ block).
parser.add_argument(
    '--mnemonic',
    type=str,
    help='Mnemonic (space delimited string)',
    default=None)
parser.add_argument(
    '--share1',
    type=str,
    help='Mnemonic share (space delimited string)',
    default=None)
parser.add_argument(
    '--share2',
    type=str,
    help='Mnemonic share (space delimited string)',
    default=None)
parser.add_argument(
    '--share3',
    type=str,
    help='Mnemonic share (space delimited string)',
    default=None)
parser.add_argument(
    '--passphrase', type=str, help='BIP32 passphrase', default=None)
# Derivation path as three hardened indices (offset in child() below).
parser.add_argument(
    '--path',
    type=int,
    nargs=3,
    required=True,
    help='BIP32 derivation path (e.g. 0 0 0)')
parser.add_argument(
    '--sha256pass',
    dest='sha256pass',
    action='store_true',
    help='Apply sha256 to passphrase before use')
# Output selection: default prints the address; --key / --wif print the
# private key in hex / WIF form instead.
parser.add_argument(
    '--key',
    dest='return_key',
    action='store_true',
    help='Return the (hex) private key')
parser.add_argument(
    '--wif',
    dest='return_wif',
    action='store_true',
    help='Return the (wif) private key')
parser.add_argument(
    '--args',
    dest='print_args',
    action='store_true',
    help='Print input to script')
parser.add_argument(
    '--show_tests',
    dest='show_tests',
    action='store_true',
    help='Print test output')
def child(x, i):
    """Derive the i-th *hardened* child of extended key ``x``."""
    # Hardened derivation offsets the index into the upper half of the
    # 32-bit range. See https://github.com/vbuterin/pybitcointools/issues/58
    hardened_index = 2**31 + i
    return btc.bip32_ckd(x, hardened_index)
def descend(k, params):
    """Walk the hardened derivation path ``params`` starting from key ``k``."""
    key = k
    for index in params:
        key = child(key, index)
    return key
def key_address(masterkey, path):
    """Compute address and private key (hex) for path"""
    node = descend(masterkey, path)
    # last field of the deserialized node is the raw private key
    raw_priv = btc.bip32_deserialize(node)[-1]
    pub_key = btc.bip32_extract_key(btc.bip32_privtopub(node))
    # re-encode through the compressed binary form to get compressed hex
    hex_priv = btc.encode_privkey(
        btc.decode_privkey(raw_priv, 'bin_compressed'), 'hex')
    return hex_priv, btc.pubkey_to_address(pub_key)
def mnemonic_to_key(mnemonic, passphrase, path):
    """Return the (hex) private key"""
    root = btc.bip32_master_key(
        seed.mnemonic_to_seed(mnemonic, passphrase))
    return key_address(root, path)[0]
def mnemonic_to_wif(mnemonic, passphrase, path):
    """Return the (wif) private key"""
    hex_key = mnemonic_to_key(mnemonic, passphrase, path)
    return btc.encode_privkey(hex_key, 'wif_compressed')
def mnemonic_to_address(mnemonic, passphrase, path):
    """Return the (compressed) bitcoin address"""
    root = btc.bip32_master_key(
        seed.mnemonic_to_seed(mnemonic, passphrase))
    return key_address(root, path)[1]
def test_addresses(show_tests=False):
    """Test address generation"""
    # Each vector is (mnemonic, derivation path, expected address,
    # BIP39 passphrase). Several share a mnemonic but differ in passphrase
    # to check that the passphrase changes the derived address.
    test_vectors = [
        ("abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon about",
         [0, 0, 0], '1EbiwdYNzJ9jyJVsnEKkgV8cmi5iDvDMiS', 'TREZOR'),
        ("letter advice cage absurd amount doctor acoustic avoid letter advice cage above",
         [0, 0, 1], '16MYqD3yixDdP2wSZN2r6eLQRZjTeVZKqs', 'TREZOR'),
        ("abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon agent",
         [14, 14, 14], '17sheBr8x4kaApiMQSPzzbZYNKU5gs9csM', 'TREZOR'),
        ("letter advice cage absurd amount doctor acoustic avoid letter advice cage absurd amount doctor acoustic avoid letter always",
         [3, 4, 5], '1EyvTRpcWq17HN7veVrzbUECxPjwcTVNyA', 'TREZOR'),
        ("ozone drill grab fiber curtain grace pudding thank cruise elder eight picnic",
         [1, 1, 1], '1JmDtj3aHuTknXtxPW7o9xUBiASzPD5VDu', 'TREZOR'),
        ("gravity machine north sort system female filter attitude volume fold club stay feature office ecology stable narrow fog",
         [120, 14, 21], '16RJtTjpPF59b76vadgNUjgPqLVdZLAL8t', 'TREZOR'),
        ("hamster diagram private dutch cause delay private meat slide toddler razor book happy fancy gospel tennis maple dilemma loan word shrug inflict delay length",
         [1, 2, 3], '14w6DaQ4FkE7xtjovUrhSpThwNbSKydHBc', 'TREZOR'),
        ("scheme spot photo card baby mountain device kick cradle pact join borrow",
         [0, 0, 17], '19okVLmMQcz6yKisknSQQMutAsC1XszRdu', 'TREZOR'),
        ("horn tenant knee talent sponsor spell gate clip pulse soap slush warm silver nephew swap uncle crack brave",
         [1, 0, 0], '15q3smKAzaXsCJAbY6sGevZsE415xSijjL', 'TREZOR'),
        ("panda eyebrow bullet gorilla call smoke muffin taste mesh discover soft ostrich alcohol speed nation flash devote level hobby quick inner drive ghost inside",
         [12, 0, 90], '1Kfm6D7oYtEijcqRiEkTA6SnMoH6o3uWnj', 'TREZOR'),
        ("panda eyebrow bullet gorilla call smoke muffin taste mesh discover soft ostrich alcohol speed nation flash devote level hobby quick inner drive ghost inside",
         [12, 0, 90], '17FngVD55Rh6qkxcwpwadV29KuydXPzR57', ''),
        ("panda eyebrow bullet gorilla call smoke muffin taste mesh discover soft ostrich alcohol speed nation flash devote level hobby quick inner drive ghost inside",
         [12, 0, 90], '1GQsb7tCBAKvvxRXwv9ixD67NMuSaeRGeM',
         'ab8arstoienA$aoarsto_AST8405582811-arstarfcf292dnastratoarston4uq03gda'
        ),
        ("panda eyebrow bullet gorilla call smoke muffin taste mesh discover soft ostrich alcohol speed nation flash devote level hobby quick inner drive ghost adult",
         [12, 0, 90], '1Ho6ueo9mrtGfQgaPiNXDVs5Ecmfsp6V5e',
         'b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9'),
        ("craft tide sword holiday resemble process mammal hawk top seven reform thumb please blade million rich deny airport civil rough property torch raven beyond",
         [81, 22, 86], "1CBRcSVCj9YJ4SMceyWeHJSpokeEnRaS1N",
         "047bbab9d3c9a818b3107747da1b7fe1d22fbe5c733d78b6de0a33e7a3bee955"),
        ("surface book trick bicycle wisdom donkey slight flight this chicken unusual explain quit proof creek brisk brother rent swamp earn else penalty lyrics account",
         [16, 21, 58], "142bJdVy1JZEeFYXyEBUT99nSbRQ26aDXQ",
         "f5953d27c02783e3ab28138de27c68eb3f0331047efe8d2efa47c6b38c855004"),
        ("delay easily trumpet crane about cushion indoor vendor hockey duty resource fly exit member juice thunder snake unhappy school shift nature doll cousin alarm",
         [85, 51, 49], "13J1zrUvuRzraBNEW8vP1ZpnMBc4KuaSTn",
         "ff70868760c428fe5b76cb7f9204e0d0296109bfd3158bc4be4653e48f847d8c"),
        ("grit idea display balcony twist planet embody oval chicken liberty boss very wreck vapor embody visa unable country false atom athlete access unaware awful",
         [65, 47, 97], "112EJEA6v3iWafpgXS5j4kmujJX4U128mr",
         "2c96814bd95ba61ecc46cda371e1f538d81cd03f4f0211004ef4ecd2affee0a1")
    ]
    for i, (mnemonic, path, addr, passphrase) in enumerate(test_vectors):
        assert addr == mnemonic_to_address(
            mnemonic.split(' '), passphrase, path)
        if show_tests:
            # Python 2 print statement: echo each vector's result
            print i, addr, mnemonic_to_address(
                mnemonic.split(' '), passphrase, path)
if __name__ == '__main__':
    args = parser.parse_args()
    # Be sure everything is working...
    test_addresses(args.show_tests)
    #Check that either a mnemonic or two shares are provided
    num_shares = (args.share1 is not None) + (args.share2 is not None) + (
        args.share3 is not None)
    assert (args.mnemonic is not None) or (
        num_shares == 2), "Must provide mnemonic or two shares"
    def split(x):
        # None-safe space split for word lists
        return None if x is None else x.split(' ')
    if args.mnemonic is not None:
        mnemonic = split(args.mnemonic)
    else:
        # reconstruct the mnemonic from the two supplied Shamir shares
        mnemonic = shares_to_mnemonic(
            share1=split(args.share1),
            share2=split(args.share2),
            share3=split(args.share3))
    assert check_mnemonic_checksum(mnemonic)
    if args.sha256pass:
        # optional hardening: use the sha256 hex digest as the passphrase
        passphrase = hashlib.sha256(args.passphrase).hexdigest()
    else:
        passphrase = args.passphrase
    if args.print_args:
        # Python 2 print statements: echo the effective inputs
        print "\nMnemonic:", mnemonic
        print "Mnemonic length:", len(mnemonic), '\n'
        print "Passphrase:", passphrase
        print "Passphrase length:", len(passphrase), '\n'
        print "Path:", args.path
        print "Path length:", len(args.path), '\n'
    if args.return_wif:
        print mnemonic_to_wif(mnemonic, passphrase, args.path)
    elif args.return_key:
        print mnemonic_to_key(mnemonic, passphrase, args.path)
    else:
        print mnemonic_to_address(mnemonic, passphrase, args.path)
|
{
"content_hash": "26a149a9bf787623d6f396e9b63f2926",
"timestamp": "",
"source": "github",
"line_count": 215,
"max_line_length": 170,
"avg_line_length": 41.35813953488372,
"alnum_prop": 0.6887089518668466,
"repo_name": "klingebj/shamir_bip39_2039",
"id": "08e2ad8b127eef95913f8cd51ffb3062e909100b",
"size": "9282",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/little_bip32.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "28936"
}
],
"symlink_target": ""
}
|
import json
import random
import requests
from olipy.ia import Text
from botfriend.bot import BasicBot
from botfriend.model import Post
class JunkMailBot(BasicBot):
    """Posts a random page image from a random Internet Archive text in
    the configured collection."""

    COLLECTION = "tednelsonjunkmail"

    def update_state(self):
        """Merge newly uploaded item identifiers into the stored state.

        Returns the deduplicated list of all known item identifiers.
        """
        cutoff = self.model.last_state_update_time
        old_state = self.model.json_state or []
        query = Text.recent("collection:%s" % self.COLLECTION, cutoff=cutoff)
        new_items = [x.identifier for x in query]
        all_items = set(old_state + new_items)
        return list(all_items)

    def new_post(self):
        """Create a post for a random page of a random known item.

        Returns None when there is nothing to post (no known items, or
        the chosen page has no image).
        """
        # Guard BEFORE random.choice: choice() raises IndexError on an
        # empty sequence, so the original choose-then-check order could
        # never take the "no identifiers" path.
        identifiers = self.model.json_state
        if not identifiers:
            return None
        identifier = random.choice(identifiers)
        text = Text(identifier)
        title = text.metadata['title']
        page_num = random.randint(0, text.pages-1)
        reader_url = text.reader_url(page_num)
        image_url = text.image_url(page_num)
        # Bail out before creating the post so we don't persist a post
        # that would never be returned (the original created it first).
        if not image_url:
            return None
        # Create the post.
        post_text = "%s\n\n%s" % (title, reader_url)
        post, is_new = Post.from_content(
            self.model, post_text, reuse_existing=False
        )
        # Attach the page image with the media type the server reports.
        response = requests.get(image_url)
        media_type = response.headers['Content-Type']
        post.attach(media_type, content=response.content)
        return post

Bot = JunkMailBot
|
{
"content_hash": "174e0d504fd51c3b8f4bb8edca983838",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 77,
"avg_line_length": 29.829787234042552,
"alnum_prop": 0.6241084165477889,
"repo_name": "leonardr/botfriend",
"id": "d214ac14f1fcb69e7b822f50e48f18e109af3ad0",
"size": "1402",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bots.sample/junkmail/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "172541"
}
],
"symlink_target": ""
}
|
"""Decorator for view methods to help with data validation."""
from functools import wraps
import logging
import voluptuous as vol
# mypy: allow-untyped-defs
_LOGGER = logging.getLogger(__name__)
class RequestDataValidator:
    """Decorator that validates incoming JSON request data.

    Accepts a voluptuous schema (a plain dict is wrapped in one) and
    passes the validated payload to the handler as the ``data`` keyword
    argument. Responds with HTTP 400 when the body is not valid JSON or
    does not match the schema.
    """

    def __init__(self, schema, allow_empty=False):
        """Store the schema, wrapping plain dicts in vol.Schema."""
        self._schema = vol.Schema(schema) if isinstance(schema, dict) else schema
        self._allow_empty = allow_empty

    def __call__(self, method):
        """Decorate a request handler with body validation."""

        @wraps(method)
        async def wrapper(view, request, *args, **kwargs):
            """Validate the request body, then delegate to the handler."""
            payload = None
            try:
                payload = await request.json()
            except ValueError:
                # A missing body is tolerated only when allow_empty is set;
                # short-circuit so the body is read only in that case.
                empty_ok = self._allow_empty and (await request.content.read()) == b""
                if not empty_ok:
                    _LOGGER.error("Invalid JSON received.")
                    return view.json_message("Invalid JSON.", 400)
                payload = {}
            try:
                kwargs["data"] = self._schema(payload)
            except vol.Invalid as err:
                _LOGGER.error("Data does not match schema: %s", err)
                return view.json_message(f"Message format incorrect: {err}", 400)
            return await method(view, request, *args, **kwargs)

        return wrapper
|
{
"content_hash": "be5afc9672773fa64841c327e6a47851",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 82,
"avg_line_length": 31.0188679245283,
"alnum_prop": 0.5790754257907542,
"repo_name": "Teagan42/home-assistant",
"id": "51b3b5617e49cbeff498eff7e6a66efa93f0e59a",
"size": "1644",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/http/data_validator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "19774313"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
}
|
import sys

try:
    import ConfigParser
except ImportError:
    import configparser as ConfigParser
try:
    from collections import OrderedDict
except ImportError:
    if not '_OrderedDict' in dir():
        from odict import OrderedDict
    else:
        OrderedDict = _OrderedDict
class OrderedConfigParser(ConfigParser.ConfigParser):
    """
    Customization of ConfigParser to (a) use an ordered dictionary and (b)
    keep the original case of the data keys.
    """
    def __init__(self):
        # NOTE(review): this method uses module-level `sys`; confirm
        # `import sys` is present at the top of this file.
        if sys.version_info >= (2,6,0):
            # Python 2.6+ ConfigParser accepts the dict type directly.
            ConfigParser.ConfigParser.__init__(self, dict_type=OrderedDict)
        else:
            # Older Pythons: replace the internal dicts after construction.
            ConfigParser.ConfigParser.__init__(self)
            self._defaults = OrderedDict()
            self._sections = OrderedDict()
    def _get_sections(self, fp):
        """
        In old versions of Python, we prefetch the sections, to
        ensure that the data structures we are using are OrderedDict.
        """
        if sys.version_info >= (2,6,0):
            return
        # Pre-scan the file, creating an OrderedDict per section header.
        while True:
            line = fp.readline()
            if not line:
                break
            # NOTE(review): `line.strip()` discards its result; the regex
            # below matches the unstripped line. Possibly intentional since
            # section headers start at column 0 — confirm.
            line.strip()
            mo = self.SECTCRE.match(line)
            if mo:
                sectname = mo.group('header')
                if not sectname in self._sections:
                    self._sections[sectname] = OrderedDict()
                    self._sections[sectname]['__name__'] = sectname
    def _read(self, fp, fpname):
        """Parse a sectioned setup file.
        This first calls _get_sections to preparse the section info,
        and then calls the ConfigParser._read method.
        """
        self._get_sections(fp)
        # rewind so ConfigParser re-reads the file from the start
        fp.seek(0)
        return ConfigParser.ConfigParser._read(self, fp, fpname)
    def optionxform(self, option):
        """Do not convert to lower case"""
        return option
|
{
"content_hash": "11340d160c98e2fcbf9026b6d916aea3",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 75,
"avg_line_length": 30.8,
"alnum_prop": 0.5806277056277056,
"repo_name": "PyUtilib/pyutilib.virtualenv",
"id": "1c71cae8cec21589f8793bd85534b91afa2df88c",
"size": "2281",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyutilib/virtualenv/OrderedConfigParser.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "10042928"
},
{
"name": "Shell",
"bytes": "1810"
}
],
"symlink_target": ""
}
|
"""Surfaces, blitting and animation.
"""
####
import pygame
import random
####
def random_rgb():
    """Return a random (r, g, b) color tuple with components in 0..255."""
    return tuple(random.randint(0, 255) for _ in range(3))
####
class PygView(object):
    def __init__(self, width=800, height=600, fps=50):
        """Initializing background surface for static drawing
        and screen surface for dynamic drawing
        """
        pygame.init()
        pygame.display.set_caption("Press ESC to quit")
        self.width = width
        self.height = height
        self.screen = pygame.display.set_mode((self.width, self.height), pygame.DOUBLEBUF)
        # background is blitted onto screen every frame (see flip), so
        # anything drawn on it persists across frames ("static").
        self.background = pygame.Surface(self.screen.get_size()).convert()
        # white background
        self.background.fill((255, 255, 255))
        self.fps = fps
        self.clock = pygame.time.Clock()
        # act_surface selects where circle() draws: screen (dynamic,
        # cleared each frame) or background (static).
        self.act_surface = self.screen
        self.act_rgb = 255, 0, 0
    def draw_static(self):
        # Subsequent circle() calls draw onto the persistent background.
        self.act_surface = self.background
    def draw_dynamic(self):
        # Subsequent circle() calls draw onto the per-frame screen.
        self.act_surface = self.screen
    def set_color(self, rgb):
        # Current drawing color used by circle().
        self.act_rgb = rgb
    def circle(self, x, y, radius, width):
        """Allocate surface for blitting and draw circle."""
        rad2 = 2 * radius
        surface = pygame.Surface((rad2, rad2))
        pygame.draw.circle(surface, self.act_rgb, (radius, radius), radius, width)
        # black is the colorkey, so the corners outside the circle are
        # transparent when blitted
        surface.set_colorkey((0, 0, 0))
        self.act_surface.blit(surface.convert_alpha(), (x, y))
    def run(self, draw_dynamic):
        """The Mainloop.
        """
        running = True
        while running:
            self.flip()
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    running = False
                elif event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_ESCAPE:
                        running = False
            # caller-provided callable that draws one animation frame
            draw_dynamic()
        pygame.quit()
    def flip(self):
        # Present the frame, throttle to self.fps, then clear the screen
        # by restoring the static background.
        pygame.display.flip()
        self.clock.tick(self.fps)
        self.screen.blit(self.background, (0, 0))
####
class Ball(object):
    """A circle object with no hardcoded dependency on pygame
    (and other libs too, obviously...)
    """

    def __init__(self, x, y, radius, speed_x=1, speed_pulse=0, color=(0,0,255), width=0):
        self.x = x
        self.y = y
        self.radius = radius
        # current (possibly pulsing) radius; starts at full size
        self.act_radius = radius
        self.speed_x = speed_x
        self.speed_pulse = speed_pulse
        self.color = color
        self.width = width
        # pulsing balls shrink first, then grow back
        self.shrinking = True

    @property
    def max_x(self):
        """Right edge of the ball's bounding box (x plus diameter)."""
        return self.x + 2 * self.radius

    def rel_move(self, dx, dy):
        """Shift the position by (dx, dy)."""
        self.x = self.x + dx
        self.y = self.y + dy

    def pulse(self):
        """Shrink or expand ball.
        """
        if not self.speed_pulse:
            return
        if self.shrinking:
            # shrink toward the border width, never below it
            if self.act_radius <= self.width:
                self.shrinking = False
                return
            self.act_radius = max(self.act_radius - self.speed_pulse, self.width)
        else:
            # grow back toward the full radius
            if self.act_radius >= self.radius:
                self.shrinking = True
                return
            self.act_radius += self.speed_pulse

    def draw(self, view):
        """ Draw on a device with an appropriate interface."""
        # pulsing balls flicker with a random color each frame
        color = random_rgb() if self.speed_pulse else self.color
        view.set_color(color)
        view.circle(self.x, self.y, self.act_radius, self.width)
####
def action(balls, width, view):
    """ Return a function for the pygame mainloop.
    """
    # per-ball direction flags; every ball starts moving right
    moving_right = [True] * len(balls)

    def animate_balls():
        """ Draw moving balls."""
        for index, ball in enumerate(balls):
            # bounce between the left edge (x > 0) and the right edge
            # (max_x < width); a turn consumes one frame without moving
            if moving_right[index] and ball.max_x < width:
                ball.rel_move(ball.speed_x, 0)
            elif moving_right[index]:
                moving_right[index] = False
            elif ball.x > 0:
                ball.rel_move(-ball.speed_x, 0)
            else:
                moving_right[index] = True
            ball.pulse()
            ball.draw(view)
    return animate_balls
####
def main(width):
    """Simple example with stationary and moving balls.
    """
    view = PygView(width)
    # static layer: drawn once onto the persistent background
    view.draw_static()
    # args: x, y, radius, speed_x, speed_pulse, color, borderwidth
    # borderwidth <= radius !
    ball01 = Ball(50, 60, 50, 0, 0, (255, 255, 0))
    ball01.draw(view)
    ball02 = Ball(250, 150, 190, 0, 0, (66, 1, 166))
    ball02.draw(view)
    # dynamic layer: redrawn every frame by the loop function below
    view.draw_dynamic()
    ball1 = Ball(15, 130, 100, 1, 0, (255, 0, 0))
    ball2 = Ball(25, 200, 120, 2, 0, (0, 255, 155))
    # ball3 is created but never animated (not passed to action below)
    ball3 = Ball(20, 220, 110, 1, 1, (100, 55, 155))
    ball4 = Ball(20, 400, 70, 3, 0, (250, 100, 255))
    ball5 = Ball(90, 390, 70, 0, 1, (250, 100, 255), 1)
    loopfunc = action((ball1, ball2, ball5, ball4), width, view)
    view.run(loopfunc)
####
if __name__ == '__main__':
    # Run the demo in a 920-pixel-wide window.
    main(920)
|
{
"content_hash": "8d1bb90b99864db799d53f77a50de7b6",
"timestamp": "",
"source": "github",
"line_count": 208,
"max_line_length": 90,
"avg_line_length": 24.54326923076923,
"alnum_prop": 0.534769833496572,
"repo_name": "yipyip/Pygame-Examples",
"id": "4e84b83d2a95b30825503a718676c8cabb82a7fb",
"size": "5155",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blitballs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "45622"
}
],
"symlink_target": ""
}
|
"""
testifi.pypi
~~~~~~~~~~~~
This module contains the portions of testifi code that know how to handle
interacting with PyPI.
"""
import treq
import structlog
from twisted.internet.defer import inlineCallbacks, returnValue
logger = structlog.getLogger()
@inlineCallbacks
def certifiVersions():
    """
    This function determines what certifi versions are available and can be
    tested. It uses as its baseline the 14.05.14 release of certifi, and will
    locate all other versions.
    :returns: A Deferred that fires with a list of tuples of certifi versions
        and tarball URLs.
    """
    log = logger.new(function='certifiVersions')
    r = yield treq.get('https://pypi.python.org/pypi/certifi/json', timeout=5)
    log.msg("got certifi versions!")
    data = yield r.json()
    # Note: this takes advantage of the fact that certifi's releases have the
    # same version number sort order as lexicographical. If that changes,
    # this will break.
    releases = sorted(data[u'releases'].keys())
    first_release = releases.index('14.05.14')
    # everything from the baseline release onward
    target_versions = releases[first_release:]
    result = []
    for version in target_versions:
        files = data[u'releases'][version]
        # Find the .tar.gz release.
        for file in files:
            if file[u'filename'].endswith(u'.tar.gz'):
                break
        else:
            # no sdist tarball published for this version; skip it
            continue
        log.msg("new release located", version=version, tarball=file[u'url'])
        result.append((version, file[u'url']))
    returnValue(result)
@inlineCallbacks
def downloadFile(remote_path, fobj):
    """
    Download a file over HTTP from ``remote_path`` and save it to the provided
    file object ``fobj``.
    """
    logger.msg(
        "downloading file", remote_path=remote_path, function='downloadFile'
    )
    def file_writer(data):
        # treq.collect delivers response body chunks here as they arrive
        fobj.write(data)
    remote_path = remote_path.encode('utf-8')
    r = yield treq.get(remote_path, timeout=5)
    try:
        yield treq.collect(r, file_writer)
    except Exception as e:
        # Python 2 print statement; the exception is re-raised after
        # being echoed, so failures are not swallowed.
        print e
        raise
|
{
"content_hash": "6c4938fd5d126017684a04da3607c0b4",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 78,
"avg_line_length": 27.263157894736842,
"alnum_prop": 0.652992277992278,
"repo_name": "Lukasa/testifi",
"id": "910d15599ccb691c530ea66c29faa6ddd211afe4",
"size": "2096",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testifi/pypi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12391"
}
],
"symlink_target": ""
}
|
"""simulation data operations
:copyright: Copyright (c) 2020 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from pykern.pkcollections import PKDict
from pykern.pkdebug import pkdc, pkdlog, pkdp
from sirepo.template.lattice import LatticeUtil
from sirepo.template.template_common import ParticleEnergy
import math
import numpy
import re
import sirepo.sim_data
import sirepo.simulation_db
class SimData(sirepo.sim_data.SimDataBase):
    """Sim-data helpers for the controls app: wraps an external MAD-X
    lattice, adding ptc_track commands, default optimizer settings,
    process variables and old-model migrations."""
    @classmethod
    def add_ptc_track_commands(cls, data):
        """Append the ptc_* command sequence to data.models.commands."""
        def _set_ptc_ids(ptc_commands, data):
            # command ids must be unique across the lattice; start past max
            m = LatticeUtil.max_id(data) + 1
            for i, c in enumerate(ptc_commands):
                c._id = m + i
            return ptc_commands
        data.models.bunch.beamDefinition = "gamma"
        data.models.commands.extend(
            _set_ptc_ids(
                [
                    PKDict(_type="ptc_create_universe"),
                    PKDict(_type="ptc_create_layout"),
                    PKDict(_type="ptc_track", file="1", icase="6"),
                    PKDict(_type="ptc_track_end"),
                    PKDict(_type="ptc_end"),
                ],
                data,
            )
        )
    @classmethod
    def beamline_elements(cls, madx):
        """Yield the elements of the first beamline in lattice order."""
        elmap = PKDict({e._id: e for e in madx.elements})
        for el_id in madx.beamlines[0]["items"]:
            yield elmap[el_id]
    @classmethod
    def controls_madx_dir(cls):
        # Directory holding the companion "madx" simulations.
        return sirepo.simulation_db.simulation_dir("madx")
    @classmethod
    def current_field(cls, kick_field):
        """Name of the current field paired with a kick field (e.g. current_k1)."""
        return "current_{}".format(kick_field)
    @classmethod
    def default_optimizer_settings(cls, madx):
        """Build optimizerSettings with one target per monitor element."""
        targets = []
        for el in cls.beamline_elements(madx):
            if el.type in ("MONITOR", "HMONITOR", "VMONITOR"):
                item = cls.model_defaults("optimizerTarget")
                item.name = el.name
                # single-plane monitors only target their own plane
                if el.type == "HMONITOR":
                    del item["y"]
                elif el.type == "VMONITOR":
                    del item["x"]
                targets.append(item)
        opts = cls.model_defaults("optimizerSettings").pkupdate(
            PKDict(
                targets=targets,
            )
        )
        cls.init_optimizer_inputs(opts, madx)
        return opts
    @classmethod
    def fixup_old_data(cls, data, qcall, **kwargs):
        """Migrate older saved simulations to the current model schema."""
        dm = data.models
        cls._init_models(
            dm,
            (
                "beamPositionAnimation",
                "bunch",
                "command_beam",
                "dataFile",
                "deviceServerMonitor",
                "initialMonitorPositionsReport",
                "instrumentAnimationAll",
                "instrumentAnimationTwiss",
            ),
        )
        if "externalLattice" in dm:
            # the embedded madx sim has its own migrations
            sirepo.sim_data.get_class("madx").fixup_old_data(
                dm.externalLattice, qcall=qcall
            )
            if "optimizerSettings" not in dm:
                dm.optimizerSettings = cls.default_optimizer_settings(
                    dm.externalLattice.models
                )
            if "controlSettings" not in dm:
                cls.init_process_variables(dm)
                cls.init_currents(dm.command_beam, dm.externalLattice.models)
            cls._init_models(dm, ("controlSettings", "optimizerSettings"))
            if "inputs" not in dm.optimizerSettings:
                cls.init_optimizer_inputs(
                    dm.optimizerSettings, dm.externalLattice.models
                )
            cls._remove_old_command(dm.externalLattice.models)
            # older sims stored pc instead of gamma; derive gamma once
            if (
                dm.command_beam.gamma == 0
                and "pc" in dm.command_beam
                and dm.command_beam.pc > 0
            ):
                cls.update_beam_gamma(dm.command_beam)
                dm.command_beam.pc = 0
            # twiss command fields moved onto the bunch model
            if "command_twiss" in dm:
                for f in dm.command_twiss:
                    if f in dm.bunch:
                        dm.bunch[f] = dm.command_twiss[f]
                del dm["command_twiss"]
        if "externalLattice" in dm:
            cls.add_ptc_track_commands(dm.externalLattice)
    @classmethod
    def init_optimizer_inputs(cls, optimizerSettings, madx):
        """Populate optimizer inputs: kickers default on, quads default off."""
        optimizerSettings.inputs = PKDict(kickers=PKDict(), quads=PKDict())
        for el in cls.beamline_elements(madx):
            if el.type == "QUADRUPOLE":
                optimizerSettings.inputs.quads[str(el._id)] = False
            elif "KICKER" in el.type:
                optimizerSettings.inputs.kickers[str(el._id)] = True
    @classmethod
    def init_currents(cls, beam, models):
        """Add current_* fields computed from each element's kick fields."""
        def is_kick_field(field):
            # matches kick, hkick, vkick and k1
            return re.search(r"^(.?kick|k1)$", field)
        ac = AmpConverter(beam)
        for el in cls.beamline_elements(models):
            # list() because the loop adds keys while iterating
            for f in list(el.keys()):
                if is_kick_field(f):
                    el[cls.current_field(f)] = ac.kick_to_current(el[f])
    @classmethod
    def init_process_variables(cls, models):
        """Create controlSettings with a PV entry per monitor/kicker/quad."""
        pvs = []
        def _add_pv(elId, dim, write="0"):
            pvs.append(
                PKDict(
                    elId=elId,
                    pvDimension=dim,
                    isWritable=write,
                    pvName="",
                )
            )
        models.controlSettings = cls.model_defaults("controlSettings").pkupdate(
            {
                "processVariables": pvs,
            }
        )
        for el in cls.beamline_elements(models.externalLattice.models):
            if el.type == "MONITOR":
                _add_pv(el._id, "horizontal")
                _add_pv(el._id, "vertical")
            elif el.type == "HMONITOR":
                _add_pv(el._id, "horizontal")
            elif el.type == "VMONITOR":
                _add_pv(el._id, "vertical")
            elif el.type == "KICKER":
                # kickers get a readable and a writable ("1") PV per plane
                _add_pv(el._id, "horizontal")
                _add_pv(el._id, "horizontal", "1")
                _add_pv(el._id, "vertical")
                _add_pv(el._id, "vertical", "1")
            elif el.type == "HKICKER":
                _add_pv(el._id, "horizontal")
                _add_pv(el._id, "horizontal", "1")
            elif el.type == "VKICKER":
                _add_pv(el._id, "vertical")
                _add_pv(el._id, "vertical", "1")
            elif el.type == "QUADRUPOLE":
                _add_pv(el._id, "none")
        return models
    @classmethod
    def update_beam_gamma(cls, beam):
        """Set beam.gamma from the beam's particle energy fields."""
        beam.gamma = ParticleEnergy.compute_energy(
            "madx",
            beam.particle,
            beam,
        ).gamma
    @classmethod
    def _compute_job_fields(cls, data, r, compute_model):
        # Models whose changes invalidate the given report's results.
        res = []
        if r == "initialMonitorPositionsReport":
            res = ["controlSettings", "dataFile", "externalLattice"]
        return res
    @classmethod
    def _compute_model(cls, analysis_model, *args, **kwargs):
        # All instrument* reports and beamPositionAnimation share one
        # compute model.
        if "instrument" in analysis_model or analysis_model == "beamPositionAnimation":
            return "instrumentAnimation"
        return super(SimData, cls)._compute_model(analysis_model, *args, **kwargs)
    @classmethod
    def _lib_file_basenames(cls, data):
        """Lib files referenced by this sim (the optional input log file)."""
        if "controlSettings" in data.models:
            n = data.models.controlSettings.inputLogFile
            if n:
                return [
                    cls.lib_file_name_with_model_field(
                        "controlSettings", "inputLogFile", n
                    )
                ]
        return []
    @classmethod
    def _remove_old_command(cls, dm):
        # Drop obsolete select/twiss commands from migrated lattices.
        cmds = []
        for cmd in dm.commands:
            if cmd._type == "select" or cmd._type == "twiss":
                continue
            cmds.append(cmd)
        dm.commands = cmds
class AmpConverter:
    """Converts between magnet currents (amps) and madx kick values for a
    given beam.

    ``amp_table`` rows are ``[current, factor]`` pairs; between rows the
    factor is linearly interpolated. Without a table a constant
    ``default_factor`` is used.
    """

    # kg per GeV/c^2 (converts schema particle mass to SI)
    _GEV_TO_KG = 1.78266192e-27
    # Coulomb
    _ELEMENTARY_CHARGE = 1.602176634e-19
    _SCHEMA = SimData.schema()
    def __init__(self, beam, amp_table=None, default_factor=100):
        # a usable table row needs at least [current, factor]
        if amp_table and len(amp_table[0]) < 2:
            raise AssertionError("invalid amp_table: {}".format(amp_table))
        # the reverse (kick -> factor) column is built lazily on first use
        self._computed_reverse_table = False
        # keep only the first two columns of each row
        self._amp_table = [r for r in map(lambda x: [x[0], x[1]], amp_table or [])]
        self._beam_info = self.__beam_info(beam)
        self._default_factor = default_factor
    def current_to_kick(self, current):
        """Convert a magnet current (amps) to a madx kick value."""
        return self.__compute_kick(current, self.__interpolate_table(current, 0, 1))
    def kick_to_current(self, kick):
        """Convert a madx kick value to a magnet current (amps)."""
        if not self._computed_reverse_table:
            self._computed_reverse_table = True
            self.__build_reverse_map()
        # column 2 holds the kick computed for each table row (see
        # __build_reverse_map), so interpolate kick -> factor
        return self.__compute_current(float(kick), self.__interpolate_table(kick, 2, 1))
    def __beam_info(self, beam):
        # particle mass/charge from the schema when the particle is known,
        # otherwise the user-supplied values on the beam model
        if beam.get("particle") and self._SCHEMA.constants.particleMassAndCharge.get(
            beam.particle
        ):
            pmc = self._SCHEMA.constants.particleMassAndCharge.get(beam.particle)
        else:
            pmc = [beam.mass, beam.charge]
        return PKDict(
            mass=pmc[0] * self._GEV_TO_KG,
            charge=pmc[1] * self._ELEMENTARY_CHARGE,
            gamma=beam.gamma,
            # relativistic beta derived from gamma
            beta=math.sqrt(1 - (1 / (beam.gamma * beam.gamma))),
        )
    def __build_reverse_map(self):
        # append the computed kick as a third column so kick values can be
        # interpolated back to factors
        if self._amp_table:
            for row in self._amp_table:
                row.append(self.__compute_kick(row[0], row[1]))
    def __compute_current(self, kick, factor):
        b = self._beam_info
        return (
            kick
            * b.gamma
            * b.mass
            * b.beta
            * self._SCHEMA.constants.clight
            / (b.charge * factor)
        )
    def __compute_kick(self, current, factor):
        b = self._beam_info
        return (
            current
            * b.charge
            * factor
            / (b.gamma * b.mass * b.beta * self._SCHEMA.constants.clight)
        )
    def __interpolate_table(self, value, from_index, to_index):
        # fall back to the constant factor when no table was supplied
        if not self._amp_table:
            return self._default_factor
        table = numpy.vstack(self._amp_table)
        return numpy.interp(value, table[:, from_index], table[:, to_index])
|
{
"content_hash": "eb0b41e6e20d6286c4735a6525d9e828",
"timestamp": "",
"source": "github",
"line_count": 293,
"max_line_length": 88,
"avg_line_length": 34.64505119453925,
"alnum_prop": 0.5296029947788395,
"repo_name": "radiasoft/sirepo",
"id": "e5d999c8de774c52f63cac8627fc1ee1ceac0bad",
"size": "10175",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sirepo/sim_data/controls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "152"
},
{
"name": "CSS",
"bytes": "65716"
},
{
"name": "HTML",
"bytes": "144600"
},
{
"name": "JavaScript",
"bytes": "3855752"
},
{
"name": "Jinja",
"bytes": "190763"
},
{
"name": "Jupyter Notebook",
"bytes": "1262"
},
{
"name": "Opal",
"bytes": "61806"
},
{
"name": "Perl",
"bytes": "31089"
},
{
"name": "Python",
"bytes": "3022923"
},
{
"name": "SCSS",
"bytes": "29855"
},
{
"name": "Shell",
"bytes": "21259"
}
],
"symlink_target": ""
}
|
import multiprocessing
from multiprocessing.managers import BaseManager
from message import Msg
from evdev import InputDevice, categorize, ecodes, KeyEvent, list_devices
from select import select
import xml.etree.ElementTree as ET
import logging
logger = logging.getLogger(__name__)
class KeyMsg(Msg):
    """Queue message produced for a mapped key press."""
    def __init__(self, action):
        # key events must be acknowledged by the consumer
        self.msg_type = 'key'
        self.needs_ack = True
        self.value = action
class KeyListener(multiprocessing.Process):
    """Background process that reads evdev input devices and posts KeyMsg
    objects for mapped key presses onto a shared queue.

    The process exits when the mapped action is 'quit' or 'error' (the
    parent is expected to respawn it on 'error'), or when a ('quit',)
    command arrives on the control pipe.
    """
    def __init__(self, pipe, msg_queue, dev_names, keymap_file = '../data/keymap.xml'):
        multiprocessing.Process.__init__(self)
        self.pipe = pipe
        self.msg_queue = msg_queue
        self.read_keymap(keymap_file)
        # only open the requested devices that actually exist on this machine
        available_devices = list_devices()
        devices_to_use = [d for d in dev_names if d in available_devices]
        devices = map(InputDevice, devices_to_use)
        # select() works on file descriptors, so index the devices by fd
        self.devices = {dev.fd: dev for dev in devices}
        self.timeout = 0.1 # in seconds
    def run(self):
        """Event loop: poll the input devices and the control pipe."""
        logger.warning("Key listener running")
        while True:
            # block for at most self.timeout so the control pipe below is
            # still polled regularly even with no key activity
            r,w,x = select(self.devices, [], [], self.timeout)
            for fd in r:
                for event in self.devices[fd].read():
                    # log everything except high-frequency movement/sync events
                    if (event.type != ecodes.EV_REL) and (event.type != ecodes.EV_SYN):
                        logger.info(categorize(event))
                    if event.type == ecodes.EV_KEY:
                        if event.type == ecodes.EV_KEY and event.value == 1: #event.value == 1 => this is key down
                            code = event.code
                            logger.info("Key code: %s" % code)
                            if code in self.actions:
                                logger.info("Adding message to queue: %s" % (self.actions[code],))
                                self.msg_queue.put(KeyMsg(self.actions[code]))
                                if self.actions[code] == 'quit':
                                    logger.warning("KeyListener terminating")
                                    return
                                if self.actions[code] == 'error':
                                    # exit so the parent can respawn a fresh listener
                                    logger.warning("Error in KeyListner, respawn")
                                    return
            if self.pipe.poll():
                logger.info("key listener message received")
                cmnd = self.pipe.recv()
                if cmnd[0] == 'quit':
                    break
                else:
                    logger.error("Message not recognized for key listener module: %s" % cmnd[0])
        logger.warning('Terminating Key Listener')
    def read_keymap(self, keymap_file):
        """Load key-code -> action-name mappings from the keymap XML file."""
        self.actions = {}
        self.tree = ET.parse(keymap_file)
        actionmap = self.tree.getroot()
        for action in actionmap:
            # translate the symbolic key name to its evdev numeric code
            self.actions[ecodes.ecodes[action.get('key')]] = action.get('action')
        logger.info('Keymap loaded')
        logger.debug(self.actions)
if __name__ == "__main__":
print 'This is module for KeyListener class'
logging.basicConfig(level=logging.INFO)
msg_queue = multiprocessing.Queue()
t, f = multiprocessing.Pipe()
keylistener = KeyListener(t, msg_queue, ['/dev/input/event0','/dev/input/event1'])
keylistener.start()
while True:
msg = msg_queue.get()
print msg.value
if msg.value == 'quit':
break
if msg.value == 'error':
keylistener.join()
keylistener = KeyListener(msg_queue, ['/dev/input/event0','/dev/input/event1'])
keylistener.start()
keylistener.join()
|
{
"content_hash": "158100370aba789f5f8ea87300a97c6b",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 114,
"avg_line_length": 40.85057471264368,
"alnum_prop": 0.546426561620709,
"repo_name": "Elwetana/rfid_player",
"id": "1ed8cc0784dc1dbb574b8383e66d947fc5090087",
"size": "3573",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/keyinput.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "18887"
},
{
"name": "Python",
"bytes": "78884"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals, absolute_import
from ckeditor.widgets import CKEditorWidget
from django import forms
from djanban.apps.base.auth import get_user_boards
from djanban.apps.multiboards.models import Multiboard
from crequest.middleware import CrequestMiddleware
# Multiboard form
class MultiboardForm(forms.ModelForm):
    """Creation/edit form for a Multiboard.

    Restricts the selectable boards to the current user's non-archived
    boards and the selectable members to the current member's team mates.
    """
    class Meta:
        model = Multiboard
        fields = [
            "name", "description", "is_archived", "order", "boards", "members",
            # Inform if the multiboard must be shown in index
            "show_in_index",
            # Inform if the tasks of the following statuses must be shown
            "show_backlog_tasks",
            "show_ready_to_develop_tasks",
            "show_development_tasks",
            "show_after_development_in_review_tasks",
            "show_after_development_waiting_release_tasks",
            "show_done_tasks"
        ]
    class Media:
        css = {
            'all': ('css/multiboards/form.css',)
        }
        js = (
            'js/multiboards/form.js',
        )
    def __init__(self, *args, **kwargs):
        super(MultiboardForm, self).__init__(*args, **kwargs)
        # Rich-text editor for the description field
        self.fields["description"].widget = CKEditorWidget()
        # Current user comes from the request via crequest's thread-local
        # middleware (the form API has no user argument)
        current_request = CrequestMiddleware.get_request()
        current_user = current_request.user
        # Available boards for this user
        self.fields["boards"].choices = [
            (board.id, board.name) for board in get_user_boards(current_user).filter(is_archived=False).order_by("name")
        ]
        # Members of a multiboard
        current_member = current_user.member
        self.fields["members"].choices = [
            (member.id, member.external_username) for member in current_member.team_mates
        ]
    def save(self, commit=True):
        """Save the multiboard, ensuring its creator is always a member.

        Returns the saved instance per the standard ``ModelForm.save``
        contract. BUG FIX: the previous version discarded the instance and
        implicitly returned None, breaking callers that use the result.
        """
        instance = super(MultiboardForm, self).save(commit=commit)
        if commit:
            # the creator must always belong to the multiboard's members
            if not self.instance.members.filter(id=self.instance.creator.id).exists():
                self.instance.members.add(self.instance.creator)
        return instance
# Delete multiboard form
class DeleteMultiboardForm(forms.Form):
    """Confirmation form shown before deleting a multiboard."""
    confirmed = forms.BooleanField(label=u"Confirm you want to delete this multiboard")
# Leave multiboard form
class LeaveMultiboardForm(forms.Form):
    """Confirmation form shown before the current member leaves a multiboard."""
    confirmed = forms.BooleanField(label=u"Confirm you want to leave this multiboard")
|
{
"content_hash": "c5b6d75a1e8b833c8ae0d0745c2ddcc2",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 120,
"avg_line_length": 36.10769230769231,
"alnum_prop": 0.6429484448231785,
"repo_name": "diegojromerolopez/djanban",
"id": "0a6c4e86bb1e4791c856e6bed3ecbd25845388e6",
"size": "2372",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/djanban/apps/multiboards/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "79709"
},
{
"name": "HTML",
"bytes": "660275"
},
{
"name": "JavaScript",
"bytes": "634320"
},
{
"name": "Python",
"bytes": "993818"
},
{
"name": "Shell",
"bytes": "1732"
},
{
"name": "TypeScript",
"bytes": "71578"
}
],
"symlink_target": ""
}
|
"""Manage external data directories mounted to a docker container.
"""
from __future__ import print_function
import os
import six
from bcbiovm.docker import remap
def prepare_system(datadir, docker_biodata_dir):
    """Create system data directories and return their Docker mount specs.

    Each standard biodata subdirectory is created under ``datadir`` (if
    missing) and mapped to the same name under ``docker_biodata_dir``
    inside the container.
    """
    mount_specs = []
    for subdir in ("genomes", "liftOver", "gemini_data", "galaxy"):
        host_dir = os.path.normpath(os.path.realpath(os.path.join(datadir, subdir)))
        if not os.path.exists(host_dir):
            os.makedirs(host_dir)
        mount_specs.append("%s:%s/%s" % (host_dir, docker_biodata_dir, subdir))
    return mount_specs
def update_config(config, fcdir=None):
    """Resolve relative and symlinked path, providing mappings for docker container.
    """
    config, directories = normalize_config(config, fcdir)
    upload_dir = config.get("upload", {}).get("dir")
    if upload_dir:
        directories.append(upload_dir)
    # one identity mount ("dir:dir") per unique directory, sorted
    mounts = ["%s:%s" % (d, d) for d in sorted(set(directories))]
    config = remap.external_to_docker(config, mounts)
    return config, mounts
def normalize_config(config, fcdir=None):
    """Normalize sample configuration file to have absolute paths and collect
    directories.

    Prepares configuration for remapping directories into docker containers.
    """
    ignore = ["variantcaller", "realign", "recalibrate", "phasing", "svcaller"]
    absdetails = []
    directories = []
    for detail in config["details"]:
        # resolve sample-level, then algorithm-level, relative paths
        detail = abs_file_paths(
            detail,
            base_dirs=[fcdir] if fcdir else None,
            ignore=["description", "analysis", "resources",
                    "genome_build", "lane"],
        )
        detail["algorithm"] = abs_file_paths(
            detail["algorithm"],
            base_dirs=[fcdir] if fcdir else None,
            ignore=ignore,
        )
        absdetails.append(detail)
        directories.extend(_get_directories(detail, ignore))
    if config.get("upload", {}).get("dir"):
        upload_dir = os.path.normpath(
            os.path.realpath(os.path.join(os.getcwd(), config["upload"]["dir"]))
        )
        config["upload"]["dir"] = upload_dir
        if not os.path.exists(upload_dir):
            os.makedirs(upload_dir)
    config["details"] = absdetails
    return config, directories
def find_genome_directory(dirname):
    """Handle external non-docker installed biodata located relative to config
    directory.

    Reads the Galaxy ``sam_fa_indices.loc`` file under ``dirname`` and, for
    any genome listed with a relative path (used in testing), emits a
    self-referential Docker mount for the resolved genome directory.
    """
    sam_loc = os.path.join(dirname, "tool-data", "sam_fa_indices.loc")
    genome_dirs = {}
    if os.path.exists(sam_loc):
        with open(sam_loc) as in_handle:
            for line in in_handle:
                if not line.startswith("index"):
                    continue
                parts = line.split()
                genome_dirs[parts[1].strip()] = parts[-1].strip()
    mounts = []
    for genome_dir in sorted(set(genome_dirs.values())):
        # Special case used in testing -- relative paths
        if genome_dir and not os.path.isabs(genome_dir):
            rel_genome_dir = os.path.dirname(
                os.path.dirname(os.path.dirname(genome_dir))
            )
            full_genome_dir = os.path.normpath(
                os.path.join(os.path.dirname(sam_loc), rel_genome_dir)
            )
            mounts.append("%s:%s" % (full_genome_dir, full_genome_dir))
    return mounts
def _get_directories(xs, ignore):
    """Retrieve all directories specified in an input file.

    Recursively walks the dict ``xs`` (skipping keys in ``ignore``) and
    collects the parent directories of existing absolute file paths, given
    either as single strings or as lists/tuples of paths.
    """
    found = []
    if not isinstance(xs, dict):
        return found
    for key, val in xs.items():
        if key in ignore or not val:
            continue
        if isinstance(val, dict):
            found.extend(_get_directories(val, ignore))
        elif isinstance(val, six.string_types):
            if os.path.exists(val) and os.path.isabs(val):
                found.append(os.path.dirname(val))
        elif isinstance(val, (list, tuple)):
            # existence/absoluteness is only checked on the first entry
            if os.path.exists(val[0]) and os.path.isabs(val[0]):
                found.extend(os.path.dirname(item) for item in val)
    # drop empty dirnames (e.g. from bare relative filenames in lists)
    return [d for d in found if d]
def _normalize_path(x, base_dirs):
    """Resolve ``x`` against the first base directory that contains it.

    Returns the normalized, symlink-free absolute path, or None when ``x``
    is not found under any of ``base_dirs``.
    """
    for base_dir in base_dirs:
        candidate = os.path.join(base_dir, x)
        if os.path.exists(candidate):
            return os.path.normpath(os.path.realpath(candidate))
    return None

def abs_file_paths(xs, base_dirs=None, ignore=None):
    """Expand files to be absolute, non-symlinked file paths.

    Values in the dict ``xs`` (except keys listed in ``ignore``) that name
    existing files relative to ``base_dirs`` or the current directory are
    replaced by their resolved absolute paths; everything else is passed
    through unchanged. Non-dict input is returned as-is.
    """
    if not isinstance(xs, dict):
        return xs
    # BUG FIX: copy base_dirs before extending it -- the previous version
    # appended os.getcwd() to the caller's list in place.
    base_dirs = list(base_dirs) if base_dirs else []
    base_dirs.append(os.getcwd())
    ignore_keys = set(ignore if ignore else [])
    out = {}
    for k, v in xs.items():
        if k not in ignore_keys and v and isinstance(v, six.string_types) and _normalize_path(v, base_dirs):
            out[k] = _normalize_path(v, base_dirs)
        elif k not in ignore_keys and v and isinstance(v, (list, tuple)) and _normalize_path(v[0], base_dirs):
            out[k] = [_normalize_path(x, base_dirs) for x in v]
        else:
            out[k] = v
    return out
|
{
"content_hash": "9a88c51e9f032f13e4d339205af1ab56",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 110,
"avg_line_length": 41.68376068376068,
"alnum_prop": 0.6065204018864055,
"repo_name": "fw1121/bcbio-nextgen-vm",
"id": "d8623e0eda4b197f8ddbab26d97610558ca4f4dc",
"size": "4877",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "bcbiovm/docker/mounts.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "142483"
},
{
"name": "Shell",
"bytes": "597"
}
],
"symlink_target": ""
}
|
"""
Created on Mon Jul 17 16:17:25 2017
@author: jorgemauricio
"""
"""
De los datos proporcionados por SCHP, se eligen algunas columnas para trabajar la información
y se exploran las relaciones entre los montos registrados por mes, año, mes-año y año-sector.
La intención es responder las siguientes preguntas sencillas:
¿En que anio se registro un mayor monto?
¿En que mes se tienen la media mas alta de monto?
¿En que anio y mes se registran los mayores montos?
¿En que anio y sector se registran los mayores montos?
fuente: http://catalogo.datos.gob.mx/dataset/estadisticas-oportunas-de-finanzas-publicas
"""
#%%librerias
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
#%% tipo de grafico
plt.rcParams['figure.figsize']=(20,7)
#%% cargar datos
data = pd.read_csv('../data/ingresos.csv')
#%% checar la informacion
data.head()
#%% numero de filas y columnas
data.shape
#%% descripcion general de las variables
data.info()
#%% tipo de dato
data.dtypes
#%% nombre de las columnas
data.columns
#%% seleccionamos las columnas mas importantes
dataP = data[['CICLO','MES','NOMBRE','TEMA','SECTOR','MONTO']]
#%% mostramos la informacion seleccionada
dataP.head()
#%% cual es el anio con mayor monto registrado
group1 = dataP.groupby('CICLO')
#%% cantidad de registros para cada anio
group1.size()
#%% sumatoria de registros para cada anio
group1.sum()
#%% cantidad de registros en cada mes
group2 = dataP.groupby('MES')
#%% cantidad de registros para cada mes
group2.size()
#%% media
group2.mean()
# la variable ciclo se despliega como una variable númerica
#%% para desplegar la media solo del monto
group2.mean()['MONTO']
"""
Si se desea saber cual es el mes con la media de montos mayor, basta elegir
el máximo de los valores de las medias al ordenarlas. En este caso corresponde al mes de Diciembre
"""
group2['MONTO'].mean().sort_values()[-1:]
#%% agrupamos la informacion por anio y mes para conocer como se comportan los montos
group3 = dataP.groupby(['CICLO', 'MES'])
#%% vemos el valor de la suma de los montos
group3.sum()
#%% para elegir los 5 registros con los montos mas alta, hacemos la siguiente seleccion al group3
group3.sum().sort_values(by='MONTO')[-5:]
#%% se puede revisar como se comporta por CICLO y SECTOR, elegimos los 15 registros con valor
#%% mas alto en la suma de su MONTO
group4 = dataP.groupby(['CICLO', 'SECTOR']).sum().sort_values(by='MONTO')[-15:].plot(kind='bar')
#%%g desplegar grafica
plt.show()
#%% mostrar cantidad, sumatoria, max y min en valores
group1['MONTO'].agg([np.size, sum, max, min])
#%% comparativo entre maximo y minimo
group1['MONTO'].agg([max, min]).plot(kind='bar')
#%% desplegar grafica
plt.show()
#%% se explora como se comporta la agrupacion por mes, mediante una grafica de barras
group2['MONTO'].agg([max, min, np.mean])
#%% los datos originales permiten explorar graficamente como se relacionan los valores de los montos
#%% con respecto a los meses
sns.barplot(data=dataP, x='MES', y='MONTO', palette='PRGn')
#%% desplegar grafica
plt.show()
#%% explorar los datos agrupados por anio-mes
group3['MONTO'].agg([max, min, np.mean]).plot(title='Comportamiento de la suma de los montos')
#%% desplegar grafica
plt.show()
|
{
"content_hash": "608adf68b85d40b4852cf08cfcc69645",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 100,
"avg_line_length": 26.901639344262296,
"alnum_prop": 0.7273004265691652,
"repo_name": "jorgemauricio/INIFAP_Course",
"id": "7b8226720602671e789cbf43046ee7b0b9fe8620",
"size": "3340",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "algoritmos/algoritmo_exploracion_datos.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "11052861"
},
{
"name": "Python",
"bytes": "59081"
}
],
"symlink_target": ""
}
|
import os, sys
# KVStore / parameter-server tuning knobs. They are set before the mxnet
# import below so the library sees them at startup; NOTE(review): presumably
# all five are consumed at import/initialization time -- confirm before
# reordering.
os.environ["MXNET_KVSTORE_BIGARRAY_BOUND"] = "10"
os.environ["DMLC_NUM_KEYRANGE"] = "12"
os.environ["PS_VAN"] = "zmq"
os.environ["MXNET_MERGE_THRESHOLD"] = "2"
os.environ["MXNET_MERGE_TAU_MILLISECOND"] = "0"
import argparse
import logging
logging.basicConfig(level=logging.DEBUG)
# NOTE(review): find_mxnet presumably adjusts sys.path so the bundled mxnet
# build is importable -- confirm against common/find_mxnet.py
from common import find_mxnet
import mxnet as mx
import numpy as np
if __name__ == '__main__':
    # Distributed linear-regression smoke test: fit a single fully-connected
    # unit against synthetic data whose labels are exact dot products with a
    # known weight vector w.
    n_samples = 10000
    n_samples_eval = 200
    n_features = 100
    #True weight
    w = np.ones(n_features, dtype='float') / n_features
    #Training data
    # train_data = np.random.uniform(0, 1, [n_samples, n_features])
    # deterministic ramp inputs (randomized version kept above for reference)
    train_data = np.arange(n_samples * n_features, dtype='float').reshape((n_samples, n_features)) / n_features / n_samples
    train_label = train_data.dot(w)
    batch_size = 50
    #Evaluation Data
    # eval_data = np.random.uniform(0, 1, [n_samples, n_features])
    # same ramp shifted by 0.1 so evaluation data differs from training data
    eval_data = np.arange(n_samples_eval * n_features, dtype='float').reshape((n_samples_eval, n_features)) / n_features / n_samples_eval + 0.1
    eval_label = eval_data.dot(w)
    train_iter = mx.io.NDArrayIter(train_data,train_label, batch_size, shuffle=True,label_name='lin_reg_label')
    eval_iter = mx.io.NDArrayIter(eval_data, eval_label, batch_size, shuffle=False)
    # network: data -> one fully-connected output -> linear regression loss
    X = mx.sym.Variable('data')
    Y = mx.symbol.Variable('lin_reg_label')
    fully_connected_layer = mx.sym.FullyConnected(data=X, name='fc1', num_hidden = 1)
    lro = mx.sym.LinearRegressionOutput(data=fully_connected_layer, label=Y, name="lro")
    model = mx.mod.Module(
        symbol = lro ,
        data_names=['data'],
        label_names = ['lin_reg_label']# network structure
    )
    # train
    # 'dist_sync' runs synchronized distributed training through the
    # parameter server configured via the environment variables above
    kv = mx.kvstore.create('dist_sync')
    model.fit(train_iter, eval_iter,
            optimizer_params={'learning_rate':0.01, 'momentum': 0.1},
            initializer=mx.init.Zero(),
            num_epoch=5,
            eval_metric='mse',
            batch_end_callback = mx.callback.Speedometer(100, 200),
            kvstore=kv)
    # print(kv.rank)
|
{
"content_hash": "cb15ba22f0b80b455bc251517e944caf",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 143,
"avg_line_length": 37.6,
"alnum_prop": 0.6315280464216635,
"repo_name": "xcgoner/dist-mxnet",
"id": "ecb3ba975e34ca6ec10d762cc509a7f0d46a219b",
"size": "2068",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example/distributed/lr.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "12255"
},
{
"name": "C",
"bytes": "93947"
},
{
"name": "C++",
"bytes": "3626978"
},
{
"name": "CMake",
"bytes": "53832"
},
{
"name": "Cuda",
"bytes": "668127"
},
{
"name": "Groovy",
"bytes": "217"
},
{
"name": "Java",
"bytes": "20406"
},
{
"name": "Jupyter Notebook",
"bytes": "1229390"
},
{
"name": "Makefile",
"bytes": "45204"
},
{
"name": "Matlab",
"bytes": "30187"
},
{
"name": "Perl",
"bytes": "687661"
},
{
"name": "Perl 6",
"bytes": "4176"
},
{
"name": "Python",
"bytes": "3645307"
},
{
"name": "R",
"bytes": "324842"
},
{
"name": "Scala",
"bytes": "884082"
},
{
"name": "Shell",
"bytes": "201121"
}
],
"symlink_target": ""
}
|
"""Master-Slave connection to Mongo.
Performs all writes to Master instance and distributes reads among all
slaves. Reads are tried on each slave in turn until the read succeeds
or all slaves failed.
"""
from pymongo import helpers, thread_util
from pymongo import ReadPreference
from pymongo.common import BaseObject
from pymongo.mongo_client import MongoClient
from pymongo.database import Database
from pymongo.errors import AutoReconnect
class MasterSlaveConnection(BaseObject):
    """A master-slave connection to Mongo.

    Writes (and all operations inside a request) go to the master; reads
    are spread across the slaves. Throughout this class a connection id of
    -1 refers to the master and an id >= 0 indexes into the slave list.
    """
    def __init__(self, master, slaves=[], document_class=dict, tz_aware=False):
        """Create a new Master-Slave connection.
        The resultant connection should be interacted with using the same
        mechanisms as a regular `MongoClient`. The `MongoClient` instances used
        to create this `MasterSlaveConnection` can themselves make use of
        connection pooling, etc. `MongoClient` instances used as slaves should
        be created with the read_preference option set to
        :attr:`~pymongo.read_preferences.ReadPreference.SECONDARY`. Write
        concerns are inherited from `master` and can be changed in this
        instance.
        Raises TypeError if `master` is not an instance of `MongoClient` or
        slaves is not a list of at least one `MongoClient` instances.
        :Parameters:
          - `master`: `MongoClient` instance for the writable Master
          - `slaves`: list of `MongoClient` instances for the
            read-only slaves
          - `document_class` (optional): default class to use for
            documents returned from queries on this connection
          - `tz_aware` (optional): if ``True``,
            :class:`~datetime.datetime` instances returned as values
            in a document by this :class:`MasterSlaveConnection` will be timezone
            aware (otherwise they will be naive)
        """
        # NOTE(review): the mutable default slaves=[] is never reachable in
        # practice -- an empty list always fails the length check below, so
        # slaves is effectively a required argument.
        if not isinstance(master, MongoClient):
            raise TypeError("master must be a MongoClient instance")
        if not isinstance(slaves, list) or len(slaves) == 0:
            raise TypeError("slaves must be a list of length >= 1")
        for slave in slaves:
            if not isinstance(slave, MongoClient):
                raise TypeError("slave %r is not an instance of MongoClient" %
                                slave)
        super(MasterSlaveConnection,
              self).__init__(read_preference=ReadPreference.SECONDARY,
                             safe=master.safe,
                             **master.write_concern)
        self.__master = master
        self.__slaves = slaves
        self.__document_class = document_class
        self.__tz_aware = tz_aware
        # counts nested start_request/end_request calls per thread/greenlet
        self.__request_counter = thread_util.Counter(master.use_greenlets)
    @property
    def master(self):
        return self.__master
    @property
    def slaves(self):
        return self.__slaves
    @property
    def is_mongos(self):
        """If this MasterSlaveConnection is connected to mongos (always False)
        .. versionadded:: 2.3
        """
        return False
    @property
    def use_greenlets(self):
        """Whether calling :meth:`start_request` assigns greenlet-local,
        rather than thread-local, sockets.
        .. versionadded:: 2.4.2
        """
        return self.master.use_greenlets
    def get_document_class(self):
        return self.__document_class
    def set_document_class(self, klass):
        self.__document_class = klass
    document_class = property(get_document_class, set_document_class,
                              doc="""Default class to use for documents
                              returned on this connection.""")
    @property
    def tz_aware(self):
        return self.__tz_aware
    def disconnect(self):
        """Disconnect from MongoDB.
        Disconnecting will call disconnect on all master and slave
        connections.
        .. seealso:: Module :mod:`~pymongo.mongo_client`
        .. versionadded:: 1.10.1
        """
        self.__master.disconnect()
        for slave in self.__slaves:
            slave.disconnect()
    def set_cursor_manager(self, manager_class):
        """Set the cursor manager for this connection.
        Helper to set cursor manager for each individual `MongoClient` instance
        that make up this `MasterSlaveConnection`.
        """
        self.__master.set_cursor_manager(manager_class)
        for slave in self.__slaves:
            slave.set_cursor_manager(manager_class)
    # _connection_to_use is a hack that we need to include to make sure
    # that killcursor operations can be sent to the same instance on which
    # the cursor actually resides...
    def _send_message(self, message,
                      with_last_error=False, _connection_to_use=None):
        """Say something to Mongo.
        Sends a message on the Master connection. This is used for inserts,
        updates, and deletes.
        Raises ConnectionFailure if the message cannot be sent. Returns the
        request id of the sent message.
        :Parameters:
          - `operation`: opcode of the message
          - `data`: data to send
          - `safe`: perform a getLastError after sending the message
        """
        # None or -1 selects the master; any other id targets that slave
        if _connection_to_use is None or _connection_to_use == -1:
            return self.__master._send_message(message, with_last_error)
        return self.__slaves[_connection_to_use]._send_message(
            message, with_last_error, check_primary=False)
    # _connection_to_use is a hack that we need to include to make sure
    # that getmore operations can be sent to the same instance on which
    # the cursor actually resides...
    def _send_message_with_response(self, message, _connection_to_use=None,
                                    _must_use_master=False, **kwargs):
        """Receive a message from Mongo.
        Sends the given message and returns a (connection_id, response) pair.
        :Parameters:
          - `operation`: opcode of the message to send
          - `data`: data to send
        """
        if _connection_to_use is not None:
            if _connection_to_use == -1:
                return (-1,
                        self.__master._send_message_with_response(message,
                                                                  **kwargs))
            else:
                return (_connection_to_use,
                        self.__slaves[_connection_to_use]
                        ._send_message_with_response(message, **kwargs))
        # _must_use_master is set for commands, which must be sent to the
        # master instance. any queries in a request must be sent to the
        # master since that is where writes go.
        if _must_use_master or self.in_request():
            return (-1, self.__master._send_message_with_response(message,
                                                                  **kwargs))
        # Iterate through the slaves randomly until we have success. Raise
        # reconnect if they all fail.
        for connection_id in helpers.shuffled(xrange(len(self.__slaves))):
            try:
                slave = self.__slaves[connection_id]
                return (connection_id,
                        slave._send_message_with_response(message, **kwargs))
            except AutoReconnect:
                pass
        raise AutoReconnect("failed to connect to slaves")
    def start_request(self):
        """Start a "request".
        Start a sequence of operations in which order matters. Note
        that all operations performed within a request will be sent
        using the Master connection.
        """
        self.__request_counter.inc()
        self.master.start_request()
    def in_request(self):
        # true while at least one start_request is outstanding
        return bool(self.__request_counter.get())
    def end_request(self):
        """End the current "request".
        See documentation for `MongoClient.end_request`.
        """
        self.__request_counter.dec()
        self.master.end_request()
    def __eq__(self, other):
        if isinstance(other, MasterSlaveConnection):
            us = (self.__master, self.slaves)
            them = (other.__master, other.__slaves)
            return us == them
        return NotImplemented
    def __ne__(self, other):
        return not self == other
    def __repr__(self):
        return "MasterSlaveConnection(%r, %r)" % (self.__master, self.__slaves)
    def __getattr__(self, name):
        """Get a database by name.
        Raises InvalidName if an invalid database name is used.
        :Parameters:
          - `name`: the name of the database to get
        """
        return Database(self, name)
    def __getitem__(self, name):
        """Get a database by name.
        Raises InvalidName if an invalid database name is used.
        :Parameters:
          - `name`: the name of the database to get
        """
        return self.__getattr__(name)
    def close_cursor(self, cursor_id, connection_id):
        """Close a single database cursor.
        Raises TypeError if cursor_id is not an instance of (int, long). What
        closing the cursor actually means depends on this connection's cursor
        manager.
        :Parameters:
          - `cursor_id`: cursor id to close
          - `connection_id`: id of the `MongoClient` instance where the cursor
            was opened
        """
        if connection_id == -1:
            return self.__master.close_cursor(cursor_id)
        return self.__slaves[connection_id].close_cursor(cursor_id)
    def database_names(self):
        """Get a list of all database names.
        """
        return self.__master.database_names()
    def drop_database(self, name_or_database):
        """Drop a database.
        :Parameters:
          - `name_or_database`: the name of a database to drop or the object
            itself
        """
        return self.__master.drop_database(name_or_database)
    def __iter__(self):
        return self
    def next(self):
        # Python 2 iteration protocol; defined only so iteration raises a
        # clear error instead of AttributeError (see __getattr__ above).
        raise TypeError("'MasterSlaveConnection' object is not iterable")
    def _cached(self, database_name, collection_name, index_name):
        # index-cache helpers all delegate to the master connection
        return self.__master._cached(database_name,
                                     collection_name, index_name)
    def _cache_index(self, database_name, collection_name,
                     index_name, cache_for):
        return self.__master._cache_index(database_name, collection_name,
                                          index_name, cache_for)
    def _purge_index(self, database_name,
                     collection_name=None, index_name=None):
        return self.__master._purge_index(database_name,
                                          collection_name,
                                          index_name)
|
{
"content_hash": "761300ae13f65af0f2b4acfe6ea614d4",
"timestamp": "",
"source": "github",
"line_count": 297,
"max_line_length": 81,
"avg_line_length": 36.447811447811446,
"alnum_prop": 0.5939030023094688,
"repo_name": "edisonlz/fruit",
"id": "dbf00fac8cbf2f7ba7f40fef1ea85aaf2e865a85",
"size": "11402",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web_project/base/site-packages/pymongo/master_slave_connection.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "1482"
},
{
"name": "Batchfile",
"bytes": "6714"
},
{
"name": "C",
"bytes": "3085"
},
{
"name": "C++",
"bytes": "4823"
},
{
"name": "CSS",
"bytes": "660927"
},
{
"name": "DIGITAL Command Language",
"bytes": "27853"
},
{
"name": "GAP",
"bytes": "6045"
},
{
"name": "Go",
"bytes": "13616"
},
{
"name": "Groff",
"bytes": "7199"
},
{
"name": "HTML",
"bytes": "7678961"
},
{
"name": "Java",
"bytes": "208173"
},
{
"name": "JavaScript",
"bytes": "2626051"
},
{
"name": "Makefile",
"bytes": "16810"
},
{
"name": "Nginx",
"bytes": "19215"
},
{
"name": "PHP",
"bytes": "205978"
},
{
"name": "Perl",
"bytes": "27627"
},
{
"name": "Python",
"bytes": "15609476"
},
{
"name": "Shell",
"bytes": "13663"
},
{
"name": "TeX",
"bytes": "60714"
}
],
"symlink_target": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.