commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
42e26737d083b82716c3adb8c19fb66a5063dc65 | change version number to v3.0.1 | src/cmdlr/info.py | src/cmdlr/info.py | """Cmdlr infomation files."""
VERSION = '3.0.1'
DESCRIPTION = ('An extensible comic subscriber.')
LICENSE = 'MIT'
AUTHOR = 'Civalin'
AUTHOR_EMAIL = 'larinawf@gmail.com'
PROJECT_URL = 'https://github.com/civalin/cmdlr'
PROJECT_NAME = 'cmdlr'
| """Cmdlr infomation files."""
VERSION = '3.0.0'
DESCRIPTION = ('An extensible comic subscriber.')
LICENSE = 'MIT'
AUTHOR = 'Civalin'
AUTHOR_EMAIL = 'larinawf@gmail.com'
PROJECT_URL = 'https://github.com/civalin/cmdlr'
PROJECT_NAME = 'cmdlr'
| Python | 0.000123 |
49e301ac6a74a30cfdf00bf4178889f9ecb74889 | Patch release for bug-fix #166 | vtki/_version.py | vtki/_version.py | """ version info for vtki """
# major, minor, patch
version_info = 0, 18, 2
# Nice string for the version
__version__ = '.'.join(map(str, version_info))
| """ version info for vtki """
# major, minor, patch
version_info = 0, 18, 1
# Nice string for the version
__version__ = '.'.join(map(str, version_info))
| Python | 0 |
4c6ec1413d1a12165c1231095783aa94d235389a | Add __version__ to vumi package. | vumi/__init__.py | vumi/__init__.py | """
Vumi scalable text messaging engine.
"""
__version__ = "0.5.0a"
| Python | 0.000001 | |
3bb474a4506abb569d5c54703ba3bf2c9c933fd9 | Add tof-server to path | tof-server.wsgi | tof-server.wsgi | import sys
activate_this = '/var/www/tof-server/flask/bin/activate_this.py'
execfile(activate_this, dict(__file__=activate_this))
sys.path.append('/var/www/tof-server')
#activator = 'some/path/to/activate_this.py'
#with open(activator) as f:
# exec(f.read(), {'__file__': activator})
from tof_server import app as application | activate_this = '/var/www/tof-server/flask/bin/activate_this.py'
execfile(activate_this, dict(__file__=activate_this))
#activator = 'some/path/to/activate_this.py'
#with open(activator) as f:
# exec(f.read(), {'__file__': activator})
from tof_server import app as application | Python | 0.000001 |
5ede219cd4613af0fecbf415030aaa23df1ff3ee | Add test for measurement order | test/core/measurements_test.py | test/core/measurements_test.py | # Copyright 2016 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test various measurements use cases.
The test cases here need improvement - they should check for things that we
actually care about.
"""
from examples import all_the_things
import openhtf as htf
from openhtf.util import test as htf_test
# Fields that are considered 'volatile' for record comparison.
_VOLATILE_FIELDS = {'start_time_millis', 'end_time_millis', 'timestamp_millis',
'lineno', 'codeinfo', 'code_info', 'descriptor_id'}
class TestMeasurements(htf_test.TestCase):
def test_unit_enforcement(self):
"""Creating a measurement with invalid units should raise."""
self.assertRaises(TypeError, htf.Measurement('bad_units').with_units, 1701)
@htf_test.patch_plugs(user_mock='openhtf.plugs.user_input.UserInput')
def test_chaining_in_measurement_declarations(self, user_mock):
user_mock.prompt.return_value = 'mock_widget'
record = yield all_the_things.hello_world
self.assertNotMeasured(record, 'unset_meas')
self.assertMeasured(record, 'widget_type', 'mock_widget')
self.assertMeasured(record, 'widget_color', 'Black')
self.assertMeasurementPass(record, 'widget_size')
self.assertMeasurementPass(record, 'specified_as_args')
@htf_test.yields_phases
def test_measurements_with_dimenstions(self):
record = yield all_the_things.dimensions
self.assertNotMeasured(record, 'unset_dims')
self.assertMeasured(record, 'dimensions',
[(0, 1), (1, 2), (2, 4), (3, 8), (4, 16)])
self.assertMeasured(record, 'lots_of_dims',
[(1, 21, 101, 123), (2, 22, 102, 126),
(3, 23, 103, 129), (4, 24, 104, 132)])
@htf_test.yields_phases
def test_validator_replacement(self):
record = yield all_the_things.measures_with_args.with_args(min=2, max=4)
self.assertMeasurementFail(record, 'replaced_min_only')
self.assertMeasurementPass(record, 'replaced_max_only')
self.assertMeasurementFail(record, 'replaced_min_max')
record = yield all_the_things.measures_with_args.with_args(min=0, max=5)
self.assertMeasurementPass(record, 'replaced_min_only')
self.assertMeasurementPass(record, 'replaced_max_only')
self.assertMeasurementPass(record, 'replaced_min_max')
record = yield all_the_things.measures_with_args.with_args(min=-1, max=0)
self.assertMeasurementPass(record, 'replaced_min_only')
self.assertMeasurementFail(record, 'replaced_max_only')
self.assertMeasurementFail(record, 'replaced_min_max')
@htf_test.yields_phases
def test_measurement_order(self):
record = yield all_the_things.dimensions
self.assertEqual(record.measurements.keys(),
['unset_dims', 'dimensions', 'lots_of_dims'])
record = yield all_the_things.measures_with_args.with_args(min=2, max=4)
self.assertEqual(record.measurements.keys(),
['replaced_min_only', 'replaced_max_only',
'replaced_min_max'])
| # Copyright 2016 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test various measurements use cases.
The test cases here need improvement - they should check for things that we
actually care about.
"""
from examples import all_the_things
import openhtf as htf
from openhtf.util import test as htf_test
# Fields that are considered 'volatile' for record comparison.
_VOLATILE_FIELDS = {'start_time_millis', 'end_time_millis', 'timestamp_millis',
'lineno', 'codeinfo', 'code_info', 'descriptor_id'}
class TestMeasurements(htf_test.TestCase):
def test_unit_enforcement(self):
"""Creating a measurement with invalid units should raise."""
self.assertRaises(TypeError, htf.Measurement('bad_units').with_units, 1701)
@htf_test.patch_plugs(user_mock='openhtf.plugs.user_input.UserInput')
def test_chaining_in_measurement_declarations(self, user_mock):
user_mock.prompt.return_value = 'mock_widget'
record = yield all_the_things.hello_world
self.assertNotMeasured(record, 'unset_meas')
self.assertMeasured(record, 'widget_type', 'mock_widget')
self.assertMeasured(record, 'widget_color', 'Black')
self.assertMeasurementPass(record, 'widget_size')
self.assertMeasurementPass(record, 'specified_as_args')
@htf_test.yields_phases
def test_measurements_with_dimenstions(self):
record = yield all_the_things.dimensions
self.assertNotMeasured(record, 'unset_dims')
self.assertMeasured(record, 'dimensions',
[(0, 1), (1, 2), (2, 4), (3, 8), (4, 16)])
self.assertMeasured(record, 'lots_of_dims',
[(1, 21, 101, 123), (2, 22, 102, 126),
(3, 23, 103, 129), (4, 24, 104, 132)])
@htf_test.yields_phases
def test_validator_replacement(self):
record = yield all_the_things.measures_with_args.with_args(min=2, max=4)
self.assertMeasurementFail(record, 'replaced_min_only')
self.assertMeasurementPass(record, 'replaced_max_only')
self.assertMeasurementFail(record, 'replaced_min_max')
record = yield all_the_things.measures_with_args.with_args(min=0, max=5)
self.assertMeasurementPass(record, 'replaced_min_only')
self.assertMeasurementPass(record, 'replaced_max_only')
self.assertMeasurementPass(record, 'replaced_min_max')
record = yield all_the_things.measures_with_args.with_args(min=-1, max=0)
self.assertMeasurementPass(record, 'replaced_min_only')
self.assertMeasurementFail(record, 'replaced_max_only')
self.assertMeasurementFail(record, 'replaced_min_max')
| Python | 0 |
2a246c78a4506de5b4fcc55ff0257182142a4436 | Complete nums & num->pos dict sol w/ time/space complexity | lc0380_insert_delete_getrandom_o1.py | lc0380_insert_delete_getrandom_o1.py | """Leetcode 380. Insert Delete GetRandom O(1)
Medium
URL: https://leetcode.com/problems/insert-delete-getrandom-o1/
Design a data structure that supports all following operations in average O(1)
time.
- insert(val): Inserts an item val to the set if not already present.
- remove(val): Removes an item val from the set if present.
- getRandom: Returns a random element from current set of elements. Each element
must have the same probability of being returned.
Example:
// Init an empty set.
RandomizedSet randomSet = new RandomizedSet();
// Inserts 1 to the set. Returns true as 1 was inserted successfully.
randomSet.insert(1);
// Returns false as 2 does not exist in the set.
randomSet.remove(2);
// Inserts 2 to the set, returns true. Set now contains [1,2].
randomSet.insert(2);
// getRandom should return either 1 or 2 randomly.
randomSet.getRandom();
// Removes 1 from the set, returns true. Set now contains [2].
randomSet.remove(1);
// 2 was already in the set, so return false.
randomSet.insert(2);
// Since 2 is the only number in the set, getRandom always return 2.
randomSet.getRandom();
Your RandomizedSet object will be instantiated and called as such:
randomSet = RandomizedSet()
randomSet = randomSet.insert(val)
randomSet = randomSet.remove(val)
randomSet = randomSet.getRandom()
"""
class RandomizedSetNumsAndNumPosDict(object):
def __init__(self):
"""
Initialize your data structure here.
"""
from collections import defaultdict
# Use list to store nums and dict num->pos.
self.nums = []
self.num_pos = defaultdict()
def insert(self, val):
"""
Inserts a value to the set. Returns true if the set did not
already contain the specified element.
:type val: int
:rtype: bool
Time complexity: O(1).
Space complexity: O(n).
"""
if val not in self.num_pos:
self.nums.append(val)
self.num_pos[val] = len(self.nums) - 1
return True
return False
def remove(self, val):
"""
Removes a value from the set. Returns true if the set contained
the specified element.
:type val: int
:rtype: bool
Time complexity: O(1).
Space complexity: O(n).
"""
if val in self.num_pos:
# Get val's position.
val_pos = self.num_pos[val]
# Replace val by the last num.
last_num = self.nums[-1]
self.nums[val_pos] = last_num
self.num_pos[last_num] = val_pos
# Remove val from nums and num_pos.
self.nums.pop()
del self.num_pos[val]
return True
return False
def getRandom(self):
"""
Get a random element from the set.
:rtype: int
Time complexity: O(1).
Space complexity: O(1).
"""
import random
return random.choice(self.nums)
def main():
randomSet = RandomizedSetNumsAndNumPosDict()
# Inserts 1 to the set. Returns true as 1 was inserted successfully.
print randomSet.insert(1);
# Returns false as 2 does not exist in the set.
print randomSet.remove(2);
# Inserts 2 to the set, returns true. Set now contains [1,2].
print randomSet.insert(2);
# getRandom should return either 1 or 2 randomly.
print randomSet.getRandom();
# Removes 1 from the set, returns true. Set now contains [2].
print randomSet.remove(1);
# 2 was already in the set, so return false.
print randomSet.insert(2);
# Since 2 is the only number in the set, getRandom always return 2.
print randomSet.getRandom();
print randomSet.getRandom();
if __name__ == '__main__':
main()
| """Leetcode 380. Insert Delete GetRandom O(1)
Medium
URL: https://leetcode.com/problems/insert-delete-getrandom-o1/
Design a data structure that supports all following operations in average O(1)
time.
- insert(val): Inserts an item val to the set if not already present.
- remove(val): Removes an item val from the set if present.
- getRandom: Returns a random element from current set of elements. Each element
must have the same probability of being returned.
Example:
// Init an empty set.
RandomizedSet randomSet = new RandomizedSet();
// Inserts 1 to the set. Returns true as 1 was inserted successfully.
randomSet.insert(1);
// Returns false as 2 does not exist in the set.
randomSet.remove(2);
// Inserts 2 to the set, returns true. Set now contains [1,2].
randomSet.insert(2);
// getRandom should return either 1 or 2 randomly.
randomSet.getRandom();
// Removes 1 from the set, returns true. Set now contains [2].
randomSet.remove(1);
// 2 was already in the set, so return false.
randomSet.insert(2);
// Since 2 is the only number in the set, getRandom always return 2.
randomSet.getRandom();
Your RandomizedSet object will be instantiated and called as such:
obj = RandomizedSet()
param_1 = obj.insert(val)
param_2 = obj.remove(val)
param_3 = obj.getRandom()
"""
class RandomizedSet(object):
def __init__(self):
"""
Initialize your data structure here.
"""
pass
def insert(self, val):
"""
Inserts a value to the set. Returns true if the set did not already contain the specified element.
:type val: int
:rtype: bool
"""
pass
def remove(self, val):
"""
Removes a value from the set. Returns true if the set contained the specified element.
:type val: int
:rtype: bool
"""
pass
def getRandom(self):
"""
Get a random element from the set.
:rtype: int
"""
pass
def main():
pass
if __name__ == '__main__':
main()
| Python | 0.000001 |
ff6d9647b72c2101480f170e55ee28fd0cb37d11 | add MME tag by default | xbrowse_server/base/management/commands/add_default_tags.py | xbrowse_server/base/management/commands/add_default_tags.py | import sys
from optparse import make_option
from xbrowse_server import xbrowse_controls
from django.core.management.base import BaseCommand
from xbrowse_server.base.models import Project, ProjectTag, Family
def get_or_create_project_tag(project, tag_name, description, color='#1f78b4'):
"""
Gets or creates a particular ProjectTag in a given project.
Args:
project: The project that contains this tag
tag_name: The name of the new tag (can contain spaces) (eg. "Causal Variant")
description: (eg. "causal variant")
Returns:
new ProjectTag model object (or an existing one if a match was found)
"""
project_tag, created = ProjectTag.objects.get_or_create(project=project, tag=tag_name)
if created:
print("Created new tag: %s : %s" % (project, tag_name))
project_tag.title=description
project_tag.color=color
project_tag.save()
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('args', nargs='*')
parser.add_argument('-p', '--print-tags', help="Print what tags are bieng used", action="store_true")
def handle(self, *args, **options):
if len(args) < 1:
sys.exit("ERROR: must specify 1 or more project_ids on the command line")
project_ids = args
if options["print_tags"]:
for project in Project.objects.all():
print("========")
users = list(project.get_users())
if users and len(ProjectTag.objects.filter(project=project, tag='VUS')) == 0:
print("##### " + project.project_id + " #### " + ",".join(map(str, users)) + ", " + ("%s families" % len(Family.objects.filter(project=project))))
for project_tag in ProjectTag.objects.filter(project=project):
print(project_tag.tag + ": " + project_tag.title)
for project_id in project_ids:
project = Project.objects.get(project_id=project_id)
get_or_create_project_tag(project, tag_name="Review", description="", color='#88CCDD') # blue
get_or_create_project_tag(project, tag_name="Incidental Finding", description="", color='#FFAA33')
get_or_create_project_tag(project, tag_name="Novel Gene", description="", color='#FF0000') # 4C0083
get_or_create_project_tag(project, tag_name="Known Gene Phenotype Expansion", description="", color='#5521CC')
get_or_create_project_tag(project, tag_name="Known Gene for Phenotype", description="", color='#2177DD')
get_or_create_project_tag(project, tag_name="Pathogenic", description="Potential candidate gene", color='#AA1111') # red
get_or_create_project_tag(project, tag_name="Likely Pathogenic", description="Likely pathogenic", color='#FF9988') # light red
get_or_create_project_tag(project, tag_name="VUS", description="Uncertain significance", color='#AAAAAA') # gray
get_or_create_project_tag(project, tag_name="Likely Benign", description="Likely Benign", color='#B2DF8A') # light green
get_or_create_project_tag(project, tag_name="Benign", description="Strong evidence", color='#11AA11') # green
get_or_create_project_tag(project, tag_name="MME", description="Match Maker Exchange", color='#ff7f00')
print("Done")
"""
Review Review
Incidental Finding Incidental finding
Known Gene for Phenotype Known gene for phenotype
Known Gene Phenotype Expansion Known gene phenotype expansion
Novel Gene Novel gene
Pathogenic Potential candidate gene
Likely Pathogenic Moderate and supporting evidence
VUS Uncertain significance
Likely Benign Moderate and supporting evidence
Benign Strong evidence
"""
| import sys
from optparse import make_option
from xbrowse_server import xbrowse_controls
from django.core.management.base import BaseCommand
from xbrowse_server.base.models import Project, ProjectTag, Family
def get_or_create_project_tag(project, tag_name, description, color='#1f78b4'):
"""
Gets or creates a particular ProjectTag in a given project.
Args:
project: The project that contains this tag
tag_name: The name of the new tag (can contain spaces) (eg. "Causal Variant")
description: (eg. "causal variant")
Returns:
new ProjectTag model object (or an existing one if a match was found)
"""
project_tag, created = ProjectTag.objects.get_or_create(project=project, tag=tag_name)
if created:
print("Created new tag: %s : %s" % (project, tag_name))
project_tag.title=description
project_tag.color=color
project_tag.save()
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('args', nargs='*')
parser.add_argument('-p', '--print-tags', help="Print what tags are bieng used", action="store_true")
def handle(self, *args, **options):
if len(args) < 1:
sys.exit("ERROR: must specify 1 or more project_ids on the command line")
project_ids = args
if options["print_tags"]:
for project in Project.objects.all():
print("========")
users = list(project.get_users())
if users and len(ProjectTag.objects.filter(project=project, tag='VUS')) == 0:
print("##### " + project.project_id + " #### " + ",".join(map(str, users)) + ", " + ("%s families" % len(Family.objects.filter(project=project))))
for project_tag in ProjectTag.objects.filter(project=project):
print(project_tag.tag + ": " + project_tag.title)
for project_id in project_ids:
project = Project.objects.get(project_id=project_id)
get_or_create_project_tag(project, tag_name="Review", description="", color='#88CCDD') # blue
get_or_create_project_tag(project, tag_name="Incidental Finding", description="", color='#FFAA33')
get_or_create_project_tag(project, tag_name="Novel Gene", description="", color='#FF0000') # 4C0083
get_or_create_project_tag(project, tag_name="Known Gene Phenotype Expansion", description="", color='#5521CC')
get_or_create_project_tag(project, tag_name="Known Gene for Phenotype", description="", color='#2177DD')
get_or_create_project_tag(project, tag_name="Pathogenic", description="Potential candidate gene", color='#AA1111') # red
get_or_create_project_tag(project, tag_name="Likely Pathogenic", description="Likely pathogenic", color='#FF9988') # light red
get_or_create_project_tag(project, tag_name="VUS", description="Uncertain significance", color='#AAAAAA') # gray
get_or_create_project_tag(project, tag_name="Likely Benign", description="Likely Benign", color='#B2DF8A') # light green
get_or_create_project_tag(project, tag_name="Benign", description="Strong evidence", color='#11AA11') # green
print("Done")
"""
Review Review
Incidental Finding Incidental finding
Known Gene for Phenotype Known gene for phenotype
Known Gene Phenotype Expansion Known gene phenotype expansion
Novel Gene Novel gene
Pathogenic Potential candidate gene
Likely Pathogenic Moderate and supporting evidence
VUS Uncertain significance
Likely Benign Moderate and supporting evidence
Benign Strong evidence
"""
| Python | 0 |
66c33c880d1e5f20a23e01937f8c88f5b66bfc5c | fix SQL error on non existing column | addons/website_membership/models/membership.py | addons/website_membership/models/membership.py | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import models
class MembershipLine(models.Model):
_inherit = 'membership.membership_line'
def get_published_companies(self, limit=None):
if not self.ids:
return []
limit_clause = '' if limit is None else ' LIMIT %d' % limit
self.env.cr.execute("""
SELECT DISTINCT p.id
FROM res_partner p INNER JOIN membership_membership_line m
ON p.id = m.partner
WHERE is_published AND is_company AND m.id IN %s """ + limit_clause, (tuple(self.ids),))
return [partner_id[0] for partner_id in self.env.cr.fetchall()]
| # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import models
class MembershipLine(models.Model):
_inherit = 'membership.membership_line'
def get_published_companies(self, limit=None):
if not self.ids:
return []
limit_clause = '' if limit is None else ' LIMIT %d' % limit
self.env.cr.execute("""
SELECT DISTINCT p.id
FROM res_partner p INNER JOIN membership_membership_line m
ON p.id = m.partner
WHERE website_published AND is_company AND m.id IN %s """ + limit_clause, (tuple(self.ids),))
return [partner_id[0] for partner_id in self.env.cr.fetchall()]
| Python | 0.000272 |
d5cb2a37ea77b15c5725d6ebf8e0ab79f3bea613 | Fix interface in historian service interface | flow_workflow/historian/service_interface.py | flow_workflow/historian/service_interface.py | import logging
from flow_workflow.historian.messages import UpdateMessage
LOG = logging.getLogger(__name__)
class WorkflowHistorianServiceInterface(object):
def __init__(self,
broker=None,
exchange=None,
routing_key=None):
self.broker = broker
self.exchange = exchange
self.routing_key = routing_key
def update(self, net_key, operation_id, name, workflow_plan_id, **kwargs):
if workflow_plan_id < 0:
# ignore update (don't even make message)
LOG.debug("Received negative workflow_plan_id:%s, "
"ignoring update (net_key=%s, operation_id=%s, name=%s,"
"workflow_plan_id=%s, kwargs=%s)",
workflow_plan_id, net_key, peration_id, name,
workflow_plan_id, kwargs)
else:
LOG.debug("Sending update (net_key=%s, operation_id=%s, name=%s,"
"workflow_plan_id=%s, kwargs=%s)",
net_key, operation_id, name, workflow_plan_id, kwargs)
message = UpdateMessage(net_key=net_key, operation_id=operation_id,
name=name, workflow_plan_id=workflow_plan_id,
**kwargs)
self.broker.publish(self.exchange, self.routing_key, message)
| import logging
from flow_workflow.historian.messages import UpdateMessage
LOG = logging.getLogger(__name__)
class WorkflowHistorianServiceInterface(object):
def __init__(self,
broker=None,
exchange=None,
routing_key=None):
self.broker = broker
self.exchange = exchange
self.routing_key = routing_key
def update(self, net_key, operation_id, name, workflow_plan_id, **kwargs):
if workflow_plan_id < 0:
# ignore update (don't even make message)
LOG.debug("Received negative workflow_plan_id:%s, "
"ignoring update (net_key=%s, operation_id=%s, name=%s,"
"workflow_plan_id=%s, kwargs=%s)",
workflow_plan_id, net_key, peration_id, name,
workflow_plan_id, kwargs)
else:
LOG.debug("Sending update (net_key=%s, operation_id=%s, name=%s,"
"workflow_plan_id=%s, kwargs=%s)",
net_key, peration_id, name, workflow_plan_id, kwargs)
message = UpdateMessage(net_key=net_key, operation_id=operation_id,
**kwargs)
self.broker.publish(self.exchange, self.routing_key, message)
| Python | 0.000004 |
26f5adea28f81ebbe830d4a207958320e0b40520 | update version | gtfparse/__init__.py | gtfparse/__init__.py | # Copyright (c) 2015. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .attribute_parsing import expand_attribute_strings
from .create_missing_features import create_missing_features
from .line_parsing import parse_gtf_lines
from .required_columns import REQUIRED_COLUMNS
from .parsing_error import ParsingError
from .read_gtf import read_gtf_as_dataframe, read_gtf_as_dict
__version__ = "0.2.3"
__all__ = [
"expand_attribute_strings",
"create_missing_features",
"parse_gtf_lines",
"REQUIRED_COLUMNS",
"ParsingError",
"read_gtf_as_dataframe",
"read_gtf_as_dict",
]
| # Copyright (c) 2015. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .attribute_parsing import expand_attribute_strings
from .create_missing_features import create_missing_features
from .line_parsing import parse_gtf_lines
from .required_columns import REQUIRED_COLUMNS
from .parsing_error import ParsingError
from .read_gtf import read_gtf_as_dataframe, read_gtf_as_dict
__version__ = "0.2.2"
__all__ = [
"expand_attribute_strings",
"create_missing_features",
"parse_gtf_lines",
"REQUIRED_COLUMNS",
"ParsingError",
"read_gtf_as_dataframe",
"read_gtf_as_dict",
]
| Python | 0 |
85dc28b44def27658e282d621749598ec80ea420 | Fix typo | ambari-server/src/main/python/TeardownAgent.py | ambari-server/src/main/python/TeardownAgent.py | #!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import sys
import logging
import os
import subprocess
def exec_os_command(os_command):
os_stat = subprocess.Popen(os_command, stdout=subprocess.PIPE)
return {
"exitstatus": os_stat.returncode,
"log": os_stat.communicate(0)
}
def is_suse():
"""Return true if the current OS is Suse Linux, false otherwise"""
if os.path.isfile("/etc/issue"):
if "suse" in open("/etc/issue").read().lower():
return True
return False
def teardown_agent_suse():
""" Run zypper remove"""
zypper_command = ["zypper", "remove", "-y", "ambari-agent"]
return exec_os_command(zypper_command)['exitstatus']
def teardown_agent():
""" Run yum remove"""
rpm_command = ["yum", "-y", "remove", "ambari-agent"]
return exec_os_command(rpm_command)['exitstatus']
def parse_args(argv):
onlyargs = argv[1:]
pass_phrase = onlyargs[0]
hostname = onlyargs[1]
project_version = None
if len(onlyargs) > 2:
project_version = onlyargs[2]
if project_version is None or project_version == "null":
project_version = ""
if project_version != "":
project_version = "-" + project_version
return (pass_phrase, hostname, project_version)
def main(argv=None):
script_dir = os.path.realpath(os.path.dirname(argv[0]))
(pass_phrase, hostname, project_version) = parse_args(argv)
exec_os_command(["ambari-agent", "stop"])
exec_os_command(["ambari-agent", "unregister"])
if is_suse():
exit_code = teardown_agent_suse()
else:
exit_code = teardown_agent()
sys.exit(exit_code)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
main(sys.argv)
| #!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import sys
import logging
import os
import subprocess
def exec_os_command(os_command):
os_stat = subprocess.Popen(os_command, stdout=subprocess.PIPE)
return {
"exitstatus": os.stat.returncode,
"log": os_stat.communicate(0)
}
def is_suse():
"""Return true if the current OS is Suse Linux, false otherwise"""
if os.path.isfile("/etc/issue"):
if "suse" in open("/etc/issue").read().lower():
return True
return False
def teardown_agent_suse():
""" Run zypper remove"""
zypper_command = ["zypper", "remove", "-y", "ambari-agent"]
return exec_os_command(zypper_command)['exitstatus']
def teardown_agent():
""" Run yum remove"""
rpm_command = ["yum", "-y", "remove", "ambari-agent"]
return exec_os_command(rpm_command)['exitstatus']
def parse_args(argv):
onlyargs = argv[1:]
pass_phrase = onlyargs[0]
hostname = onlyargs[1]
project_version = None
if len(onlyargs) > 2:
project_version = onlyargs[2]
if project_version is None or project_version == "null":
project_version = ""
if project_version != "":
project_version = "-" + project_version
return (pass_phrase, hostname, project_version)
def main(argv=None):
script_dir = os.path.realpath(os.path.dirname(argv[0]))
(pass_phrase, hostname, project_version) = parse_args(argv)
exec_os_command(["ambari-agent", "stop"])
exec_os_command(["ambari-agent", "unregister"])
if is_suse():
exit_code = teardown_agent_suse()
else:
exit_code = teardown_agent()
sys.exit(exit_code)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
main(sys.argv)
| Python | 0.999999 |
31f55658d7495bf1fed8a5a466ffe54502a0348c | Make countersong check for language-dependent spells | tpdatasrc/tpgamefiles/scr/tpModifiers/countersong.py | tpdatasrc/tpgamefiles/scr/tpModifiers/countersong.py | from templeplus.pymod import PythonModifier
from toee import *
import tpdp
def Remove(char, args, evt_obj):
if evt_obj.is_modifier('Countersong'):
args.condition_remove()
return 0
# built-in hook only checks for Sonic descriptor
def Lang(char, args, evt_obj):
lang = 1 << (D20STD_F_SPELL_DESCRIPTOR_LANGUAGE_DEPENDENT-1)
sonic = 1 << (D20S%D_F_SPELL_DESCRIPTOR_SONIC-1)
if (evt_obj.flags & lang) and not (evt_obj.flags & sonic):
perform = args.get_arg(1)
save_bonus = evt_obj.bonus_list.get_sum()
delta = perform - save_bonus - evt_obj.roll_result
if delta > 0:
evt_obj.bonus_list.add(delta, 0, 192)
return 0
countersong = PythonModifier()
countersong.ExtendExisting('Countersong')
countersong.AddHook(ET_OnConditionAddPre, EK_NONE, Remove, ())
countersong.AddHook(ET_OnCountersongSaveThrow, EK_NONE, Lang, ())
| from templeplus.pymod import PythonModifier
from toee import *
import tpdp
def Remove(char, args, evt_obj):
if evt_obj.is_modifier('Countersong'):
args.condition_remove()
return 0
countersong = PythonModifier()
countersong.ExtendExisting('Countersong')
countersong.AddHook(ET_OnConditionAddPre, EK_NONE, Remove, ())
| Python | 0 |
e535def2bc9b7de203e1fd37fc592cdeed1be526 | fix selection bug | src/choose.py | src/choose.py | # Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
#
# @nolint
from __future__ import print_function
import curses
import pickle
import sys
import os
import output
import screenControl
import logger
import format
PICKLE_FILE = '~/.fbPager.pickle'
SELECTION_PICKLE = '~/.fbPager.selection.pickle'
LOAD_SELECTION_WARNING = '''
WARNING! Loading the standard input and previous selection
failed. This is probably due to a backwards compatibility issue
with upgrading PathPicker or an internal error. Please pipe
a new set of input to PathPicker to start fresh (after which
this error will go away)
'''
def doProgram(stdscr):
output.clearFile()
logger.clearFile()
lineObjs = getLineObjs()
screen = screenControl.Controller(stdscr, lineObjs)
screen.control()
def getLineObjs():
filePath = os.path.expanduser(PICKLE_FILE)
try:
lineObjs = pickle.load(open(filePath, 'rb'))
except:
output.appendError(LOAD_SELECTION_WARNING)
sys.exit(1)
logger.addEvent('total_num_files', len(lineObjs.items()))
selectionPath = os.path.expanduser(SELECTION_PICKLE)
if os.path.isfile(selectionPath):
setSelectionsFromPickle(selectionPath, lineObjs)
matches = [lineObj for i, lineObj in lineObjs.items()
if not lineObj.isSimple()]
if not len(matches):
output.writeToFile('echo "No lines matched!!"')
sys.exit(0)
return lineObjs
def setSelectionsFromPickle(selectionPath, lineObjs):
try:
selectedIndices = pickle.load(open(selectionPath, 'rb'))
except:
output.appendError(LOAD_SELECTION_WARNING)
sys.exit(1)
for index in selectedIndices:
if index >= len(lineObjs.items()):
error = 'Found index %d more than total matches' % index
output.appendError(error)
continue
toSelect = lineObjs[index]
if isinstance(toSelect, format.LineMatch):
lineObjs[index].setSelect(True)
else:
error = 'Line %d was selected but is not LineMatch' % index
output.appendError(error)
if __name__ == '__main__':
if not os.path.exists(os.path.expanduser(PICKLE_FILE)):
print('Nothing to do!')
output.writeToFile('echo ":D"')
sys.exit(0)
output.clearFile()
curses.wrapper(doProgram)
| # Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
#
# @nolint
from __future__ import print_function
import curses
import pickle
import sys
import os
import output
import screenControl
import logger
PICKLE_FILE = '~/.fbPager.pickle'
SELECTION_PICKLE = '~/.fbPager.selection.pickle'
LOAD_SELECTION_WARNING = '''
WARNING! Loading the standard input and previous selection
failed. This is probably due to a backwards compatibility issue
with upgrading PathPicker or an internal error. Please pipe
a new set of input to PathPicker to start fresh (after which
this error will go away)
'''
def doProgram(stdscr):
output.clearFile()
logger.clearFile()
lineObjs = getLineObjs()
screen = screenControl.Controller(stdscr, lineObjs)
screen.control()
def getLineObjs():
filePath = os.path.expanduser(PICKLE_FILE)
try:
lineObjs = pickle.load(open(filePath, 'rb'))
except:
output.appendError(LOAD_SELECTION_WARNING)
sys.exit(1)
logger.addEvent('total_num_files', len(lineObjs.items()))
selectionPath = os.path.expanduser(SELECTION_PICKLE)
if os.path.isfile(selectionPath):
setSelectionsFromPickle(lineObjs)
matches = [lineObj for i, lineObj in lineObjs.items()
if not lineObj.isSimple()]
if not len(matches):
output.writeToFile('echo "No lines matched!!"')
sys.exit(0)
return lineObjs
def setSelectionsFromPickle(lineObjs):
try:
selectedIndices = pickle.load(open(selectionPath, 'rb'))
except:
output.appendError(LOAD_SELECTION_WARNING)
sys.exit(1)
for index in selectedIndices:
if index >= len(lineObjs.items()):
error = 'Found index %d more than total matches' % index
output.appendError(error)
continue
toSelect = lineObjs[index]
if isinstance(toSelect, format.LineMatch):
lineObjs[index].setSelect(True)
else:
error = 'Line %d was selected but is not LineMatch' % index
output.appendError(error)
if __name__ == '__main__':
if not os.path.exists(os.path.expanduser(PICKLE_FILE)):
print('Nothing to do!')
output.writeToFile('echo ":D"')
sys.exit(0)
output.clearFile()
curses.wrapper(doProgram)
| Python | 0 |
ec7150144682afb1f64cd3ba3713207912820264 | Remove add_station reference. | agent/manager.py | agent/manager.py | import json
import logging
import threading
import time
import pika
from db.db import session
from db.models import Metric, WeatherStation
LOG_FORMAT = ('%(levelname) -10s %(asctime)s %(name) -30s %(funcName) '
'-35s %(lineno) -5d: %(message)s')
LOGGER = logging.getLogger(__name__)
class AgentManager(object):
def __init__(self, cfg):
self.connection = None
self.channel = None
self.cfg = cfg
def connect(self, forever=True):
while True:
try:
parameters = pika.ConnectionParameters(host=str(self.cfg['broker']),
port=int(self.cfg['port']),
virtual_host='/')
self.connection = pika.BlockingConnection(parameters)
self.channel = self.connection.channel()
self.fan_out() # Perodic.
break
except Exception as e:
if not forever:
raise
LOGGER.error("Connection failed. Trying again...")
time.sleep(1)
continue
def stop(self):
if self.connection:
self.connection.close()
def _publish(self, msg):
try:
published = self.channel.basic_publish(self.cfg['exchange'],
self.cfg['routingKey'], msg,
pika.BasicProperties(
content_type='application/json',
delivery_mode=2), # Persistent,
mandatory=True
)
if not published:
raise
except Exception as e:
LOGGER.error('Error %s when sending message.', str(e))
raise
def publish_station(self, station):
msg_dict = {
"action": "add_station",
"data": {
"id": station.id,
"name": station.name,
"latitude": station.latitude,
"longitude": station.longitude,
"metric_types": [mt.id for mt in station.metric_types],
}
}
msg = json.dumps(msg_dict)
self._publish(msg)
def publish_metric(self, metric):
msg_dict = {
"action": "add_metric",
"data": {
"id": metric.id,
"value": metric.value,
"metric_type_id": metric.metric_type_id,
"weather_station_id": metric.weather_station_id,
}
}
msg = json.dumps(msg_dict)
self._publish(msg)
def fan_out(self, period=30):
LOGGER.debug('Fanning out rows...')
stations = session.query(WeatherStation).filter_by(is_sent=False).all()
for station in stations:
session.begin()
try:
self.publish_station(station)
station.is_sent = True
session.commit()
except Exception as e:
LOGGER.error('Error %s when processing station.', str(e))
session.rollback()
raise
metrics = session.query(Metric).filter_by(is_sent=False).all()
for metric in metrics:
session.begin()
try:
self.publish_metric(metric)
metric.is_sent = True
session.commit()
except Exception as e:
LOGGER.error('Error %s when processing metric.', str(e))
session.rollback()
raise
threading.Timer(period, self.fan_out).start() # Periodic loop.
def run(self):
self.connect()
def main():
logging.basicConfig(level=logging.DEBUG, format=LOG_FORMAT)
cfg = json.load(open("config.json"))
manager = AgentManager(cfg)
try:
manager.run()
except KeyboardInterrupt:
manager.stop()
if __name__ == '__main__':
main()
| import json
import logging
import threading
import time
import pika
from db.db import session
from db.models import Metric, WeatherStation
LOG_FORMAT = ('%(levelname) -10s %(asctime)s %(name) -30s %(funcName) '
'-35s %(lineno) -5d: %(message)s')
LOGGER = logging.getLogger(__name__)
class AgentManager(object):
def __init__(self, cfg):
self.connection = None
self.channel = None
self.cfg = cfg
def connect(self, forever=True):
while True:
try:
parameters = pika.ConnectionParameters(host=str(self.cfg['broker']),
port=int(self.cfg['port']),
virtual_host='/')
self.connection = pika.BlockingConnection(parameters)
self.channel = self.connection.channel()
self.fan_out() # Perodic.
break
except Exception as e:
if not forever:
raise
LOGGER.error("Connection failed. Trying again...")
time.sleep(1)
continue
def stop(self):
if self.connection:
self.connection.close()
def _publish(self, msg):
try:
published = self.channel.basic_publish(self.cfg['exchange'],
self.cfg['routingKey'], msg,
pika.BasicProperties(
content_type='application/json',
delivery_mode=2), # Persistent,
mandatory=True
)
if not published:
raise
except Exception as e:
LOGGER.error('Error %s when sending message.', str(e))
raise
def publish_station(self, station):
msg_dict = {
"action": "add_station",
"data": {
"id": station.id,
"name": station.name,
"latitude": station.latitude,
"longitude": station.longitude,
"metric_types": [mt.id for mt in station.metric_types],
}
}
msg = json.dumps(msg_dict)
self._publish(msg)
def publish_metric(self, metric):
msg_dict = {
"action": "add_metric",
"data": {
"id": metric.id,
"value": metric.value,
"metric_type_id": metric.metric_type_id,
"weather_station_id": metric.weather_station_id,
}
}
msg = json.dumps(msg_dict)
self._publish(msg)
def fan_out(self, period=30):
LOGGER.debug('Fanning out rows...')
stations = session.query(WeatherStation).filter_by(is_sent=False).all()
for station in stations:
session.begin()
try:
self.publish_station(station)
station.is_sent = True
session.commit()
except Exception as e:
LOGGER.error('Error %s when processing station.', str(e))
session.rollback()
raise
metrics = session.query(Metric).filter_by(is_sent=False).all()
for metric in metrics:
session.begin()
try:
self.publish_metric(metric)
metric.is_sent = True
session.commit()
except Exception as e:
LOGGER.error('Error %s when processing metric.', str(e))
session.rollback()
raise
threading.Timer(period, self.fan_out).start() # Periodic loop.
def run(self):
self.add_station()
self.connect()
def main():
logging.basicConfig(level=logging.DEBUG, format=LOG_FORMAT)
cfg = json.load(open("config.json"))
manager = AgentManager(cfg)
try:
manager.run()
except KeyboardInterrupt:
manager.stop()
if __name__ == '__main__':
main()
| Python | 0 |
0a44fc07efb902912e22e72979f69fbab200cd32 | Update version 0.6.8 -> 0.6.9 | dimod/package_info.py | dimod/package_info.py | __version__ = '0.6.9'
__author__ = 'D-Wave Systems Inc.'
__authoremail__ = 'acondello@dwavesys.com'
__description__ = 'A shared API for binary quadratic model samplers.'
| __version__ = '0.6.8'
__author__ = 'D-Wave Systems Inc.'
__authoremail__ = 'acondello@dwavesys.com'
__description__ = 'A shared API for binary quadratic model samplers.'
| Python | 0 |
6d86e8565a9ea1aac07b8a1470e2f3b724b981c2 | fix for use on python 2.1 | Lib/bsddb/test/test_misc.py | Lib/bsddb/test/test_misc.py | """Miscellaneous bsddb module test cases
"""
import os
import sys
import unittest
try:
# For Python 2.3
from bsddb import db, dbshelve
except ImportError:
# For earlier Pythons w/distutils pybsddb
from bsddb3 import db, dbshelve
#----------------------------------------------------------------------
class MiscTestCase(unittest.TestCase):
def setUp(self):
self.filename = self.__class__.__name__ + '.db'
homeDir = os.path.join(os.path.dirname(sys.argv[0]), 'db_home')
self.homeDir = homeDir
try:
os.mkdir(homeDir)
except OSError:
pass
def tearDown(self):
try:
os.remove(self.filename)
except OSError:
pass
import glob
files = glob.glob(os.path.join(self.homeDir, '*'))
for file in files:
os.remove(file)
def test01_badpointer(self):
dbs = dbshelve.open(self.filename)
dbs.close()
self.assertRaises(db.DBError, dbs.get, "foo")
def test02_db_home(self):
env = db.DBEnv()
# check for crash fixed when db_home is used before open()
assert env.db_home is None
env.open(self.homeDir, db.DB_CREATE)
assert self.homeDir == env.db_home
#----------------------------------------------------------------------
def test_suite():
return unittest.makeSuite(MiscTestCase)
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
| """Miscellaneous bsddb module test cases
"""
import os
import sys
import unittest
try:
# For Python 2.3
from bsddb import db, dbshelve
except ImportError:
# For earlier Pythons w/distutils pybsddb
from bsddb3 import db, dbshelve
from test.test_support import verbose
#----------------------------------------------------------------------
class MiscTestCase(unittest.TestCase):
def setUp(self):
self.filename = self.__class__.__name__ + '.db'
homeDir = os.path.join(os.path.dirname(sys.argv[0]), 'db_home')
self.homeDir = homeDir
try:
os.mkdir(homeDir)
except OSError:
pass
def tearDown(self):
try:
os.remove(self.filename)
except OSError:
pass
import glob
files = glob.glob(os.path.join(self.homeDir, '*'))
for file in files:
os.remove(file)
def test01_badpointer(self):
dbs = dbshelve.open(self.filename)
dbs.close()
self.assertRaises(db.DBError, dbs.get, "foo")
def test02_db_home(self):
env = db.DBEnv()
# check for crash fixed when db_home is used before open()
assert env.db_home is None
env.open(self.homeDir, db.DB_CREATE)
assert self.homeDir == env.db_home
#----------------------------------------------------------------------
def test_suite():
return unittest.makeSuite(MiscTestCase)
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
| Python | 0.000002 |
d7b8186f0f4115307753d0aef038ec61155c83bc | Fix typo in python | Test/Test-IO/python/TestIO.py | Test/Test-IO/python/TestIO.py | #!/usr/bin/python
import timeit, sys, io
def wrapper(func, *args, **kwargs):
def wrapped():
return func(*args, **kwargs)
return wrapped
def start(file, outfile):
input = open(file, 'r')
output = open(outfile, 'w')
line = input.readline()
while line:
line = line.replace('Tellus', 'Terra')
line = line.replace('tellus', 'terra')
output.write(line)
line = input.readline()
def main(argv):
file = 'dump.txt'
output = 'res.txt'
for i in range(len(argv)):
if argv[i] == '-f':
i = i + 1
file = argv[i]
elif argv[i] == '-o':
i = i + 1
output = argv[i]
#ns = time.time()
wrapped = wrapper(start, file, output)
print (timeit.timeit(wrapped, number=1)*1000)
#totaltime = (time.time() - ns) / 1000000
#print (totaltime)
sys.exit(0)
if __name__ == '__main__':main(sys.argv[1:])
| #!/usr/bin/python
import timeit, sys, io
def wrapper(func, *args, **kwargs):
def wrapped():
return func(*args, **kwargs)
return wrapped
def start(file, outfile):
#input = open(file, 'r')
#output = open(outfile, 'w')
line = input.readline()
while line:
line = line.replace('Tellus', 'Terra')
line = line.replace('tellus', 'terra')
output.write(line)
line = input.readline()
def main(argv):
file = 'dump.txt'
output = 'res.txt'
for i in range(len(argv)):
if argv[i] == '-f':
i = i + 1
file = argv[i]
elif argv[i] == '-o':
i = i + 1
output = argv[i]
#ns = time.time()
wrapped = wrapper(start, file, output)
print (timeit.timeit(wrapped, number=1)*1000)
#totaltime = (time.time() - ns) / 1000000
#print (totaltime)
sys.exit(0)
if __name__ == '__main__':main(sys.argv[1:])
| Python | 0.999998 |
a9b27bc7c3821536657405790f38532db473b92c | Fix bug in recent shelves views | books/views/shelf_views.py | books/views/shelf_views.py | from datetime import timedelta
from django.core.urlresolvers import reverse
from django.contrib import messages
from django.shortcuts import (
get_object_or_404,
redirect,
)
from django.utils import timezone
from django.views.generic import (
CreateView,
DeleteView,
DetailView,
ListView,
UpdateView,
View,
)
from books.models import (
Book,
BookOnShelf,
Shelf,
)
from books.views import (
SEARCH_UPDATE_MESSAGE,
LibraryMixin,
)
from readers.models import Reader
class ShelfListView(LibraryMixin, ListView):
model = Shelf
template_name = "shelf_list.html"
class ShelfView(LibraryMixin, DetailView):
model = Shelf
template_name = "shelf.html"
def get_context_data(self, **kwargs):
context = super(ShelfView, self).get_context_data(**kwargs)
context['books'] = [
book_on_shelf.book for book_on_shelf in BookOnShelf.objects.filter(
shelf=self.get_object()
)
]
return context
class CreateShelfView(LibraryMixin, CreateView):
model = Shelf
template_name = "shelf_edit.html"
fields = ['name',]
def get_success_url(self):
return reverse('shelf-list')
def get_context_data(self, **kwargs):
context = super(CreateShelfView, self).get_context_data(**kwargs)
context['action'] = reverse('shelf-create')
return context
class EditShelfView(LibraryMixin, UpdateView):
model = Shelf
template_name = "shelf_edit.html"
fields = ['name',]
def get_success_url(self):
return reverse(
'shelf-detail',
kwargs={'pk': self.object.id}
)
def get_context_data(self, **kwargs):
context = super(EditShelfView, self).get_context_data(**kwargs)
context['action'] = reverse(
'shelf-edit',
kwargs={'pk': self.object.id},
)
return context
def form_valid(self, form):
messages.success(self.request, "{} updated. {}".format(
self.object, SEARCH_UPDATE_MESSAGE
))
return super(EditShelfView, self).form_valid(form)
class DeleteShelfView(LibraryMixin, DeleteView):
model = Shelf
template_name = "shelf_delete.html"
def get_success_url(self):
return reverse('shelf-list')
def form_valid(self, form):
messages.success(self.request, "{} deleted. {}".format(
self.object, SEARCH_UPDATE_MESSAGE
))
return super(DeleteShelfView, self).form_valid(form)
class LastWeekShelfView(LibraryMixin, ListView):
model = Book
template_name = "shelf.html"
paginate_by = 25
paginate_orphans = 5
def get_queryset(self):
queryset = super(LastWeekShelfView, self).get_queryset()
last_week = timezone.now() - timedelta(days=7)
return queryset.filter(created__gt=last_week)
def get_context_data(self, **kwargs):
context = super(LastWeekShelfView, self).get_context_data(**kwargs)
context['shelf_name'] = 'Added in Last Week'
context['books'] = context['object_list']
return context
class LastMonthShelfView(LibraryMixin, ListView):
model = Book
template_name = "shelf.html"
paginate_by = 25
paginate_orphans = 5
def get_queryset(self):
queryset = super(LastMonthShelfView, self).get_queryset()
last_month = timezone.now() - timedelta(days=30)
return queryset.filter(created__gt=last_month)
def get_context_data(self, **kwargs):
context = super(LastMonthShelfView, self).get_context_data(**kwargs)
context['shelf_name'] = 'Added in Last Month'
context['books'] = context['object_list']
return context | from datetime import datetime, timedelta
from django.core.urlresolvers import reverse
from django.contrib import messages
from django.shortcuts import (
get_object_or_404,
redirect,
)
from django.views.generic import (
CreateView,
DeleteView,
DetailView,
ListView,
UpdateView,
View,
)
from books.models import (
Book,
BookOnShelf,
Shelf,
)
from books.views import (
SEARCH_UPDATE_MESSAGE,
LibraryMixin,
)
from readers.models import Reader
class ShelfListView(LibraryMixin, ListView):
model = Shelf
template_name = "shelf_list.html"
class ShelfView(LibraryMixin, DetailView):
model = Shelf
template_name = "shelf.html"
def get_context_data(self, **kwargs):
context = super(ShelfView, self).get_context_data(**kwargs)
context['books'] = [
book_on_shelf.book for book_on_shelf in BookOnShelf.objects.filter(
shelf=self.get_object()
)
]
return context
class CreateShelfView(LibraryMixin, CreateView):
model = Shelf
template_name = "shelf_edit.html"
fields = ['name',]
def get_success_url(self):
return reverse('shelf-list')
def get_context_data(self, **kwargs):
context = super(CreateShelfView, self).get_context_data(**kwargs)
context['action'] = reverse('shelf-create')
return context
class EditShelfView(LibraryMixin, UpdateView):
model = Shelf
template_name = "shelf_edit.html"
fields = ['name',]
def get_success_url(self):
return reverse(
'shelf-detail',
kwargs={'pk': self.object.id}
)
def get_context_data(self, **kwargs):
context = super(EditShelfView, self).get_context_data(**kwargs)
context['action'] = reverse(
'shelf-edit',
kwargs={'pk': self.object.id},
)
return context
def form_valid(self, form):
messages.success(self.request, "{} updated. {}".format(
self.object, SEARCH_UPDATE_MESSAGE
))
return super(EditShelfView, self).form_valid(form)
class DeleteShelfView(LibraryMixin, DeleteView):
model = Shelf
template_name = "shelf_delete.html"
def get_success_url(self):
return reverse('shelf-list')
def form_valid(self, form):
messages.success(self.request, "{} deleted. {}".format(
self.object, SEARCH_UPDATE_MESSAGE
))
return super(DeleteShelfView, self).form_valid(form)
class LastWeekShelfView(LibraryMixin, ListView):
model = Book
template_name = "shelf.html"
paginate_by = 25
paginate_orphans = 5
def get_queryset(self):
queryset = super(LastWeekShelfView, self).get_queryset()
last_week = datetime.now() - timedelta(days=7)
queryset.filter(created__gt=last_week)
return queryset
def get_context_data(self, **kwargs):
context = super(LastWeekShelfView, self).get_context_data(**kwargs)
context['shelf_name'] = 'Added in Last Week'
context['books'] = context['object_list']
return context
class LastMonthShelfView(LibraryMixin, ListView):
model = Book
template_name = "shelf.html"
paginate_by = 25
paginate_orphans = 5
def get_queryset(self):
queryset = super(LastMonthShelfView, self).get_queryset()
last_week = datetime.now() - timedelta(days=30)
queryset.filter(created__gt=last_week)
return queryset
def get_context_data(self, **kwargs):
context = super(LastMonthShelfView, self).get_context_data(**kwargs)
context['shelf_name'] = 'Added in Last Month'
context['books'] = context['object_list']
return context | Python | 0 |
7997dc0785f124dd3836bc8490c701fe99217a48 | add test mode param | umapi/api.py | umapi/api.py | import requests
import json
from error import UMAPIError, UMAPIRetryError, UMAPIRequestError, ActionFormatError
class UMAPI(object):
def __init__(self, endpoint, auth, test_mode=False):
self.endpoint = str(endpoint)
self.auth = auth
self.test_mode = test_mode
def users(self, org_id, page=0):
return self._call('/users/%s/%d' % (org_id, page), requests.get)
def groups(self, org_id, page=0):
return self._call('/groups/%s/%d' % (org_id, page), requests.get)
def action(self, org_id, action):
if not isinstance(action, Action):
if not isinstance(action, str) and hasattr(action, "__getitem__") or hasattr(action, "__iter__"):
actions = [a.data for a in action]
else:
raise ActionFormatError("action must be iterable, indexable or Action object")
else:
actions = [action.data]
if self.test_mode:
return self._call('/action/%s?testOnly=true' % org_id, requests.post, actions)
else:
return self._call('/action/%s' % org_id, requests.post, actions)
def _call(self, method, call, params=None):
data = ''
if params:
data = json.dumps(params)
res = call(self.endpoint+method, data=data, auth=self.auth)
if res.status_code == 200:
result = res.json()
if "result" in result:
if result["result"] == "error":
raise UMAPIRequestError(result["errors"][0]["errorCode"])
else:
return result
else:
raise UMAPIRequestError("Request Error -- Unknown Result Status")
if res.status_code in [429, 502, 503, 504]:
raise UMAPIRetryError(res)
else:
raise UMAPIError(res)
class Action(object):
def __init__(self, user, *args, **kwargs):
self.data = {"user": user}
for k, v in kwargs.items():
self.data[k] = v
def do(self, *args, **kwargs):
self.data["do"] = []
# add "create" / "add" / "removeFrom" first
for k, v in kwargs.items():
if k.startswith("create") or k.startswith("addAdobe") or k.startswith("removeFrom"):
self.data["do"].append({k: v})
del kwargs[k]
# now do the other actions
for k, v in kwargs.items():
if k in ['add', 'remove']:
self.data["do"].append({k: {"product": v}})
else:
self.data["do"].append({k: v})
return self
| import requests
import json
from error import UMAPIError, UMAPIRetryError, UMAPIRequestError, ActionFormatError
class UMAPI(object):
def __init__(self, endpoint, auth):
self.endpoint = str(endpoint)
self.auth = auth
def users(self, org_id, page=0):
return self._call('/users/%s/%d' % (org_id, page), requests.get)
def groups(self, org_id, page=0):
return self._call('/groups/%s/%d' % (org_id, page), requests.get)
def action(self, org_id, action):
if not isinstance(action, Action):
if not isinstance(action, str) and hasattr(action, "__getitem__") or hasattr(action, "__iter__"):
actions = [a.data for a in action]
else:
raise ActionFormatError("action must be iterable, indexable or Action object")
else:
actions = [action.data]
return self._call('/action/%s' % org_id, requests.post, actions)
def _call(self, method, call, params=None):
data = ''
if params:
data = json.dumps(params)
res = call(self.endpoint+method, data=data, auth=self.auth)
if res.status_code == 200:
result = res.json()
if "result" in result:
if result["result"] == "error":
raise UMAPIRequestError(result["errors"][0]["errorCode"])
else:
return result
else:
raise UMAPIRequestError("Request Error -- Unknown Result Status")
if res.status_code in [429, 502, 503, 504]:
raise UMAPIRetryError(res)
else:
raise UMAPIError(res)
class Action(object):
def __init__(self, user, *args, **kwargs):
self.data = {"user": user}
for k, v in kwargs.items():
self.data[k] = v
def do(self, *args, **kwargs):
self.data["do"] = []
# add "create" / "add" / "removeFrom" first
for k, v in kwargs.items():
if k.startswith("create") or k.startswith("addAdobe") or k.startswith("removeFrom"):
self.data["do"].append({k: v})
del kwargs[k]
# now do the other actions
for k, v in kwargs.items():
if k in ['add', 'remove']:
self.data["do"].append({k: {"product": v}})
else:
self.data["do"].append({k: v})
return self
| Python | 0.000001 |
b19429159f3c813297ba2e237abba276045f9ff1 | add 0.10.17, mariadb-connector-c dependency (#11044) | var/spack/repos/builtin/packages/r-rmysql/package.py | var/spack/repos/builtin/packages/r-rmysql/package.py | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RRmysql(RPackage):
"""Implements 'DBI' Interface to 'MySQL' and 'MariaDB' Databases."""
homepage = "https://github.com/rstats-db/rmysql"
url = "https://cran.r-project.org/src/contrib/RMySQL_0.10.9.tar.gz"
list_url = "https://cran.r-project.org/src/contrib/Archive/RMySQL"
version('0.10.17', sha256='754df4fce159078c1682ef34fc96aa5ae30981dc91f4f2bada8d1018537255f5')
version('0.10.9', '3628200a1864ac3005cfd55cc7cde17a')
depends_on('r-dbi@0.4:', type=('build', 'run'))
depends_on('mariadb@:5.5.56')
| # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RRmysql(RPackage):
"""Implements 'DBI' Interface to 'MySQL' and 'MariaDB' Databases."""
homepage = "https://github.com/rstats-db/rmysql"
url = "https://cran.r-project.org/src/contrib/RMySQL_0.10.9.tar.gz"
list_url = "https://cran.r-project.org/src/contrib/Archive/RMySQL"
version('0.10.9', '3628200a1864ac3005cfd55cc7cde17a')
depends_on('r-dbi', type=('build', 'run'))
depends_on('mariadb')
| Python | 0 |
564f1da2c6643a4ef6d27b736620116b144fa2ac | Handle stale PostgreSQL (or others) more gracefully. Closes #3394. Thanks to flfr at stibo.com for the patch. | trac/db/pool.py | trac/db/pool.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2005 Edgewall Software
# Copyright (C) 2005 Christopher Lenz <cmlenz@gmx.de>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.com/license.html.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://projects.edgewall.com/trac/.
#
# Author: Christopher Lenz <cmlenz@gmx.de>
try:
import threading
except ImportError:
import dummy_threading as threading
threading._get_ident = lambda: 0
import time
from trac.db.util import ConnectionWrapper
class TimeoutError(Exception):
"""Exception raised by the connection pool when no connection has become
available after a given timeout."""
class PooledConnection(ConnectionWrapper):
"""A database connection that can be pooled. When closed, it gets returned
to the pool.
"""
def __init__(self, pool, cnx):
ConnectionWrapper.__init__(self, cnx)
self._pool = pool
def close(self):
if self.cnx:
self._pool._return_cnx(self.cnx)
self.cnx = None
def __del__(self):
self.close()
class ConnectionPool(object):
"""A very simple connection pool implementation."""
def __init__(self, maxsize, connector, **kwargs):
self._dormant = [] # inactive connections in pool
self._active = {} # active connections by thread ID
self._available = threading.Condition(threading.Lock())
self._maxsize = maxsize # maximum pool size
self._cursize = 0 # current pool size, includes active connections
self._connector = connector
self._kwargs = kwargs
def get_cnx(self, timeout=None):
start = time.time()
self._available.acquire()
try:
tid = threading._get_ident()
if tid in self._active:
self._active[tid][0] += 1
return PooledConnection(self, self._active[tid][1])
while True:
if self._dormant:
cnx = self._dormant.pop()
try:
cnx.cursor() # check whether the connection is stale
break
except Exception:
cnx.close()
elif self._maxsize and self._cursize < self._maxsize:
cnx = self._connector.get_connection(**self._kwargs)
self._cursize += 1
break
else:
if timeout:
self._available.wait(timeout)
if (time.time() - start) >= timeout:
raise TimeoutError, 'Unable to get database ' \
'connection within %d seconds' \
% timeout
else:
self._available.wait()
self._active[tid] = [1, cnx]
return PooledConnection(self, cnx)
finally:
self._available.release()
def _return_cnx(self, cnx):
self._available.acquire()
try:
tid = threading._get_ident()
if tid in self._active:
num, cnx_ = self._active.get(tid)
assert cnx is cnx_
if num > 1:
self._active[tid][0] = num - 1
else:
del self._active[tid]
if cnx not in self._dormant:
cnx.rollback()
if cnx.poolable:
self._dormant.append(cnx)
else:
self._cursize -= 1
self._available.notify()
finally:
self._available.release()
def shutdown(self):
self._available.acquire()
try:
for cnx in self._dormant:
cnx.cnx.close()
finally:
self._available.release()
| # -*- coding: utf-8 -*-
#
# Copyright (C) 2005 Edgewall Software
# Copyright (C) 2005 Christopher Lenz <cmlenz@gmx.de>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.com/license.html.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://projects.edgewall.com/trac/.
#
# Author: Christopher Lenz <cmlenz@gmx.de>
try:
import threading
except ImportError:
import dummy_threading as threading
threading._get_ident = lambda: 0
import time
from trac.db.util import ConnectionWrapper
class TimeoutError(Exception):
"""Exception raised by the connection pool when no connection has become
available after a given timeout."""
class PooledConnection(ConnectionWrapper):
"""A database connection that can be pooled. When closed, it gets returned
to the pool.
"""
def __init__(self, pool, cnx):
ConnectionWrapper.__init__(self, cnx)
self._pool = pool
def close(self):
if self.cnx:
self._pool._return_cnx(self.cnx)
self.cnx = None
def __del__(self):
self.close()
class ConnectionPool(object):
    """A very simple connection pool implementation."""

    def __init__(self, maxsize, connector, **kwargs):
        # All pool bookkeeping below is guarded by the `_available`
        # condition variable.
        self._dormant = [] # inactive connections in pool
        self._active = {} # active connections by thread ID
        self._available = threading.Condition(threading.Lock())
        self._maxsize = maxsize # maximum pool size
        self._cursize = 0 # current pool size, includes active connections
        self._connector = connector
        self._kwargs = kwargs # passed through to connector.get_connection()

    def get_cnx(self, timeout=None):
        """Return a `PooledConnection` for the calling thread.

        If the calling thread already holds a connection its use count is
        incremented and the same underlying connection is returned
        (re-entrant use).  Otherwise a dormant connection is reused, or a
        new one is created while the pool is below `maxsize`.  When the
        pool is exhausted the call blocks; with `timeout` (seconds) a
        `TimeoutError` is raised once the deadline passes.
        """
        start = time.time()
        self._available.acquire()
        try:
            tid = threading._get_ident()
            if tid in self._active:
                # Same thread asking again: bump the refcount, reuse cnx.
                self._active[tid][0] += 1
                return PooledConnection(self, self._active[tid][1])
            while True:
                if self._dormant:
                    cnx = self._dormant.pop()
                    break
                elif self._maxsize and self._cursize < self._maxsize:
                    cnx = self._connector.get_connection(**self._kwargs)
                    self._cursize += 1
                    break
                else:
                    if timeout:
                        # wait() can also wake on notify; re-check the
                        # clock before deciding to give up.
                        self._available.wait(timeout)
                        if (time.time() - start) >= timeout:
                            raise TimeoutError, 'Unable to get database ' \
                                                'connection within %d seconds' \
                                                % timeout
                    else:
                        self._available.wait()
            self._active[tid] = [1, cnx]
            return PooledConnection(self, cnx)
        finally:
            self._available.release()

    def _return_cnx(self, cnx):
        # Called by PooledConnection.close(): decrement the per-thread use
        # count and, when it reaches zero, roll back any open transaction
        # and park the connection (or drop it if the backend is not
        # poolable), then wake one waiter.
        self._available.acquire()
        try:
            tid = threading._get_ident()
            if tid in self._active:
                num, cnx_ = self._active.get(tid)
                assert cnx is cnx_
                if num > 1:
                    self._active[tid][0] = num - 1
                else:
                    del self._active[tid]
                    if cnx not in self._dormant:
                        cnx.rollback()
                        if cnx.poolable:
                            self._dormant.append(cnx)
                        else:
                            self._cursize -= 1
                        self._available.notify()
        finally:
            self._available.release()

    def shutdown(self):
        """Close every dormant connection; active ones are left untouched."""
        self._available.acquire()
        try:
            for cnx in self._dormant:
                cnx.cnx.close()
        finally:
            self._available.release()
| Python | 0.000002 |
bcff742c27904f995d9f5e8a184f0348b58139a5 | fix closing bracket | {{cookiecutter.repo_name}}/fabfile.py | {{cookiecutter.repo_name}}/fabfile.py | # -*- coding: utf-8 -*-
import os
import datetime
from contextlib import contextmanager
from fabric.api import env, run, local, prefix, sudo
def live():
    """Connects to the server.

    Points Fabric's shared ``env`` at the production host and checkout;
    the host name comes from the ``{{cookiecutter.repo_name}}_host``
    environment variable.
    """
    env.hosts = [os.environ.get('{{cookiecutter.repo_name}}_host')]
    env.user = 'freshmilk'
    env.cwd = '/var/www/{{cookiecutter.domain_name}}'
    # user@host:path string consumed by the rsync-based sync_* tasks.
    env.connect_to = '{0}@{1}:{2}'.format(env.user, env.hosts[0], env.cwd)
def beta():
    """Connects to beta/testing server.

    Same as ``live`` but targets the ``beta.`` virtual-host directory.
    """
    env.hosts = [os.environ.get('{{cookiecutter.repo_name}}_host')]
    env.user = 'freshmilk'
    env.cwd = '/var/www/beta.{{cookiecutter.domain_name}}'
    env.connect_to = '{0}@{1}:{2}'.format(env.user, env.hosts[0], env.cwd)
def gitpull(tag=None):
    """Pulls the upstream branch on the server; optionally checks out *tag*."""
    run('git pull')
    if tag is not None:
        run('git checkout %s' % tag)
@contextmanager
def source_env():
    """Activates the embedded virtual env for the enclosed remote commands."""
    # Every run() issued inside this block is prefixed with the venv
    # activation, so remote commands use the project's own interpreter.
    with prefix('source env/bin/activate'):
        yield
def collectstatic():
    """Collect static files on server."""
    # Runs inside the remote virtualenv (see source_env).
    with source_env():
        run('python manage.py collectstatic')
def migrate():
    """Sync project database on server."""
    # Applies Django migrations inside the remote virtualenv.
    with source_env():
        run('python manage.py migrate')
def touch():
    """Touch the wsgi file."""
    # Presumably this makes the WSGI server reload the application —
    # TODO confirm against the deployment setup.
    run('touch {{cookiecutter.repo_name}}/wsgi.py')
def update(tag=None):
    """
    Runs gitpull, collectstatic, migrate and touch.

    :param tag: optional git tag to check out after pulling.  It was
        previously accepted but silently ignored; it is now forwarded
        to ``gitpull`` so deploying a specific tag actually works.
    """
    gitpull(tag)
    collectstatic()
    migrate()
    touch()
def dump():
    """Create a SQL dump on the server via the ``sqldump`` management command."""
    with source_env():
        run('python manage.py sqldump')
def sync_media():
    """Rsync the remote media/ directory into the local working copy."""
    local('rsync -avzh -e ssh %s/media/* media/' % env.connect_to)
def sync_dump():
    """Fetch the remote compressed SQL dump (var/dump.sql.gz) via rsync."""
    local('rsync -avPhzL -e ssh %s/var/dump.sql.gz var' % env.connect_to)
def mirror():
    """Runs dump, sync_media, sync_dump and sqlimport."""
    # Create a fresh dump remotely, pull it down, load it locally, then
    # mirror the media files so the local copy matches the server.
    dump()
    sync_dump()
    local('python manage.py sqlimport')
    sync_media()
| # -*- coding: utf-8 -*-
import os
import datetime
from contextlib import contextmanager
from fabric.api import env, run, local, prefix, sudo
def live():
"""Connects to the server."""
env.hosts = [os.environ.get('{{cookiecutter.repo_name}}_host')]
env.user = 'freshmilk'
env.cwd = '/var/www/{{cookiecutter.domain_name}}'
env.connect_to = '{0}@{1}:{2}'.format(env.user, env.hosts[0], env.cwd)
def beta():
"""Connects to beta/testing server"""
env.hosts = [os.environ.get('{{cookiecutter.repo_name}}_host')]
env.user = 'freshmilk'
env.cwd = '/var/www/beta.{{cookiecutter.domain_name}}'
env.connect_to = '{0}@{1}:{2}'.format(env.user, env.hosts[0], env.cwd)
def gitpull(tag=None):
"""Pulls upstream branch on the server."""
if tag is not None:
run('git pull')
run('git checkout %s' % tag)
else:
run('git pull')
@contextmanager
def source_env():
"""Actives embedded virtual env"""
with prefix('source env/bin/activate'):
yield
def collectstatic():
"""Collect static files on server."""
with source_env():
run('python manage.py collectstatic')
def migrate():
"""Sync project database on server."""
with source_env():
run('python manage.py migrate')
def touch():
"""Touch the wsgi file."""
run('touch {{cookiecutter.repo_name}}/wsgi.py')
def update(tag=None):
"""
Runs gitpull, develop, collectstatic, migrate and touch.
"""
gitpull()
collectstatic()
migrate()
touch()
def dump():
    """Create a SQL dump on the server via the ``sqldump`` management command."""
    with source_env():
        # Fixed: the call was missing its closing parenthesis, which made
        # the whole module fail to import with a SyntaxError.
        run('python manage.py sqldump')
def sync_media():
local('rsync -avzh -e ssh %s/media/* media/' % env.connect_to)
def sync_dump():
local('rsync -avPhzL -e ssh %s/var/dump.sql.gz var' % env.connect_to)
def mirror():
"""Runs dump, sync_media, sync_dump and sqlimport."""
dump()
sync_dump()
local('python manage.py sqlimport')
sync_media()
| Python | 0.000001 |
645c640f38ae67008eb18c79301e19ddfd39c041 | use new valgrind repo (#8538) | var/spack/repos/builtin/packages/valgrind/package.py | var/spack/repos/builtin/packages/valgrind/package.py | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import sys
class Valgrind(AutotoolsPackage):
    """An instrumentation framework for building dynamic analysis.

    There are Valgrind tools that can automatically detect many memory
    management and threading bugs, and profile your programs in
    detail. You can also use Valgrind to build new tools.

    Valgrind is Open Source / Free Software, and is freely available
    under the GNU General Public License, version 2.
    """
    homepage = "http://valgrind.org/"
    url = "https://sourceware.org/pub/valgrind/valgrind-3.13.0.tar.bz2"

    version('3.13.0', '817dd08f1e8a66336b9ff206400a5369')
    version('3.12.0', '6eb03c0c10ea917013a7622e483d61bb')
    version('3.11.0', '4ea62074da73ae82e0162d6550d3f129')
    version('3.10.1', '60ddae962bc79e7c95cfc4667245707f')
    version('3.10.0', '7c311a72a20388aceced1aa5573ce970')
    # Development version tracks the upstream git repository.
    version('develop', git='git://sourceware.org/git/valgrind.git')

    variant('mpi', default=True,
            description='Activates MPI support for valgrind')
    variant('boost', default=True,
            description='Activates boost support for valgrind')

    depends_on('mpi', when='+mpi')
    depends_on('boost', when='+boost')

    # The autotools toolchain is only needed when building from the
    # repository; release tarballs ship pre-generated configure scripts.
    depends_on("autoconf", type='build', when='@develop')
    depends_on("automake", type='build', when='@develop')
    depends_on("libtool", type='build', when='@develop')

    # Apply the patch suggested here:
    # http://valgrind.10908.n7.nabble.com/Unable-to-compile-on-Mac-OS-X-10-11-td57237.html
    patch('valgrind_3_12_0_osx.patch', when='@3.12.0 platform=darwin')

    def configure_args(self):
        """Build the argument list passed to ./configure."""
        spec = self.spec
        options = []
        if not (spec.satisfies('%clang') and sys.platform == 'darwin'):
            # Otherwise with (Apple's) clang there is a linker error:
            # clang: error: unknown argument: '-static-libubsan'
            options.append('--enable-ubsan')
        if sys.platform == 'darwin':
            options.extend([
                '--build=amd64-darwin',
                '--enable-only64bit'
            ])
        return options
| ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import sys
class Valgrind(AutotoolsPackage):
"""An instrumentation framework for building dynamic analysis.
There are Valgrind tools that can automatically detect many memory
management and threading bugs, and profile your programs in
detail. You can also use Valgrind to build new tools.
Valgrind is Open Source / Free Software, and is freely available
under the GNU General Public License, version 2.
"""
homepage = "http://valgrind.org/"
url = "https://sourceware.org/pub/valgrind/valgrind-3.13.0.tar.bz2"
version('3.13.0', '817dd08f1e8a66336b9ff206400a5369')
version('3.12.0', '6eb03c0c10ea917013a7622e483d61bb')
version('3.11.0', '4ea62074da73ae82e0162d6550d3f129')
version('3.10.1', '60ddae962bc79e7c95cfc4667245707f')
version('3.10.0', '7c311a72a20388aceced1aa5573ce970')
version('develop', svn='svn://svn.valgrind.org/valgrind/trunk')
variant('mpi', default=True,
description='Activates MPI support for valgrind')
variant('boost', default=True,
description='Activates boost support for valgrind')
depends_on('mpi', when='+mpi')
depends_on('boost', when='+boost')
depends_on("autoconf", type='build', when='@develop')
depends_on("automake", type='build', when='@develop')
depends_on("libtool", type='build', when='@develop')
# Apply the patch suggested here:
# http://valgrind.10908.n7.nabble.com/Unable-to-compile-on-Mac-OS-X-10-11-td57237.html
patch('valgrind_3_12_0_osx.patch', when='@3.12.0 platform=darwin')
def configure_args(self):
spec = self.spec
options = []
if not (spec.satisfies('%clang') and sys.platform == 'darwin'):
# Otherwise with (Apple's) clang there is a linker error:
# clang: error: unknown argument: '-static-libubsan'
options.append('--enable-ubsan')
if sys.platform == 'darwin':
options.extend([
'--build=amd64-darwin',
'--enable-only64bit'
])
return options
| Python | 0 |
88a028663b7688af362a2ebd5c168aaccc5695c0 | Comment updates | bravado/mapping/request.py | bravado/mapping/request.py | from bravado.mapping.operation import log
from bravado.mapping.param import unmarshal_param
class RequestLike(object):
    """
    Common interface for server side request objects.

    Subclasses are responsible for providing attrs for __required_attrs__.
    """
    # Attributes every concrete request type must expose.
    __required_attrs__ = [
        'path', # dict of URL path parameters
        'params', # dict of parameters from the query string and request body.
        'headers', # dict of request headers
    ]

    def __getattr__(self, name):
        """
        When an attempt to access a required attribute that doesn't exist
        is made, let the caller know that the type is non-compliant in its
        attempt to be `RequestLike`. This is in place of the usual throwing
        of an AttributeError.

        Reminder: __getattr__ is only called when it has already been
        determined that this object does not have the given attr.

        :raises: NotImplementedError when the subclass has not provided access
            to a required attribute.
        """
        if name in self.__required_attrs__:
            raise NotImplementedError(
                'This RequestLike type {0} forgot to implement an attr '
                'for `{1}`'.format(type(self), name))
        # Preserve normal attribute-error semantics for everything else.
        raise AttributeError(
            "'{0}' object has no attribute '{1}'".format(type(self), name))

    def json(self, **kwargs):
        """
        :return: request content in a json-like form
        :rtype: int, float, double, string, unicode, list, dict
        """
        raise NotImplementedError("Implement json() in {0}".format(type(self)))
def unmarshal_request(request, op):
    """Unmarshal Swagger request parameters from the given request-like
    object, one entry per parameter of the operation.

    :type request: :class: `bravado.mapping.request.RequestLike`.
    :type op: :class:`bravado.mapping.operation.Operation`
    :returns: dict where (key, value) = (param_name, param_value)
    """
    request_data = dict(
        (param_name, unmarshal_param(param, request))
        for param_name, param in op.params.iteritems())
    log.debug("Swagger request_data: {0}".format(request_data))
    return request_data
| from bravado.mapping.operation import log
from bravado.mapping.param import unmarshal_param
class RequestLike(object):
"""
Define a common interface for bravado to interface with server side
request objects.
Subclasses are responsible for providing attrs for __required_attrs__.
"""
__required_attrs__ = [
'path', # dict of URL path parameters
'params', # dict of parameters from the query string and request body.
'headers', # dict of request headers
]
def __getattr__(self, name):
"""
When an attempt to access a required attribute that doesn't exist
is made, let the caller know that the type is non-compliant in its
attempt to be `RequestList`. This is in place of the usual throwing
of an AttributeError.
Reminder: __getattr___ is only called when it has already been
determined that this object does not have the given attr.
:raises: NotImplementedError when the subclass has not provided access
to a required attribute.
"""
if name in self.__required_attrs__:
raise NotImplementedError(
'This RequestLike type {0} forgot to implement an attr '
'for `{1}`'.format(type(self), name))
raise AttributeError(
"'{0}' object has no attribute '{1}'".format(type(self), name))
def json(self, **kwargs):
"""
:return: request content in a json-like form
:rtype: int, float, double, string, unicode, list, dict
"""
raise NotImplementedError("Implement json() in {0}".format(type(self)))
def unmarshal_request(request, op):
"""Unmarshal Swagger request parameters from the passed in request like
object.
:type request: :class: `bravado.mapping.request.RequestLike`.
:type op: :class:`bravado.mapping.operation.Operation`
:returns: dict where (key, value) = (param_name, param_value)
"""
request_data = {}
for param_name, param in op.params.iteritems():
param_value = unmarshal_param(param, request)
request_data[param_name] = param_value
log.debug("Swagger request_data: {0}".format(request_data))
return request_data
| Python | 0 |
f0dda4f875c13947d47cf91a58e9a834a5e4a92c | Fix flapping demo geo_location test (#37516) | tests/components/demo/test_geo_location.py | tests/components/demo/test_geo_location.py | """The tests for the demo platform."""
import unittest
import pytest
from homeassistant.components import geo_location
from homeassistant.components.demo.geo_location import (
DEFAULT_UPDATE_INTERVAL,
NUMBER_OF_DEMO_DEVICES,
)
from homeassistant.const import LENGTH_KILOMETERS
from homeassistant.setup import setup_component
import homeassistant.util.dt as dt_util
from tests.async_mock import patch
from tests.common import (
assert_setup_component,
fire_time_changed,
get_test_home_assistant,
)
CONFIG = {geo_location.DOMAIN: [{"platform": "demo"}]}
@pytest.fixture(autouse=True)
def mock_legacy_time(legacy_patchable_time):
    """Make time patchable for all the tests."""
    # autouse=True applies this to every test in the module; the actual
    # patching happens in the `legacy_patchable_time` fixture it requests.
    yield
class TestDemoPlatform(unittest.TestCase):
    """Test the demo platform."""

    def setUp(self):
        """Initialize values for this testcase class."""
        self.hass = get_test_home_assistant()
        # Guarantee the hass instance is stopped even if a test fails.
        self.addCleanup(self.hass.stop)

    def test_setup_platform(self):
        """Test setup of demo platform via configuration."""
        utcnow = dt_util.utcnow()
        # Patching 'utcnow' to gain more control over the timed update.
        with patch("homeassistant.util.dt.utcnow", return_value=utcnow):
            with assert_setup_component(1, geo_location.DOMAIN):
                assert setup_component(self.hass, geo_location.DOMAIN, CONFIG)
            self.hass.block_till_done()

            # In this test, one zone and geolocation entities have been
            # generated.
            all_states = [
                self.hass.states.get(entity_id)
                for entity_id in self.hass.states.entity_ids(geo_location.DOMAIN)
            ]
            assert len(all_states) == NUMBER_OF_DEMO_DEVICES

            for state in all_states:
                # Check a single device's attributes.
                if state.domain != geo_location.DOMAIN:
                    # ignore home zone state
                    continue
                # Demo entities are generated near the configured home
                # coordinates, so both must lie within ~1 degree of it.
                assert (
                    abs(state.attributes["latitude"] - self.hass.config.latitude) < 1.0
                )
                assert (
                    abs(state.attributes["longitude"] - self.hass.config.longitude)
                    < 1.0
                )
                assert state.attributes["unit_of_measurement"] == LENGTH_KILOMETERS

            # Update (replaces 1 device).
            fire_time_changed(self.hass, utcnow + DEFAULT_UPDATE_INTERVAL)
            self.hass.block_till_done()

            # Get all states again, ensure that the number of states is still
            # the same, but the lists are different.
            all_states_updated = [
                self.hass.states.get(entity_id)
                for entity_id in self.hass.states.entity_ids(geo_location.DOMAIN)
            ]
            assert len(all_states_updated) == NUMBER_OF_DEMO_DEVICES
            assert all_states != all_states_updated
| """The tests for the demo platform."""
import unittest
from homeassistant.components import geo_location
from homeassistant.components.demo.geo_location import (
DEFAULT_UPDATE_INTERVAL,
NUMBER_OF_DEMO_DEVICES,
)
from homeassistant.const import LENGTH_KILOMETERS
from homeassistant.setup import setup_component
import homeassistant.util.dt as dt_util
from tests.async_mock import patch
from tests.common import (
assert_setup_component,
fire_time_changed,
get_test_home_assistant,
)
CONFIG = {geo_location.DOMAIN: [{"platform": "demo"}]}
class TestDemoPlatform(unittest.TestCase):
"""Test the demo platform."""
def setUp(self):
"""Initialize values for this testcase class."""
self.hass = get_test_home_assistant()
self.addCleanup(self.hass.stop)
def test_setup_platform(self):
"""Test setup of demo platform via configuration."""
utcnow = dt_util.utcnow()
# Patching 'utcnow' to gain more control over the timed update.
with patch("homeassistant.util.dt.utcnow", return_value=utcnow):
with assert_setup_component(1, geo_location.DOMAIN):
assert setup_component(self.hass, geo_location.DOMAIN, CONFIG)
self.hass.block_till_done()
# In this test, one zone and geolocation entities have been
# generated.
all_states = [
self.hass.states.get(entity_id)
for entity_id in self.hass.states.entity_ids(geo_location.DOMAIN)
]
assert len(all_states) == NUMBER_OF_DEMO_DEVICES
for state in all_states:
# Check a single device's attributes.
if state.domain != geo_location.DOMAIN:
# ignore home zone state
continue
assert (
abs(state.attributes["latitude"] - self.hass.config.latitude) < 1.0
)
assert (
abs(state.attributes["longitude"] - self.hass.config.longitude)
< 1.0
)
assert state.attributes["unit_of_measurement"] == LENGTH_KILOMETERS
# Update (replaces 1 device).
fire_time_changed(self.hass, utcnow + DEFAULT_UPDATE_INTERVAL)
self.hass.block_till_done()
# Get all states again, ensure that the number of states is still
# the same, but the lists are different.
all_states_updated = [
self.hass.states.get(entity_id)
for entity_id in self.hass.states.entity_ids(geo_location.DOMAIN)
]
assert len(all_states_updated) == NUMBER_OF_DEMO_DEVICES
assert all_states != all_states_updated
| Python | 0 |
10a2b3def6936d94e21ac68a15b3ae1428e75e41 | Make the disassembler script work on Linux. | util/dasm.py | util/dasm.py | #!/usr/bin/env python
import sys
import optparse
import subprocess
import re
def dasm(infile, outfp, verbose = True):
    """Disassemble `infile` with objdump and write cleaned-up assembly for
    its .text section to `outfp`.

    Hex addresses that name instruction locations are rewritten as
    ``locXXXX`` labels, both at the start of each line and inside
    operands, so the output uses symbolic targets instead of raw
    addresses.  `verbose` is currently unused.
    """
    command_line = [
        'objdump',
        '--disassemble',
        '--disassemble-zeroes',
        '--disassembler-options=att,suffix',
        #'--prefix-addresses',
        '--no-show-raw-insn',
        '--wide',
        infile
    ]
    p = subprocess.Popen(command_line, stdout=subprocess.PIPE, shell=False)
    #print p.communicate()[0]; return
    infp = p.stdout
    it = iter(infp)
    # Skip everything up to the .text section header.
    for line in it:
        # TODO: handle other sections too
        if line == "Disassembly of section .text:\n":
            break
    insns = []  # (address-or-label, instruction) pairs, in order
    addrs = {}  # int address -> "locXXXX" label
    for line in it:
        if not line:
            break
        line = line[:-1]  # strip the trailing newline
        if not line:
            continue
        if line.startswith("Disassembly of section "):
            break
        # "<addr> <symbol>" -> "<symbol>"; other "<addr> <...>" -> "0x<addr>"
        line = re.sub(r"([0-9A-Fa-f]+) <([._@A-Za-z][_@A-Za-z]*)>", r"\2", line)
        line = re.sub(r"([0-9A-Fa-f]+) <([^>]*)>", r"0x\1", line)
        addr, insn = [part.strip() for part in line.split(":", 1)]
        if insn == "(bad)":
            continue  # undecodable bytes; drop the line
        try:
            intaddr = int(addr, 16)
        except ValueError:
            pass  # `addr` is already a symbol name, keep it as-is
        else:
            addr = "loc" + addr
            addrs[intaddr] = addr
        insns.append((addr, insn))
    def repl(mo):
        # Replace a hex literal with its label when it names a known
        # instruction address; leave ordinary constants untouched.
        addr = mo.group()
        try:
            return addrs[int(addr,16)]
        except KeyError:
            return addr
    for addr, insn in insns:
        insn = re.sub(r'\b0[xX]([0-9a-fA-F]+)\b', repl, insn)
        outfp.write("%s: %s\n" % (addr, insn))
def main():
    """Parse command-line options and disassemble each listed executable."""
    parser = optparse.OptionParser(
        usage = "\n\t%prog [options] executable ...",
        version = "%prog 1.0")
    parser.add_option(
        '-o', '--output',
        type = "string", dest = "output",
        help = "specify output assembly file")
    parser.add_option(
        '-v', '--verbose',
        action = "count", dest = "verbose", default = 1,
        help = "show extra information")
    parser.add_option(
        '-q', '--quiet',
        action = "store_const", dest = "verbose", const = 0,
        help = "no extra information")
    (options, args) = parser.parse_args(sys.argv[1:])
    for arg in args:
        if options.output is None:
            fpout = sys.stdout
        else:
            # open() instead of the Python-2-only file() builtin.  Mode
            # 'wt' still truncates, so with several inputs only the last
            # one's output survives (previous behavior, kept as-is).
            fpout = open(options.output, 'wt')
        dasm(arg, fpout, options.verbose)
        # Flush/close per-file outputs; never close stdout.
        if fpout is not sys.stdout:
            fpout.close()
main()
| #!/usr/bin/env python
import sys
import optparse
import subprocess
import re
def dasm(infile, outfp, verbose = True):
command_line = [
'objdump',
'--disassemble',
'--disassemble-zeroes',
'--disassembler-options=att,suffix',
'--prefix-addresses',
'--no-show-raw-insn',
'--wide',
infile
]
p = subprocess.Popen(command_line, stdout=subprocess.PIPE, shell=False)
infp = p.stdout
for line in infp:
if line == "Disassembly of section .text:\n":
break
insns = []
addrs = set()
for line in infp:
line = line[:-1]
if not line:
break
addr, insn = line.split(" ", 1)
if insn.strip() == "(bad)":
continue
insns.append((addr, insn))
addrs.add(addr)
def repl(mo):
addr = mo.group()
if addr in addrs:
return "loc" + addr[2:]
else:
return addr
for addr, insn in insns:
insn = re.sub(r'\b0x[0-9a-fA-F]+\b', repl, insn)
addr = "loc" + addr[2:]
outfp.write("%s: %s\n" % (addr, insn))
def main():
parser = optparse.OptionParser(
usage = "\n\t%prog [options] executable ...",
version = "%prog 1.0")
parser.add_option(
'-o', '--output',
type = "string", dest = "output",
help = "specify output assembly file")
parser.add_option(
'-v', '--verbose',
action = "count", dest = "verbose", default = 1,
help = "show extra information")
parser.add_option(
'-q', '--quiet',
action = "store_const", dest = "verbose", const = 0,
help = "no extra information")
(options, args) = parser.parse_args(sys.argv[1:])
for arg in args:
if options.output is None:
# root, ext = os.path.splitext(arg)
# fpout = file(root + '.s', 'wt')
#elif options.output is '-':
fpout = sys.stdout
else:
fpout = file(options.output, 'wt')
dasm(arg, fpout, options.verbose)
if __name__ == '__main__':
main()
| Python | 0 |
887b03d7587525509d3652ef42b930025194d2ad | Update 2sum.py | Array/2sum.py | Array/2sum.py | Given an array of integers, find two numbers such that they add up to a specific target number.
The function twoSum should return indices of the two numbers such that they add up to the target,
where index1 must be less than index2. Please note that your returned answers (both index1 and index2) are not zero-based.
You may assume that each input would have exactly one solution.
Input: numbers={2, 7, 11, 15}, target=9
Output: index1=1, index2=2
class Solution:
    # @return a tuple, (index1, index2)
    # NOTE(review): this class defines `twoSum` three times; only the
    # *last* definition survives at runtime, so the first two are dead
    # code kept for comparing approaches/timings.
    # 48s
    # O(n)
    def twoSum(self, nums, target):
        # One-pass hash map: value -> 1-based index where it was seen.
        if not nums or len(nums) < 2: return None
        idict = {}
        for i, value in enumerate(nums):
            if target - value in idict:
                return [idict[target-value], i+1]
            idict[value] = i+1
    # 79ms
    def twoSum(self, num, target):
        # Hash-map variant storing the *complement* of each value; returns
        # a tuple of 1-based indices.
        dic = {}
        for i in xrange(len(num)):
            if num[i] in dic:
                result1 = dic[num[i]] +1
                result2 = i +1
            else:
                dic[target-num[i]] = i
        return (result1,result2)
    # 68ms
    def twoSum(self, num, target):
        # Two-pointer scan over a sorted copy, then map the matched values
        # back to positions in the original list.
        tmpnum = num[:]
        tmpnum.sort()
        length = len(num)
        i = 0; j = length-1
        while i < j:
            tmpval = tmpnum[i]+tmpnum[j]
            if tmpval == target:
                res1 = num.index(tmpnum[i])
                # Reversing makes .index() locate the last occurrence of
                # the second value — presumably to handle duplicates such
                # as [3, 3]; note it mutates the caller's list.
                num.reverse()
                res2 = len(num)-1-num.index(tmpnum[j])
                if res1<res2: return (res1+1,res2+1)
                else: return(res2+1,res1+1)
            if tmpval > target:
                j -= 1
            if tmpval < target:
                i += 1
| Given an array of integers, find two numbers such that they add up to a specific target number.
The function twoSum should return indices of the two numbers such that they add up to the target,
where index1 must be less than index2. Please note that your returned answers (both index1 and index2) are not zero-based.
You may assume that each input would have exactly one solution.
Input: numbers={2, 7, 11, 15}, target=9
Output: index1=1, index2=2
class Solution:
# @return a tuple, (index1, index2)
def twoSum(self, nums, target):
if not nums or len(nums) < 2: return None
idict = {}
for i, value in enumerate(nums):
if target - value in idict:
return [idict[target-value], i+1]
idict[value] = i+1
# 79ms
def twoSum(self, num, target):
dic = {}
for i in xrange(len(num)):
if num[i] in dic:
result1 = dic[num[i]] +1
result2 = i +1
else:
dic[target-num[i]] = i
return (result1,result2)
# 68ms
def twoSum(self, num, target):
tmpnum = num[:]
tmpnum.sort()
length = len(num)
i = 0; j = length-1
while i < j:
tmpval = tmpnum[i]+tmpnum[j]
if tmpval == target:
res1 = num.index(tmpnum[i])
num.reverse()
res2 = len(num)-1-num.index(tmpnum[j])
if res1<res2: return (res1+1,res2+1)
else: return(res2+1,res1+1)
if tmpval > target:
j -= 1
if tmpval < target:
i += 1
| Python | 0 |
dbdfbc18ebadc0a1d50a6513bb982d2e3881036f | Add MAX_TURNS and some more output to train-ml-bot | train-ml-bot.py | train-ml-bot.py | """
Train a machine learning model for the classifier bot. We create a player, and watch it play games against itself.
Every observed state is converted to a feature vector and labeled with the eventual outcome
(-1.0: player 2 won, 1.0: player 1 won)
This is part of the second worksheet.
"""
from api import State, util
# This package contains various machine learning algorithms
import sys
import sklearn
import sklearn.linear_model
from sklearn.externals import joblib
from bots.rand import rand
# from bots.alphabeta import alphabeta
from bots.ml import ml
from bots.ml.ml import features
import matplotlib.pyplot as plt
# How many games to play
GAMES = 1000
# Number of planets in the field
NUM_PLANETS = 6
# Maximum number of turns to play
NUM_TURNS = 100
# The player we'll observe
player = rand.Bot()
# player = alphabeta.Bot()
data = []
target = []
# Play GAMES self-play games and record (state features, outcome) pairs.
for g in range(GAMES):
    state, id = State.generate(NUM_PLANETS)
    state_vectors = []
    i = 0
    # Cap each game at NUM_TURNS turns so a non-terminating game cannot
    # stall data collection.
    while not state.finished() and i <= NUM_TURNS:
        state_vectors.append(features(state))
        move = player.get_move(state)
        state = state.next(move)
        i += 1
    winner = state.winner()
    # Label every state observed during the game with the final outcome.
    for state_vector in state_vectors:
        data.append(state_vector)
        target.append('won' if winner == 1 else 'lost')
    sys.stdout.write(".")
    sys.stdout.flush()
    if g % (GAMES/10) == 0:
        print("")
        print('game {} finished ({}%)'.format(g, (g/float(GAMES)*100)))
# Train a logistic regression model
learner = sklearn.linear_model.LogisticRegression()
model = learner.fit(data, target)
# Check for class imbalance
count = {}
# NOTE(review): `str` shadows the builtin here; harmless but worth renaming.
for str in target:
    if str not in count:
        count[str] = 0
    count[str] += 1
print('instances per class: {}'.format(count))
# Store the model in the ml directory
joblib.dump(model, './bots/ml/model.pkl')
print('Done')
| """
Train a machine learning model for the classifier bot. We create a player, and watch it play games against itself.
Every observed state is converted to a feature vector and labeled with the eventual outcome
(-1.0: player 2 won, 1.0: player 1 won)
This is part of the second worksheet.
"""
from api import State, util
# This package contains various machine learning algorithms
import sklearn
import sklearn.linear_model
from sklearn.externals import joblib
from bots.rand import rand
# from bots.alphabeta import alphabeta
from bots.ml import ml
from bots.ml.ml import features
import matplotlib.pyplot as plt
# How many games to play
GAMES = 1000
# Number of planets in the field
NUM_PLANETS = 6
# The player we'll observe
player = rand.Bot()
# player = alphabeta.Bot()
data = []
target = []
for g in range(GAMES):
state, id = State.generate(NUM_PLANETS)
state_vectors = []
while not state.finished():
state_vectors.append(features(state))
move = player.get_move(state)
state = state.next(move)
winner = state.winner()
for state_vector in state_vectors:
data.append(state_vector)
target.append('won' if winner == 1 else 'lost')
if g % (GAMES/10) == 0:
print('game {} finished ({}%)'.format(g, (g/float(GAMES)*100) ))
# Train a logistic regression model
learner = sklearn.linear_model.LogisticRegression()
model = learner.fit(data, target)
# Check for class imbalance
count = {}
for str in target:
if str not in count:
count[str] = 0
count[str] += 1
print('instances per class: {}'.format(count))
# Store the model in the ml directory
joblib.dump(model, './bots/ml/model.pkl')
print('Done')
| Python | 0 |
c5af0d98407052b9f04e37efc741c9b457825eb7 | Fix reading JSON file | Python/scoreP2.py | Python/scoreP2.py | # coding=utf-8
import json
import os
import numpy as np
from scoreCommon import matchInputFile, \
computeCommonMetrics, computeAveragePrecisionMetrics
_FEATURE_NAMES = ['globules', 'streaks']
def loadFeatures(featuresPath):
    """Load and validate a per-superpixel feature file.

    :param featuresPath: path to a JSON file mapping each feature name in
        _FEATURE_NAMES to an array of confidence values.
    :returns: dict of feature name -> list of floats, each in [0.0, 1.0].
    :raises Exception: if the file is unreadable, is not valid JSON, or
        fails any of the structural checks below.
    """
    try:
        with open(featuresPath) as f:
            features = json.load(f)
    except IOError:
        raise Exception('Internal error: error reading JSON file: %s'
                        % os.path.basename(featuresPath))
    except ValueError:
        # TODO: is this the right error type?
        raise Exception('Could not parse file "%s" as JSON.' %
                        os.path.basename(featuresPath))
    if not isinstance(features, dict):
        raise Exception('JSON file %s does not contain an Object '
                        '(key-value mapping) at the top-level.' %
                        os.path.basename(featuresPath))
    for featureName in _FEATURE_NAMES:
        if featureName not in features:
            raise Exception('JSON file "%s" does not contain an element for '
                            'feature "%s".' %
                            (os.path.basename(featuresPath), featureName))
        if not isinstance(features[featureName], list):
            raise Exception('Feature "%s" in JSON file "%s" is not an Array.' %
                            (featureName, os.path.basename(featuresPath)))
        try:
            # Normalize all values to float (ints in the JSON are accepted).
            features[featureName] = [
                float(superpixelValue)
                for superpixelValue in features[featureName]
            ]
        except ValueError:
            raise Exception('Array for feature "%s" in JSON file "%s" contains '
                            'non-floating-point value(s).' %
                            (featureName, os.path.basename(featuresPath)))
        for superpixelValue in features[featureName]:
            if not (0.0 <= superpixelValue <= 1.0):
                raise Exception('Array for feature "%s" in JSON file "%s" '
                                'contains a value outside the range '
                                '[0.0, 1.0].' %
                                (featureName, os.path.basename(featuresPath)))
    return features
def scoreP2Features(truthPath, testPath):
    """Score one submission file against its ground-truth counterpart.

    :param truthPath: path to the ground-truth feature JSON file.
    :param testPath: path to the submitted feature JSON file.
    :returns: list of {'dataset': ..., 'metrics': ...} dicts, one per
        feature in _FEATURE_NAMES.
    :raises Exception: if a submitted feature array has the wrong length,
        or either file fails loadFeatures validation.
    """
    truthFeatures = loadFeatures(truthPath)
    testFeatures = loadFeatures(testPath)
    scores = []
    for featureName in _FEATURE_NAMES:
        if len(testFeatures[featureName]) != len(truthFeatures[featureName]):
            raise Exception('Array for feature "%s" in JSON file "%s" is length'
                            ' %d (expected length %d).' %
                            (featureName, os.path.basename(testPath),
                             len(testFeatures[featureName]),
                             len(truthFeatures[featureName])))
        # Build the Numpy arrays for calculations
        truthValues = np.array(truthFeatures[featureName])
        testValues = np.array(testFeatures[featureName])
        # Compute accuracy, sensitivity, and specificity
        # (confidences are binarized at the 0.5 threshold)
        truthBinaryValues = truthValues > 0.5
        testBinaryValues = testValues > 0.5
        metrics = computeCommonMetrics(truthBinaryValues, testBinaryValues)
        # Compute average precision
        metrics.extend(computeAveragePrecisionMetrics(truthValues, testValues))
        # truthPath ~= '/.../ISIC_0000003.json'
        datasetName = os.path.splitext(os.path.basename(truthPath))[0]
        scores.append({
            'dataset': '%s_%s' % (datasetName, featureName),
            'metrics': metrics
        })
    return scores
def scoreP2(truthDir, testDir):
    """Score a whole Part-2 submission directory against the ground truth."""
    scores = []
    for name in sorted(os.listdir(truthDir)):
        matched = matchInputFile(name, testDir)
        scores.extend(scoreP2Features(os.path.join(truthDir, name), matched))
    return scores
| # coding=utf-8
import json
import os
import numpy as np
from scoreCommon import matchInputFile, \
computeCommonMetrics, computeAveragePrecisionMetrics
# Feature names expected in every Part-2 JSON file.
_FEATURE_NAMES = ['globules', 'streaks']


def loadFeatures(featuresPath):
    """Load a Part-2 feature JSON file and validate its structure.

    The file must contain a top-level object mapping each name in
    _FEATURE_NAMES to an array of per-superpixel confidences in [0.0, 1.0].
    Returns the validated mapping with every value coerced to float.
    Raises Exception with a user-facing message on any validation failure.
    """
    try:
        # BUGFIX: json.load() takes a file object, not a path string; the
        # previous json.load(featuresPath) raised AttributeError on the str.
        with open(featuresPath) as featuresFile:
            features = json.load(featuresFile)
    except ValueError:
        # TODO: is this the right error type?
        raise Exception('Could not parse file "%s" as JSON.' %
                        os.path.basename(featuresPath))

    if not isinstance(features, dict):
        raise Exception('JSON file %s does not contain an Object '
                        '(key-value mapping) at the top-level.' %
                        os.path.basename(featuresPath))

    for featureName in _FEATURE_NAMES:
        if featureName not in features:
            raise Exception('JSON file "%s" does not contain an element for '
                            'feature "%s".' %
                            (os.path.basename(featuresPath), featureName))

        if not isinstance(features[featureName], list):
            raise Exception('Feature "%s" in JSON file "%s" is not an Array.' %
                            (featureName, os.path.basename(featuresPath)))

        try:
            # Coerce to float so downstream numeric code gets uniform values.
            features[featureName] = [
                float(superpixelValue)
                for superpixelValue in features[featureName]
            ]
        except ValueError:
            raise Exception('Array for feature "%s" in JSON file "%s" contains '
                            'non-floating-point value(s).' %
                            (featureName, os.path.basename(featuresPath)))

        for superpixelValue in features[featureName]:
            if not (0.0 <= superpixelValue <= 1.0):
                raise Exception('Array for feature "%s" in JSON file "%s" '
                                'contains a value outside the range '
                                '[0.0, 1.0].' %
                                (featureName, os.path.basename(featuresPath)))

    return features
def scoreP2Features(truthPath, testPath):
    """Compute all per-feature metrics for a single truth/test file pair."""
    truthFeatures = loadFeatures(truthPath)
    testFeatures = loadFeatures(testPath)

    results = []
    for featureName in _FEATURE_NAMES:
        expected = truthFeatures[featureName]
        submitted = testFeatures[featureName]
        if len(submitted) != len(expected):
            raise Exception('Array for feature "%s" in JSON file "%s" is length'
                            ' %d (expected length %d).' %
                            (featureName, os.path.basename(testPath),
                             len(submitted), len(expected)))

        # Build the Numpy arrays for calculations
        truthValues = np.array(expected)
        testValues = np.array(submitted)

        # Threshold at 0.5 for the binary accuracy-style metrics.
        metrics = computeCommonMetrics(truthValues > 0.5, testValues > 0.5)
        # Average precision uses the raw floating-point confidences.
        metrics.extend(computeAveragePrecisionMetrics(truthValues, testValues))

        # truthPath ~= '/.../ISIC_0000003.json'
        datasetName = os.path.splitext(os.path.basename(truthPath))[0]
        results.append({
            'dataset': '%s_%s' % (datasetName, featureName),
            'metrics': metrics
        })
    return results
def scoreP2(truthDir, testDir):
    """Score every ground-truth file in truthDir against its match in testDir."""
    all_scores = []
    for truthFile in sorted(os.listdir(truthDir)):
        truthPath = os.path.join(truthDir, truthFile)
        testPath = matchInputFile(truthFile, testDir)
        all_scores.extend(scoreP2Features(truthPath, testPath))
    return all_scores
| Python | 0.999549 |
6454bca66b73efa6e124fce80634fc98bd0b9c25 | add new dependencies for python 3.7.6 | Back/setup.py | Back/setup.py | import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'README.txt')) as f:
README = f.read()
with open(os.path.join(here, 'CHANGES.txt')) as f:
CHANGES = f.read()
requires = [
'marshmallow==3.3.0',
'pyodbc==4.0.27',
'pyramid==1.10.4',
'sqlalchemy==1.3.12',
'transaction==3.0.0',
'waitress==1.4.2',
'webargs==6.0.0b3'
]
setup(
name='ns_portal',
version='0.4',
description='ns_portal',
long_description=README + '\n\n' + CHANGES,
classifiers=[
"Programming Language :: Python",
"Framework :: Pyramid",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
],
author='',
author_email='',
url='',
keywords='web wsgi bfg pylons pyramid',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
test_suite='ns_portal',
install_requires=requires,
entry_points="""\
[paste.app_factory]
main = ns_portal:main
[console_scripts]
initialize_ns_portal_db = ns_portal.scripts.initializedb:main
"""
)
| import os
from setuptools import setup, find_packages

here = os.path.abspath(os.path.dirname(__file__))


def _read(filename):
    """Return the contents of a file located next to this setup.py."""
    with open(os.path.join(here, filename)) as handle:
        return handle.read()


README = _read('README.txt')
CHANGES = _read('CHANGES.txt')

# Pinned runtime dependencies.
requires = [
    'pyodbc==4.0.28',
    'pyramid==1.10.4',
    'sqlalchemy==1.3.12',
    'transaction==3.0.0',
    'waitress==1.4.2',
    'webargs==6.0.0b2',
]

setup(
    name='ns_portal',
    version='0.3',
    description='ns_portal',
    long_description=README + '\n\n' + CHANGES,
    classifiers=[
        "Programming Language :: Python",
        "Framework :: Pyramid",
        "Topic :: Internet :: WWW/HTTP",
        "Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
    ],
    author='',
    author_email='',
    url='',
    keywords='web wsgi bfg pylons pyramid',
    packages=find_packages(),
    include_package_data=True,
    zip_safe=False,
    test_suite='ns_portal',
    install_requires=requires,
    entry_points="""\
    [paste.app_factory]
    main = ns_portal:main
    [console_scripts]
    initialize_ns_portal_db = ns_portal.scripts.initializedb:main
    """,
)
| Python | 0.000001 |
d9c9f9c363f5520f37800930efd9eaa1e43daed7 | bump version | ttt/__init__.py | ttt/__init__.py | # -*- coding: utf-8 -*-
__version__ = '0.3.2'  # PEP 396 single-source package version string
| # -*- coding: utf-8 -*-
__version__ = '0.3.1'  # PEP 396 single-source package version string
| Python | 0 |
c6c74870e6557dbd0523d1b01f377f14b05b632a | Add db check to makeTest to ensure we cover cases where an explicit path to a test function is passed | django_nose/plugin.py | django_nose/plugin.py | import os.path
import sys
from django.conf import settings
from django.db.models.loading import get_apps, load_app
from django.test.testcases import TransactionTestCase
class ResultPlugin(object):
    """
    Captures the TestResult object for later inspection.

    nose doesn't return the full test result object from any of its runner
    methods.  Pass an instance of this plugin to the TestProgram and use
    ``result`` after running the tests to get the TestResult object.
    """
    # nose plugin interface: plugin name and enabled-by-default flag.
    name = "result"
    enabled = True

    def finalize(self, result):
        # Called by nose at the end of the run; stash the TestResult so the
        # caller can read it from the plugin instance.
        self.result = result
class DjangoSetUpPlugin(object):
    """
    Configures Django to setup and tear down the environment.

    This allows coverage to report on all code imported and used during the
    initialisation of the test runner.

    Only sets up databases if a single class inherits from
    ``django.test.testcases.TransactionTestCase``.

    Also ensures you don't run the same test case multiple times.
    """
    name = "django setup"
    enabled = True

    def __init__(self, runner):
        super(DjangoSetUpPlugin, self).__init__()
        self.runner = runner
        # Keep the original streams so database setup output can be routed
        # to them in prepareTestRunner, even if something replaced
        # sys.stdout/sys.stderr in the meantime.
        self.sys_stdout = sys.stdout
        self.sys_stderr = sys.stderr
        # Flipped to True as soon as any selected test needs a database.
        self.needs_db = False
        self.started = False
        # Every test class/method/function already accepted; used to reject
        # duplicates in the want* hooks below.
        self._registry = set()

    def begin(self):
        # Candidate Django app modules discovered while importing tests;
        # added to INSTALLED_APPS in prepareTestRunner.
        self.add_apps = set()

    def wantClass(self, cls):
        if issubclass(cls, TransactionTestCase):
            self.needs_db = True

        if cls in self._registry:
            return False
        self._registry.add(cls)

    def wantMethod(self, method):
        if issubclass(method.im_class, TransactionTestCase):
            self.needs_db = True

        if method in self._registry:
            return False
        self._registry.add(method)

    def wantFunction(self, function):
        if function in self._registry:
            return False
        self._registry.add(function)

    def makeTest(self, test, parent):
        # Covers explicit test paths given on the command line, which can
        # bypass the want* hooks above.
        if self.needs_db:
            return

        # BUGFIX: `test` may be a plain function with no im_class attribute;
        # the previous unguarded `test.im_class` access raised AttributeError.
        im_class = getattr(test, 'im_class', None)
        if not im_class:
            return

        if issubclass(im_class, TransactionTestCase):
            self.needs_db = True

    def beforeImport(self, filename, module):
        # handle case of tests.models
        if not os.path.isdir(filename):
            filepath = os.path.dirname(filename)
            module = module.rsplit('.', 1)[0]
        else:
            filepath = filename

        models_path = os.path.join(filepath, 'models.py')
        if os.path.exists(models_path):
            self.add_apps.add(module)

        # handle case of fooapp.tests, where fooapp.models exists
        models_path = os.path.join(filepath, os.pardir, 'models.py')
        if os.path.exists(models_path):
            self.add_apps.add(module.rsplit('.', 1)[0])

    def prepareTestRunner(self, test):
        # Temporarily restore the original streams so Django's setup output
        # is not swallowed by nose's capture.
        cur_stdout = sys.stdout
        cur_stderr = sys.stderr

        sys.stdout = self.sys_stdout
        sys.stderr = self.sys_stderr

        if self.add_apps:
            for app in self.add_apps:
                if app in settings.INSTALLED_APPS:
                    continue
                mod = load_app(app)
                if mod:
                    settings.INSTALLED_APPS.append(app)

        get_apps()

        self.runner.setup_test_environment()
        if self.needs_db:
            self.old_names = self.runner.setup_databases()

        sys.stdout = cur_stdout
        sys.stderr = cur_stderr

        self.started = True

    def finalize(self, result):
        # Only tear down what prepareTestRunner actually set up.
        if self.started:
            if self.needs_db:
                self.runner.teardown_databases(self.old_names)
            self.runner.teardown_test_environment()
| import os.path
import sys
from django.conf import settings
from django.db.models.loading import get_apps, load_app
from django.test.testcases import TransactionTestCase
class ResultPlugin(object):
    """
    Captures the TestResult object for later inspection.

    nose doesn't return the full test result object from any of its runner
    methods.  Pass an instance of this plugin to the TestProgram and use
    ``result`` after running the tests to get the TestResult object.
    """
    # nose plugin interface: plugin name and enabled-by-default flag.
    name = "result"
    enabled = True

    def finalize(self, result):
        # Called by nose at the end of the run; stash the TestResult so the
        # caller can read it from the plugin instance.
        self.result = result
class DjangoSetUpPlugin(object):
    """
    Configures Django to setup and tear down the environment.

    This allows coverage to report on all code imported and used during the
    initialisation of the test runner.

    Only sets up databases if a single class inherits from
    ``django.test.testcases.TransactionTestCase``.

    Also ensures you don't run the same test case multiple times.
    """
    name = "django setup"
    enabled = True

    def __init__(self, runner):
        super(DjangoSetUpPlugin, self).__init__()
        self.runner = runner
        # Keep the original streams so database setup output can be routed
        # to them in prepareTestRunner.
        self.sys_stdout = sys.stdout
        self.sys_stderr = sys.stderr
        # Flipped to True as soon as any selected test needs a database.
        self.needs_db = False
        self.started = False
        # Every test class/method/function already accepted; used to reject
        # duplicates in the want* hooks below.
        self._registry = set()

    def begin(self):
        # Candidate Django app modules discovered while importing tests;
        # added to INSTALLED_APPS in prepareTestRunner.
        self.add_apps = set()

    def wantClass(self, cls):
        if issubclass(cls, TransactionTestCase):
            self.needs_db = True

        if cls in self._registry:
            return False
        self._registry.add(cls)

    def wantMethod(self, method):
        if issubclass(method.im_class, TransactionTestCase):
            self.needs_db = True

        if method in self._registry:
            return False
        self._registry.add(method)

    def wantFunction(self, function):
        if function in self._registry:
            return False
        self._registry.add(function)

    def makeTest(self, test, parent):
        # GENERALIZATION: when an explicit path to a test is passed on the
        # command line the want* hooks above can be bypassed, leaving
        # needs_db unset; detect TransactionTestCase subclasses here too.
        if self.needs_db:
            return

        # `test` may be a plain function with no im_class attribute; guard
        # with getattr instead of raising AttributeError.
        im_class = getattr(test, 'im_class', None)
        if not im_class:
            return

        if issubclass(im_class, TransactionTestCase):
            self.needs_db = True

    def beforeImport(self, filename, module):
        # handle case of tests.models
        if not os.path.isdir(filename):
            filepath = os.path.dirname(filename)
            module = module.rsplit('.', 1)[0]
        else:
            filepath = filename

        models_path = os.path.join(filepath, 'models.py')
        if os.path.exists(models_path):
            self.add_apps.add(module)

        # handle case of fooapp.tests, where fooapp.models exists
        models_path = os.path.join(filepath, os.pardir, 'models.py')
        if os.path.exists(models_path):
            self.add_apps.add(module.rsplit('.', 1)[0])

    def prepareTestRunner(self, test):
        # Temporarily restore the original streams so Django's setup output
        # is not swallowed by nose's capture.
        cur_stdout = sys.stdout
        cur_stderr = sys.stderr

        sys.stdout = self.sys_stdout
        sys.stderr = self.sys_stderr

        if self.add_apps:
            for app in self.add_apps:
                if app in settings.INSTALLED_APPS:
                    continue
                mod = load_app(app)
                if mod:
                    settings.INSTALLED_APPS.append(app)

        get_apps()

        self.runner.setup_test_environment()
        if self.needs_db:
            self.old_names = self.runner.setup_databases()

        sys.stdout = cur_stdout
        sys.stderr = cur_stderr

        self.started = True

    def finalize(self, result):
        # Only tear down what prepareTestRunner actually set up.
        if self.started:
            if self.needs_db:
                self.runner.teardown_databases(self.old_names)
            self.runner.teardown_test_environment()
| Python | 0 |
aa3a6dd01d7681f92d1be42fb2831126ced7a76e | Update __init__.py | django_su/__init__.py | django_su/__init__.py | import os
# The fake password we will use to authenticate su'ed users
SECRET_PASSWORD = os.urandom(64)

# Release version kept as a tuple for programmatic access; the string form
# below is derived from it.
VERSION = (0, 4, 8)
__version__ = '.'.join(map(str, VERSION))
| import os
# The fake password we will use to authenticate su'ed users
SECRET_PASSWORD = os.urandom(64)

# PEP 396 package version string.
__version__ = '0.4.8'
| Python | 0.000072 |
3566e996b350b1b5e74caa886b69c17b13ba4913 | Add HTTPs support to assertRedirectsTo | django_test_mixins.py | django_test_mixins.py | from django.test import TestCase
from django.core.cache import cache
import urlparse
class HttpCodeTestCase(TestCase):
    """Mixin of assertion helpers for checking HTTP status codes."""

    # TODO: this should be a private method.
    def assertHttpCode(self, response, code, code_description):
        message = ("Expected an HTTP %s (%s) response, but got HTTP %s"
                   % (code, code_description, response.status_code))
        self.assertEqual(response.status_code, code, message)

    def assertHttpOK(self, response):
        self.assertHttpCode(response, 200, "OK")

    def assertHttpCreated(self, response):
        self.assertHttpCode(response, 201, "Created")

    def assertHttpRedirect(self, response, location=None):
        """Assert that we had any redirect status code."""
        status = response.status_code
        self.assertTrue(
            300 <= status < 400,
            "Expected an HTTP 3XX (redirect) response, but got HTTP %s" % status
        )

        if location:
            absolute_location = (
                location if location.startswith("http://testserver/")
                else urlparse.urljoin("http://testserver/", location))
            self.assertEqual(response['Location'], absolute_location)

    def assertHttpBadRequest(self, response):
        self.assertHttpCode(response, 400, "Bad Request")

    def assertHttpUnauthorized(self, response):
        self.assertHttpCode(response, 401, "Unauthorized")

    def assertHttpForbidden(self, response):
        self.assertHttpCode(response, 403, "Forbidden")

    def assertHttpNotFound(self, response):
        self.assertHttpCode(response, 404, "Not Found")

    def assertHttpMethodNotAllowed(self, response):
        self.assertHttpCode(response, 405, "Method Not Allowed")
class EmptyCacheTestCase(TestCase):
    """Ensure that every test starts with an empty cache."""

    def setUp(self):
        # Run the normal TestCase setup first, then wipe Django's default
        # cache so values cached by earlier tests cannot leak into this one.
        super(EmptyCacheTestCase, self).setUp()
        cache.clear()
class FormValidationTestCase(TestCase):
    """Mixin providing an assertion about Django form validation results."""

    def assertFormInvalid(self, response, form_name="form"):
        """Assert that the response contains a form in the context, and that
        the form failed validation. The form is assumed to be in
        context[form_name].

        If the form has validated when it shouldn't, views often
        redirect somewhere, so we also check for HTTP 200.
        """
        context = response.context
        form = None
        if context:
            try:
                form = context[form_name]
            except KeyError:
                form = None

        if not form:
            self.fail("Could not find a form in the response.")

        self.assertFalse(form.is_valid(), "Expected form to be invalid, but it was valid.")

        code = response.status_code
        self.assertEqual(
            code, 200,
            "Expected HTTP 200, but got HTTP %d. "
            "Looks like the form validated when it shouldn't." % code)
class RedirectTestCase(TestCase):
    """Mixin providing a redirect assertion that supports external URLs."""

    def assertRedirectsTo(self, response, expected_url):
        """Django's assertRedirects doesn't support external URLs, so we roll
        our own here. Note that the test client can't fetch external
        URLs, so we mustn't use fetch=True.
        """
        if response.status_code != 302:
            self.fail("Did not redirect (got HTTP %s instead)." % response.status_code)

        if hasattr(response, "redirect_chain"):
            self.fail("You can't use assertRedirects with follow=True.")

        # NOTE(review): reads Django's private response._headers layout —
        # confirm against the Django version in use.
        final_url = response._headers['location'][1]

        # str.startswith accepts a tuple of prefixes: anything that isn't
        # already an absolute http/https URL is treated as relative.
        if not expected_url.startswith(('http://', 'https://')):
            # we were given a relative URL, so convert it
            expected_url = "http://testserver%s" % expected_url

        self.assertEqual(
            final_url, expected_url,
            "Expected to be redirected to %s, but got %s instead." % (expected_url, final_url)
        )
| from django.test import TestCase
from django.core.cache import cache
import urlparse
class HttpCodeTestCase(TestCase):
    """Assertion helpers that check response HTTP status codes."""

    # TODO: this should be a private method.
    def assertHttpCode(self, response, code, code_description):
        actual = response.status_code
        self.assertEqual(
            actual, code,
            "Expected an HTTP {0} ({1}) response, but got HTTP {2}".format(
                code, code_description, actual))

    def assertHttpOK(self, response):
        self.assertHttpCode(response, 200, "OK")

    def assertHttpCreated(self, response):
        self.assertHttpCode(response, 201, "Created")

    def assertHttpRedirect(self, response, location=None):
        """Assert that we had any redirect status code."""
        self.assertTrue(
            300 <= response.status_code < 400,
            "Expected an HTTP 3XX (redirect) response, but got HTTP {0}".format(
                response.status_code)
        )

        if location:
            prefix = "http://testserver/"
            if location.startswith(prefix):
                absolute_location = location
            else:
                absolute_location = urlparse.urljoin(prefix, location)
            self.assertEqual(response['Location'], absolute_location)

    def assertHttpBadRequest(self, response):
        self.assertHttpCode(response, 400, "Bad Request")

    def assertHttpUnauthorized(self, response):
        self.assertHttpCode(response, 401, "Unauthorized")

    def assertHttpForbidden(self, response):
        self.assertHttpCode(response, 403, "Forbidden")

    def assertHttpNotFound(self, response):
        self.assertHttpCode(response, 404, "Not Found")

    def assertHttpMethodNotAllowed(self, response):
        self.assertHttpCode(response, 405, "Method Not Allowed")
class EmptyCacheTestCase(TestCase):
    """Ensure that every test starts with an empty cache."""

    def setUp(self):
        # Run the normal TestCase setup first, then wipe Django's default
        # cache so values cached by earlier tests cannot leak into this one.
        super(EmptyCacheTestCase, self).setUp()
        cache.clear()
class FormValidationTestCase(TestCase):
    def assertFormInvalid(self, response, form_name="form"):
        """Assert that the response contains a form in the context, and that
        the form failed validation. The form is assumed to be in
        context[form_name].

        If the form has validated when it shouldn't, views often
        redirect somewhere, so we also check for HTTP 200.
        """
        found = None
        try:
            if response.context:
                found = response.context[form_name]
        except KeyError:
            found = None

        if not found:
            self.fail("Could not find a form in the response.")

        self.assertFalse(found.is_valid(),
                         "Expected form to be invalid, but it was valid.")

        self.assertEqual(
            response.status_code, 200,
            "Expected HTTP 200, but got HTTP %d. "
            "Looks like the form validated when it shouldn't." % response.status_code)
class RedirectTestCase(TestCase):
    """Mixin providing a redirect assertion that supports external URLs."""

    def assertRedirectsTo(self, response, expected_url):
        """Django's assertRedirects doesn't support external URLs, so we roll
        our own here. Note that the test client can't fetch external
        URLs, so we mustn't use fetch=True.
        """
        if response.status_code != 302:
            self.fail("Did not redirect (got HTTP %s instead)." % response.status_code)

        if hasattr(response, "redirect_chain"):
            self.fail("You can't use assertRedirects with follow=True.")

        # NOTE(review): reads Django's private response._headers layout —
        # confirm against the Django version in use.
        final_url = response._headers['location'][1]

        # BUGFIX: previously only 'http://' was recognised as absolute, so an
        # expected 'https://...' URL was wrongly rewritten as relative.
        if not expected_url.startswith(('http://', 'https://')):
            # we were given a relative URL, so convert it
            expected_url = "http://testserver%s" % expected_url

        self.assertEqual(
            final_url, expected_url,
            "Expected to be redirected to %s, but got %s instead." % (expected_url, final_url)
        )
| Python | 0 |
f860d338ae22c73ad7e313bf9cd268014be138db | Add Dense Layer to SpecGAN Discriminator | structures/SpecGAN.py | structures/SpecGAN.py | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from tensorflow.keras.layers import Dense, ReLU, LeakyReLU, Conv2D, Conv2DTranspose, Reshape, AveragePooling1D, Flatten
from tensorflow.keras import Model, Sequential
class Generator(Model):
    """SpecGAN generator: maps a latent vector to a spectrogram image."""

    def __init__(self, channels=1, d=4):
        super(Generator, self).__init__()

        stack = [
            Dense(256 * d),
            Reshape((4, 4, 16 * d)),
            ReLU(),
        ]
        # Five transposed-conv upsampling stages, halving the channel
        # multiplier at each stage and ending at `channels`.
        for filters in (8 * d, 4 * d, 2 * d, d, channels):
            stack.append(Conv2DTranspose(filters=filters, kernel_size=(6, 6),
                                         strides=(2, 2), padding='same'))
            stack.append(ReLU())
        # Final layer with asymmetric stride and no activation.
        stack.append(Conv2DTranspose(filters=channels, kernel_size=(6, 6),
                                     strides=(1, 2), padding='same'))

        self.l = Sequential(stack)

    def call(self, z):
        return self.l(z)
class Discriminator(Model):
    """SpecGAN discriminator: conv stack followed by a scalar score."""

    def __init__(self, d=4):
        super(Discriminator, self).__init__()

        stack = []
        # Downsampling conv stages; the last stage uses an asymmetric stride.
        for filters, strides in ((d, (2, 2)), (2 * d, (2, 2)), (4 * d, (2, 2)),
                                 (8 * d, (2, 2)), (16 * d, (1, 2))):
            stack.append(Conv2D(filters=filters, kernel_size=(6, 6),
                                strides=strides, padding='same'))
            stack.append(LeakyReLU(alpha=0.2))
        # Flatten and project to a single critic score.
        stack.append(Flatten())
        stack.append(Dense(1))

        self.l = Sequential(stack)

    def call(self, x):
        return self.l(x)
| # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from tensorflow.keras.layers import Dense, ReLU, LeakyReLU, Conv2D, Conv2DTranspose, Reshape, AveragePooling1D, Flatten
from tensorflow.keras import Model, Sequential
class Generator(Model):
    """SpecGAN generator network (latent vector -> spectrogram)."""

    def __init__(self, channels=1, d=4):
        super(Generator, self).__init__()

        # Same layer sequence as before, expressed as a single literal.
        self.l = Sequential([
            Dense(256 * d),
            Reshape((4, 4, 16 * d)),
            ReLU(),
            Conv2DTranspose(filters=8 * d, kernel_size=(6, 6), strides=(2, 2), padding='same'),
            ReLU(),
            Conv2DTranspose(filters=4 * d, kernel_size=(6, 6), strides=(2, 2), padding='same'),
            ReLU(),
            Conv2DTranspose(filters=2 * d, kernel_size=(6, 6), strides=(2, 2), padding='same'),
            ReLU(),
            Conv2DTranspose(filters=1 * d, kernel_size=(6, 6), strides=(2, 2), padding='same'),
            ReLU(),
            Conv2DTranspose(filters=channels, kernel_size=(6, 6), strides=(2, 2), padding='same'),
            ReLU(),
            Conv2DTranspose(filters=channels, kernel_size=(6, 6), strides=(1, 2), padding='same'),
        ])

    def call(self, z):
        return self.l(z)
class Discriminator(Model):
    """SpecGAN discriminator network (fully convolutional)."""

    def __init__(self, channels=1, d=4):
        # NOTE(review): `channels` is accepted but never used here — confirm
        # whether it was meant to shape an input layer.
        super(Discriminator, self).__init__()

        stack = []
        # Five stride-2 downsampling stages, doubling the channel multiplier.
        for filters in (d, 2 * d, 4 * d, 8 * d, 16 * d):
            stack.append(Conv2D(filters=filters, kernel_size=(6, 6), strides=(2, 2)))
            stack.append(LeakyReLU(alpha=0.2))

        self.l = Sequential(stack)

    def call(self, x):
        return self.l(x)
| Python | 0.000001 |
f623312b5df6e8f201f641f87193075e8d3f70ea | Add version attribute | nuxeo-drive-client/nxdrive/__init__.py | nuxeo-drive-client/nxdrive/__init__.py | _version_ = '1.0.0-dev' | Python | 0 | |
c9277fa65afcf513c2e3000193d7837900ff8ee1 | Improve logging runtime state poll fail message | src/nodeconductor_openstack/tasks/base.py | src/nodeconductor_openstack/tasks/base.py | from celery import shared_task
from nodeconductor.core.tasks import Task
from .. import models
# TODO: move this signal to itacloud assembly application
@shared_task
def register_instance_in_zabbix(instance_uuid):
    """Celery task: register the instance with the given UUID in Zabbix."""
    # Function-local import kept from the original, deferring the zabbix
    # template dependency until the task actually runs.
    from nodeconductor.template.zabbix import register_instance
    register_instance(models.Instance.objects.get(uuid=instance_uuid))
class RuntimeStateException(Exception):
    """Raised when a polled resource reaches its erred runtime state."""
    pass
class PollRuntimeStateTask(Task):
    """Poll a backend resource until it reaches a terminal runtime state.

    Retries every ``default_retry_delay`` seconds, up to ``max_retries``
    times, until the state equals ``success_state`` or ``erred_state``.
    """
    max_retries = 300
    default_retry_delay = 5

    def get_backend(self, instance):
        return instance.get_backend()

    def execute(self, instance, backend_pull_method, success_state, erred_state):
        backend = self.get_backend(instance)
        getattr(backend, backend_pull_method)(instance)
        instance.refresh_from_db()
        state = instance.runtime_state
        if state == erred_state:
            raise RuntimeStateException(
                '%s %s (PK: %s) runtime state become erred: %s' % (
                    instance.__class__.__name__, instance, instance.pk, erred_state))
        if state != success_state:
            self.retry()
        return instance
class PollBackendCheckTask(Task):
    """Poll until the backend reports the object as gone."""
    max_retries = 60
    default_retry_delay = 5

    def get_backend(self, instance):
        return instance.get_backend()

    def execute(self, instance, backend_check_method):
        # backend_check_method should return True if object does not exist at backend
        backend = self.get_backend(instance)
        is_gone = getattr(backend, backend_check_method)(instance)
        if not is_gone:
            self.retry()
        return instance
| from celery import shared_task
from nodeconductor.core.tasks import Task
from .. import models
# TODO: move this signal to itacloud assembly application
@shared_task
def register_instance_in_zabbix(instance_uuid):
    """Register the instance identified by ``instance_uuid`` in Zabbix."""
    from nodeconductor.template.zabbix import register_instance
    target = models.Instance.objects.get(uuid=instance_uuid)
    register_instance(target)
class RuntimeStateException(Exception):
    """Raised when a polled resource reaches its erred runtime state."""
    pass
class PollRuntimeStateTask(Task):
    """Poll a backend resource until it reaches a terminal runtime state.

    Retries every ``default_retry_delay`` seconds, up to ``max_retries``
    times, until the state equals ``success_state`` or ``erred_state``.
    """
    max_retries = 300
    default_retry_delay = 5

    def get_backend(self, instance):
        return instance.get_backend()

    def execute(self, instance, backend_pull_method, success_state, erred_state):
        backend = self.get_backend(instance)
        getattr(backend, backend_pull_method)(instance)
        instance.refresh_from_db()
        if instance.runtime_state not in (success_state, erred_state):
            self.retry()
        elif instance.runtime_state == erred_state:
            # IMPROVEMENT: include the concrete model class name so logs
            # identify what kind of resource failed, not just its repr.
            raise RuntimeStateException(
                '%s %s (PK: %s) runtime state become erred: %s' % (
                    instance.__class__.__name__, instance, instance.pk, erred_state))
        return instance
class PollBackendCheckTask(Task):
    """Poll until the backend reports the object as gone."""
    max_retries = 60
    default_retry_delay = 5

    def get_backend(self, instance):
        return instance.get_backend()

    def execute(self, instance, backend_check_method):
        # backend_check_method should return True if object does not exist at backend
        checker = getattr(self.get_backend(instance), backend_check_method)
        if not checker(instance):
            self.retry()
        return instance
| Python | 0.000001 |
f165ddf15914bd9b1c3720e99b3171fb73d331a3 | kNN remove l5 distance | ParamSklearn/components/classification/k_nearest_neighbors.py | ParamSklearn/components/classification/k_nearest_neighbors.py | import sklearn.neighbors
from HPOlibConfigSpace.configuration_space import ConfigurationSpace
from HPOlibConfigSpace.hyperparameters import CategoricalHyperparameter, \
Constant, UnParametrizedHyperparameter, UniformIntegerHyperparameter
from HPOlibConfigSpace.conditions import EqualsCondition
from ParamSklearn.components.classification_base import ParamSklearnClassificationAlgorithm
from ParamSklearn.util import DENSE, SPARSE, PREDICTIONS
class KNearestNeighborsClassifier(ParamSklearnClassificationAlgorithm):
    """ParamSklearn wrapper around sklearn.neighbors.KNeighborsClassifier."""

    def __init__(self, n_neighbors, weights, algorithm='auto', p=2,
                 leaf_size=30, random_state=None):
        """Validate and store hyperparameters; no model is built yet."""
        self.n_neighbors = int(n_neighbors)
        if weights not in ("uniform", "distance"):
            raise ValueError("'weights' should be in ('uniform', 'distance'): "
                             "%s" % weights)
        self.weights = weights
        # TODO: 'metric' is currently fixed to sklearn's default (minkowski);
        # parametrizing it would require additional metric hyperparameters.
        self.algorithm = algorithm
        self.p = int(p)
        self.leaf_size = int(leaf_size)
        # Stored for interface compatibility; KNN itself is deterministic.
        self.random_state = random_state
        # BUGFIX: predict()/predict_proba() test `self.estimator is None`,
        # but the attribute never existed before fit(), so calling them on
        # an unfitted model raised AttributeError instead of the intended
        # NotImplementedError.
        self.estimator = None

    def fit(self, X, Y):
        self.estimator = \
            sklearn.neighbors.KNeighborsClassifier(
                n_neighbors=self.n_neighbors, weights=self.weights,
                p=self.p, algorithm=self.algorithm,
                leaf_size=self.leaf_size)
        self.estimator.fit(X, Y)
        return self

    def predict(self, X):
        if self.estimator is None:
            raise NotImplementedError()
        return self.estimator.predict(X)

    def predict_proba(self, X):
        if self.estimator is None:
            raise NotImplementedError()
        return self.estimator.predict_proba(X)

    @staticmethod
    def get_properties():
        return {'shortname': 'KNN',
                'name': 'K-Nearest Neighbor Classification',
                'handles_missing_values': False,
                'handles_nominal_values': False,
                'handles_numerical_features': True,
                'prefers_data_scaled': True,
                # Find out if this is good because of sparsity
                'prefers_data_normalized': False,
                'handles_regression': False,
                'handles_classification': True,
                'handles_multiclass': True,
                'handles_multilabel': False,
                'is_deterministic': True,
                'handles_sparse': True,
                'input': (DENSE, SPARSE),
                'output': PREDICTIONS,
                # TODO find out what is best used here!
                'preferred_dtype': None}

    @staticmethod
    def get_hyperparameter_search_space(dataset_properties=None):
        """Build the ConfigurationSpace describing tunable hyperparameters."""
        n_neighbors = UniformIntegerHyperparameter(
            name="n_neighbors", lower=1, upper=100, default=1)
        weights = CategoricalHyperparameter(
            name="weights", choices=["uniform", "distance"], default="uniform")
        algorithm = Constant(name='algorithm', value="auto")
        p = CategoricalHyperparameter(
            name="p", choices=[1, 2], default=2)
        leaf_size = Constant(name="leaf_size", value=30)

        cs = ConfigurationSpace()
        cs.add_hyperparameter(n_neighbors)
        cs.add_hyperparameter(weights)
        cs.add_hyperparameter(algorithm)
        cs.add_hyperparameter(p)
        cs.add_hyperparameter(leaf_size)

        return cs
| import sklearn.neighbors
from HPOlibConfigSpace.configuration_space import ConfigurationSpace
from HPOlibConfigSpace.hyperparameters import CategoricalHyperparameter, \
Constant, UnParametrizedHyperparameter, UniformIntegerHyperparameter
from HPOlibConfigSpace.conditions import EqualsCondition
from ParamSklearn.components.classification_base import ParamSklearnClassificationAlgorithm
from ParamSklearn.util import DENSE, SPARSE, PREDICTIONS
class KNearestNeighborsClassifier(ParamSklearnClassificationAlgorithm):
    """ParamSklearn wrapper around sklearn.neighbors.KNeighborsClassifier."""

    def __init__(self, n_neighbors, weights, algorithm='auto', p=2,
                 leaf_size=30, random_state=None):
        """Validate and store hyperparameters; no model is built yet."""
        self.n_neighbors = int(n_neighbors)
        if weights not in ("uniform", "distance"):
            raise ValueError("'weights' should be in ('uniform', 'distance'): "
                             "%s" % weights)
        self.weights = weights
        # TODO: 'metric' is currently fixed to sklearn's default (minkowski);
        # parametrizing it would require additional metric hyperparameters.
        self.algorithm = algorithm
        self.p = int(p)
        self.leaf_size = int(leaf_size)
        # Stored for interface compatibility; KNN itself is deterministic.
        self.random_state = random_state
        # BUGFIX: predict()/predict_proba() test `self.estimator is None`,
        # but the attribute never existed before fit(), so calling them on
        # an unfitted model raised AttributeError instead of the intended
        # NotImplementedError.
        self.estimator = None

    def fit(self, X, Y):
        self.estimator = \
            sklearn.neighbors.KNeighborsClassifier(
                n_neighbors=self.n_neighbors, weights=self.weights,
                p=self.p, algorithm=self.algorithm,
                leaf_size=self.leaf_size)
        self.estimator.fit(X, Y)
        return self

    def predict(self, X):
        if self.estimator is None:
            raise NotImplementedError()
        return self.estimator.predict(X)

    def predict_proba(self, X):
        if self.estimator is None:
            raise NotImplementedError()
        return self.estimator.predict_proba(X)

    @staticmethod
    def get_properties():
        return {'shortname': 'KNN',
                'name': 'K-Nearest Neighbor Classification',
                'handles_missing_values': False,
                'handles_nominal_values': False,
                'handles_numerical_features': True,
                'prefers_data_scaled': True,
                # Find out if this is good because of sparsity
                'prefers_data_normalized': False,
                'handles_regression': False,
                'handles_classification': True,
                'handles_multiclass': True,
                'handles_multilabel': False,
                'is_deterministic': True,
                'handles_sparse': True,
                'input': (DENSE, SPARSE),
                'output': PREDICTIONS,
                # TODO find out what is best used here!
                'preferred_dtype': None}

    @staticmethod
    def get_hyperparameter_search_space(dataset_properties=None):
        """Build the ConfigurationSpace describing tunable hyperparameters."""
        n_neighbors = UniformIntegerHyperparameter(
            name="n_neighbors", lower=1, upper=100, default=1)
        weights = CategoricalHyperparameter(
            name="weights", choices=["uniform", "distance"], default="uniform")
        algorithm = Constant(name='algorithm', value="auto")
        # Minkowski exponent 5 is only offered for dense data; sparse inputs
        # are restricted to the l1/l2 distances.
        if dataset_properties is not None and dataset_properties.get('sparse'):
            p_choices = [1, 2]
        else:
            p_choices = [1, 2, 5]
        p = CategoricalHyperparameter(
            name="p", choices=p_choices, default=2)
        leaf_size = Constant(name="leaf_size", value=30)

        # Unparametrized
        # TODO: If we further parametrize 'metric' we need more metric params
        #metric = UnParametrizedHyperparameter(name="metric", value="minkowski")

        cs = ConfigurationSpace()
        cs.add_hyperparameter(n_neighbors)
        cs.add_hyperparameter(weights)
        #cs.add_hyperparameter(metric)
        cs.add_hyperparameter(algorithm)
        cs.add_hyperparameter(p)
        cs.add_hyperparameter(leaf_size)

        # Conditions
        #metric_p = EqualsCondition(parent=metric, child=p, value="minkowski")
        #cs.add_condition(metric_p)

        return cs
| Python | 0.999757 |
e3035fb91a96a3ff5627b6847203e3dc11fbc78f | Add libunwind-1.2.1 (#8145) | var/spack/repos/builtin/packages/libunwind/package.py | var/spack/repos/builtin/packages/libunwind/package.py | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Libunwind(AutotoolsPackage):
"""A portable and efficient C programming interface (API) to determine
the call-chain of a program."""
homepage = "http://www.nongnu.org/libunwind/"
url = "http://download.savannah.gnu.org/releases/libunwind/libunwind-1.1.tar.gz"
version('1.2.1', '06ba9e60d92fd6f55cd9dadb084df19e')
version('1.1', 'fb4ea2f6fbbe45bf032cd36e586883ce')
| ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Libunwind(AutotoolsPackage):
"""A portable and efficient C programming interface (API) to determine
the call-chain of a program."""
homepage = "http://www.nongnu.org/libunwind/"
url = "http://download.savannah.gnu.org/releases/libunwind/libunwind-1.1.tar.gz"
version('1.1', 'fb4ea2f6fbbe45bf032cd36e586883ce')
| Python | 0.000002 |
77db7bb96686e3308a4061d24c257466d9987405 | add delete_project dashboard api. | rio/blueprints/dashboard.py | rio/blueprints/dashboard.py | # -*- coding: utf-8 -*-
from slugify import slugify
from flask import Blueprint
from flask import jsonify
from flask_wtf import Form
from wtforms import StringField
from wtforms.validators import DataRequired
from wtforms.validators import ValidationError
from wtforms.validators import Length
from rio.utils.user import get_current_user_id
from rio.utils.user import login_required
from rio.utils.slugify import slugify
from rio.models import add_instance
from rio.models import delete_instance
from rio.models import get_data_or_404
bp = Blueprint('dashboard', __name__)
class NewProjectForm(Form):
name = StringField('Name', validators=[DataRequired(), Length(max=64)])
class ConfirmDeleteProjectForm(Form):
name = StringField('Name', validators=[DataRequired(), Length(max=64)])
@bp.errorhandler(404)
def handle_not_found(exception):
return jsonify(message='not found'), 404
@bp.route('/projects/new', methods=['POST'])
@login_required
def new_project():
"""New Project."""
form = NewProjectForm()
if not form.validate_on_submit():
return jsonify(errors=form.errors), 400
data = form.data
data['slug'] = slugify(data['name'])
data['owner_id'] = get_current_user_id()
id = add_instance('project', **data)
if not id:
return jsonify(errors={'name': ['duplicated slug.']}), 400
project = get_data_or_404('project', id)
return jsonify(**project)
@bp.route('/projects/<int:project_id>', methods=['DELETE'])
@login_required
def delete_project(project_id):
"""Delete Project."""
project = get_data_or_404('project', project_id)
if project['owner_id'] != get_current_user_id():
return jsonify(message='forbidden'), 403
delete_instance('project', project_id)
return jsonify({})
@bp.route('/projects/<int:project_id>/transfer', methods=['POST'])
def transfer_project(project_id):
pass
| # -*- coding: utf-8 -*-
from slugify import slugify
from flask import Blueprint
from flask import jsonify
from flask_wtf import Form
from wtforms import StringField
from wtforms.validators import DataRequired
from wtforms.validators import ValidationError
from wtforms.validators import Length
from rio.utils.user import get_current_user_id
from rio.utils.user import login_required
from rio.utils.slugify import slugify
from rio.models import add_instance
from rio.models import get_data_or_404
bp = Blueprint('dashboard', __name__)
class NewProjectForm(Form):
name = StringField('Name', validators=[DataRequired(), Length(max=64)])
class ConfirmDeleteProjectForm(Form):
name = StringField('Name', validators=[DataRequired(), Length(max=64)])
@bp.errorhandler(404)
def handle_not_found(exception):
return jsonify(message='not found'), 404
@bp.route('/projects/new', methods=['POST'])
@login_required
def new_project():
"""New Project."""
form = NewProjectForm()
if not form.validate_on_submit():
return jsonify(errors=form.errors), 400
data = form.data
data['slug'] = slugify(data['name'])
data['owner_id'] = get_current_user_id()
id = add_instance('project', **data)
if not id:
return jsonify(errors={'name': ['duplicated slug.']}), 400
project = get_data_or_404('project', id)
return jsonify(**project)
@bp.route('/projects/<int:project_id>', methods=['DELETE'])
@login_required
def delete_project(project_id):
project = get_data_or_404('project', project_id)
if project['owner_id'] != get_current_user_id():
return jsonify(message='forbidden'), 403
# TODO: implement delete_project
task = delete_project.delay(project_id)
return jsonify()
@bp.route('/projects/<int:project_id>/transfer', methods=['POST'])
def transfer_project(project_id):
pass
| Python | 0 |
91a77b860387ebed146b9e4e604d007bfabf0b9e | Fix potential bug in parameter passing | lib/ansible/plugins/action/normal.py | lib/ansible/plugins/action/normal.py | # (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
def run(self, tmp=None, task_vars=dict()):
results = self._execute_module(tmp=tmp, task_vars=task_vars)
# Remove special fields from the result, which can only be set
# internally by the executor engine. We do this only here in
# the 'normal' action, as other action plugins may set this.
for field in ('ansible_notify',):
if field in results:
results.pop(field)
return results
| # (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
def run(self, tmp=None, task_vars=dict()):
results = self._execute_module(tmp, task_vars=task_vars)
# Remove special fields from the result, which can only be set
# internally by the executor engine. We do this only here in
# the 'normal' action, as other action plugins may set this.
for field in ('ansible_notify',):
if field in results:
results.pop(field)
return results
| Python | 0.000001 |
e0db9a970c6ea778419cc1f20ca66adedffb7aae | Set HOME, allow errors to pass through to stdout/stderr | utils/mwm.py | utils/mwm.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import logging
import os
import shutil
import subprocess
import tempfile
from string import Template
from .artifact import Artifact
LOG = logging.getLogger(__name__)
class MWM(object):
name = 'mwm'
description = 'maps.me MWM'
cmd = Template('generate_mwm.sh $input')
def __init__(self, input):
"""
Initialize the MWM generation utility.
Args:
pbf: the source PBF
"""
self.input = input
self.output = os.path.splitext(input)[0] + '.mwm'
def run(self):
if self.is_complete:
LOG.debug("Skipping MWM, file exists")
return
convert_cmd = self.cmd.safe_substitute({
'input': self.input,
})
LOG.debug('Running: %s' % convert_cmd)
tmpdir = tempfile.mkdtemp()
env = os.environ.copy()
env.update(HOME=tmpdir, MWM_WRITABLE_DIR=tmpdir, TARGET=os.path.dirname(self.output))
try:
subprocess.check_call(
convert_cmd,
env=env,
shell=True,
executable='/bin/bash')
LOG.debug('generate_mwm.sh complete')
finally:
shutil.rmtree(tmpdir)
@property
def results(self):
return [Artifact([self.output], self.name)]
@property
def is_complete(self):
return os.path.isfile(self.output)
| # -*- coding: utf-8 -*-
from __future__ import absolute_import
import logging
import os
import shutil
import subprocess
import tempfile
from string import Template
from .artifact import Artifact
LOG = logging.getLogger(__name__)
class MWM(object):
name = 'mwm'
description = 'maps.me MWM'
cmd = Template('generate_mwm.sh $input')
def __init__(self, input):
"""
Initialize the MWM generation utility.
Args:
pbf: the source PBF
"""
self.input = input
self.output = os.path.splitext(input)[0] + '.mwm'
def run(self):
if self.is_complete:
LOG.debug("Skipping MWM, file exists")
return
convert_cmd = self.cmd.safe_substitute({
'input': self.input,
})
LOG.debug('Running: %s' % convert_cmd)
tmpdir = tempfile.mkdtemp()
env = os.environ.copy()
env.update(MWM_WRITABLE_DIR=tmpdir, TARGET=os.path.dirname(self.output))
try:
subprocess.check_call(
convert_cmd,
env=env,
shell=True,
executable='/bin/bash',
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
LOG.debug('generate_mwm.sh complete')
finally:
shutil.rmtree(tmpdir)
@property
def results(self):
return [Artifact([self.output], self.name)]
@property
def is_complete(self):
return os.path.isfile(self.output)
| Python | 0 |
bbcd5e00a4dcd991b9699ef6ae19339325bff7fd | Clean history bug resolved For a large data using batch size was not filtering proberly. So updated the batch logic LEARNER-2697 | ecommerce/core/management/commands/clean_history.py | ecommerce/core/management/commands/clean_history.py | from __future__ import unicode_literals
import logging
import time
from dateutil.parser import parse
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from oscar.core.loading import get_model
from ecommerce.courses.models import Course
from ecommerce.invoice.models import Invoice
logger = logging.getLogger(__name__)
Order = get_model('order', 'Order')
OrderLine = get_model('order', 'Line')
Product = get_model('catalogue', 'Product')
ProductAttributeValue = get_model('catalogue', 'ProductAttributeValue')
Refund = get_model('refund', 'Refund')
RefundLine = get_model('refund', 'RefundLine')
StockRecord = get_model('partner', 'StockRecord')
class Command(BaseCommand):
help = 'Clean history data'
def add_arguments(self, parser):
parser.add_argument('--cutoff_date',
action='store',
dest='cutoff_date',
type=str,
required=True,
help='Cutoff date before which the history data should be cleaned. '
'format is YYYY-MM-DD')
parser.add_argument('--batch_size',
action='store',
dest='batch_size',
type=int,
default=1000,
help='Maximum number of database rows to delete per query. '
'This helps avoid locking the database when deleting large amounts of data.')
parser.add_argument('--sleep_time',
action='store',
dest='sleep_time',
type=int,
default=10,
help='Sleep time between deletion of batches')
def handle(self, *args, **options):
cutoff_date = options['cutoff_date']
batch_size = options['batch_size']
sleep_time = options['sleep_time']
try:
cutoff_date = parse(cutoff_date)
except: # pylint: disable=bare-except
msg = 'Failed to parse cutoff date: {}'.format(cutoff_date)
logger.exception(msg)
raise CommandError(msg)
models = (
Order, OrderLine, Refund, RefundLine, ProductAttributeValue, Product, StockRecord, Course, Invoice,
)
for model in models:
qs = model.history.filter(history_date__lte=cutoff_date).order_by('-pk')
message = 'Cleaning {} rows from {} table'.format(qs.count(), model.__name__)
logger.info(message)
try:
# use Primary keys sorting to make sure unique batching as
# filtering batch does not work for huge data
max_pk = qs[0].pk
batch_start = qs.reverse()[0].pk
batch_stop = batch_start + batch_size
except IndexError:
continue
logger.info(message)
while batch_start <= max_pk:
queryset = model.history.filter(pk__gte=batch_start, pk__lt=batch_stop)
with transaction.atomic():
queryset.delete()
logger.info(
'Deleted instances of %s with PKs between %d and %d',
model.__name__, batch_start, batch_stop
)
if batch_stop < max_pk:
time.sleep(sleep_time)
batch_start = batch_stop
batch_stop += batch_size
| from __future__ import unicode_literals
import logging
import time
from dateutil.parser import parse
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from oscar.core.loading import get_model
from ecommerce.courses.models import Course
from ecommerce.invoice.models import Invoice
logger = logging.getLogger(__name__)
Order = get_model('order', 'Order')
OrderLine = get_model('order', 'Line')
Product = get_model('catalogue', 'Product')
ProductAttributeValue = get_model('catalogue', 'ProductAttributeValue')
Refund = get_model('refund', 'Refund')
RefundLine = get_model('refund', 'RefundLine')
StockRecord = get_model('partner', 'StockRecord')
class Command(BaseCommand):
help = 'Clean history data'
def add_arguments(self, parser):
parser.add_argument('--cutoff_date',
action='store',
dest='cutoff_date',
type=str,
required=True,
help='Cutoff date before which the history data should be cleaned. '
'format is YYYY-MM-DD')
parser.add_argument('--batch_size',
action='store',
dest='batch_size',
type=int,
default=1000,
help='Maximum number of database rows to delete per query. '
'This helps avoid locking the database when deleting large amounts of data.')
parser.add_argument('--sleep_time',
action='store',
dest='sleep_time',
type=int,
default=10,
help='Sleep time between deletion of batches')
def handle(self, *args, **options):
cutoff_date = options['cutoff_date']
batch_size = options['batch_size']
sleep_time = options['sleep_time']
try:
cutoff_date = parse(cutoff_date)
except: # pylint: disable=bare-except
msg = 'Failed to parse cutoff date: {}'.format(cutoff_date)
logger.exception(msg)
raise CommandError(msg)
models = (
Order, OrderLine, Refund, RefundLine, ProductAttributeValue, Product, StockRecord, Course, Invoice,
)
for model in models:
qs = model.history.filter(history_date__lte=cutoff_date)
message = 'Cleaning {} rows from {} table'.format(qs.count(), model.__name__)
logger.info(message)
qs = qs[:batch_size]
while qs.exists():
history_batch = list(qs.values_list('id', flat=True))
with transaction.atomic():
model.history.filter(pk__in=history_batch).delete()
logger.info(
'Deleted instances of %s with PKs between %d and %d',
model.__name__, history_batch[0], history_batch[-1]
)
time.sleep(sleep_time)
qs = model.history.filter(history_date__lte=cutoff_date)[:batch_size]
| Python | 0 |
33da474861334d361f3e990eda2518f919158726 | Fix reading from socket for Python 3 (PY-15772). | python/helpers/profiler/prof_io.py | python/helpers/profiler/prof_io.py | import traceback
from _prof_imports import TSerialization
from _prof_imports import TJSONProtocol
from _prof_imports import ProfilerRequest
from _prof_imports import IS_PY3K
from prof_util import ProfDaemonThread
import struct
def send_message(sock, message):
""" Send a serialized message (protobuf Message interface)
to a socket, prepended by its length packed in 4
bytes (big endian).
"""
s = TSerialization.serialize(message, TJSONProtocol.TJSONProtocolFactory())
packed_len = struct.pack('>L', len(s))
sock.sendall(packed_len + s)
def get_message(sock, msgtype):
""" Read a message from a socket. msgtype is a subclass of
of protobuf Message.
"""
len_buf = socket_read_n(sock, 4)
msg_len = struct.unpack('>L', len_buf)[0]
msg_buf = socket_read_n(sock, msg_len)
msg = msgtype()
TSerialization.deserialize(msg, msg_buf, TJSONProtocol.TJSONProtocolFactory())
return msg
def socket_read_n(sock, n):
""" Read exactly n bytes from the socket.
Raise RuntimeError if the connection closed before
n bytes were read.
"""
if IS_PY3K:
buf = bytearray()
else:
buf = ''
while n > 0:
data = sock.recv(n)
if data == '':
raise RuntimeError('unexpected connection close')
buf += data
n -= len(data)
return buf
class ProfWriter(object):
""" writer thread writes out the commands in an infinite loop """
def __init__(self, sock):
self.sock = sock
def addCommand(self, message):
send_message(self.sock, message)
class ProfReader(ProfDaemonThread):
""" reader thread reads and dispatches commands in an infinite loop """
def __init__(self, sock, message_processor):
ProfDaemonThread.__init__(self)
self.sock = sock
self.processor = message_processor
self.setName("profiler.Reader")
def OnRun(self):
try:
while not self.killReceived:
try:
message = get_message(self.sock, ProfilerRequest)
except:
traceback.print_exc()
return # Finished communication.
try:
self.processor.process(message)
except:
traceback.print_exc()
except:
traceback.print_exc() | import traceback
from _prof_imports import TSerialization
from _prof_imports import TJSONProtocol
from _prof_imports import ProfilerRequest
from _prof_imports import IS_PY3K
from prof_util import ProfDaemonThread
import struct
def send_message(sock, message):
""" Send a serialized message (protobuf Message interface)
to a socket, prepended by its length packed in 4
bytes (big endian).
"""
s = TSerialization.serialize(message, TJSONProtocol.TJSONProtocolFactory())
packed_len = struct.pack('>L', len(s))
sock.sendall(packed_len + s)
def get_message(sock, msgtype):
""" Read a message from a socket. msgtype is a subclass of
of protobuf Message.
"""
len_buf = socket_read_n(sock, 4)
msg_len = struct.unpack('>L', len_buf)[0]
msg_buf = socket_read_n(sock, msg_len)
msg = msgtype()
TSerialization.deserialize(msg, msg_buf, TJSONProtocol.TJSONProtocolFactory())
return msg
def socket_read_n(sock, n):
""" Read exactly n bytes from the socket.
Raise RuntimeError if the connection closed before
n bytes were read.
"""
buf = ''
if IS_PY3K:
buf = bytearray()
while n > 0:
data = sock.recv(n)
if data == '':
raise RuntimeError('unexpected connection close')
buf += data
n -= len(data)
return buf
class ProfWriter(object):
""" writer thread writes out the commands in an infinite loop """
def __init__(self, sock):
self.sock = sock
def addCommand(self, message):
send_message(self.sock, message)
class ProfReader(ProfDaemonThread):
""" reader thread reads and dispatches commands in an infinite loop """
def __init__(self, sock, message_processor):
ProfDaemonThread.__init__(self)
self.sock = sock
self.processor = message_processor
self.setName("profiler.Reader")
def OnRun(self):
try:
while not self.killReceived:
try:
message = get_message(self.sock, ProfilerRequest)
except:
traceback.print_exc()
return # Finished communication.
try:
self.processor.process(message)
except:
traceback.print_exc()
except:
traceback.print_exc() | Python | 0 |
ee42b37a7dff1e111d7b4df71ece818e7c2f2d38 | set Keen.io settings | buildtimetrend/settings.py | buildtimetrend/settings.py | # vim: set expandtab sw=4 ts=4:
# pylint: disable=invalid-name,too-few-public-methods
'''
Manages settings of buildtime trend
Copyright (C) 2014 Dieter Adriaenssens <ruleant@users.sourceforge.net>
This file is part of buildtime-trend
<https://github.com/ruleant/buildtime-trend/>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import os
import yaml
import keen
import buildtimetrend
from buildtimetrend.collection import Collection
from buildtimetrend.tools import check_file
class Settings(object):
'''
Settings class is a singleton
Inspired by
http://python-3-patterns-idioms-test.readthedocs.org/en/latest/Singleton.html
'''
class __Settings(object):
'''
Settings class contains settings and config options
'''
def __init__(self):
'''
Initialise class
'''
self.settings = Collection()
# set project name
project_name = buildtimetrend.NAME
# use Travis repo slug as project name
if 'TRAVIS_REPO_SLUG' in os.environ:
project_name = os.getenv('TRAVIS_REPO_SLUG')
self.set_project_name(project_name)
def set_project_name(self, name):
'''
Set project name
Parameters :
- name : project name
'''
self.add_setting("project_name", name)
def get_project_name(self):
'''
Get project name
'''
return self.get_setting("project_name")
def add_setting(self, name, value):
'''
Add a setting
Parameters :
- name : Setting name
- value : Setting value
'''
self.settings.add_item(name, value)
def get_setting(self, name):
'''
Get a setting value
Parameters :
- name : Setting name
'''
return self.settings.get_item(name)
def load_config_file(self, config_file):
'''
Load settings from a config file
Parameters :
- config_file : name of the config file
'''
if not check_file(config_file):
return False
with open(config_file, 'rb') as file_stream:
config = yaml.load(file_stream)
self.settings.add_items(config["buildtimetrend"])
# set Keen.io settings
if "keen" in config:
if "project_id" in config["keen"]:
keen.project_id = config["keen"]["project_id"]
if "write_key" in config["keen"]:
keen.write_key = config["keen"]["write_key"]
if "read_key" in config["keen"]:
keen.read_key = config["keen"]["read_key"]
return True
def get_project_info(self):
'''
Get project info as a dictonary
'''
return {
"version": buildtimetrend.VERSION,
"schema_version": buildtimetrend.SCHEMA_VERSION,
"project_name": str(self.get_project_name())
}
instance = None
def __new__(cls): # __new__ always a classmethod
''' Create a singleton '''
if not Settings.instance:
Settings.instance = Settings.__Settings()
return Settings.instance
def __getattr__(self, name):
''' Redirect access to get singleton properties '''
return getattr(self.instance, name)
def __setattr__(self, name):
''' Redirect access to set singleton properties '''
return setattr(self.instance, name)
| # vim: set expandtab sw=4 ts=4:
# pylint: disable=invalid-name,too-few-public-methods
'''
Manages settings of buildtime trend
Copyright (C) 2014 Dieter Adriaenssens <ruleant@users.sourceforge.net>
This file is part of buildtime-trend
<https://github.com/ruleant/buildtime-trend/>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import os
import yaml
import buildtimetrend
from buildtimetrend.collection import Collection
from buildtimetrend.tools import check_file
class Settings(object):
'''
Settings class is a singleton
Inspired by
http://python-3-patterns-idioms-test.readthedocs.org/en/latest/Singleton.html
'''
class __Settings(object):
'''
Settings class contains settings and config options
'''
def __init__(self):
'''
Initialise class
'''
self.settings = Collection()
# set project name
project_name = buildtimetrend.NAME
# use Travis repo slug as project name
if 'TRAVIS_REPO_SLUG' in os.environ:
project_name = os.getenv('TRAVIS_REPO_SLUG')
self.set_project_name(project_name)
def set_project_name(self, name):
'''
Set project name
Parameters :
- name : project name
'''
self.add_setting("project_name", name)
def get_project_name(self):
'''
Get project name
'''
return self.get_setting("project_name")
def add_setting(self, name, value):
'''
Add a setting
Parameters :
- name : Setting name
- value : Setting value
'''
self.settings.add_item(name, value)
def get_setting(self, name):
'''
Get a setting value
Parameters :
- name : Setting name
'''
return self.settings.get_item(name)
def load_config_file(self, config_file):
'''
Load settings from a config file
Parameters :
- config_file : name of the config file
'''
if not check_file(config_file):
return False
with open(config_file, 'rb') as file_stream:
config = yaml.load(file_stream)
self.settings.add_items(config["buildtimetrend"])
return True
def get_project_info(self):
'''
Get project info as a dictonary
'''
return {
"version": buildtimetrend.VERSION,
"schema_version": buildtimetrend.SCHEMA_VERSION,
"project_name": str(self.get_project_name())
}
instance = None
def __new__(cls): # __new__ always a classmethod
''' Create a singleton '''
if not Settings.instance:
Settings.instance = Settings.__Settings()
return Settings.instance
def __getattr__(self, name):
''' Redirect access to get singleton properties '''
return getattr(self.instance, name)
def __setattr__(self, name):
''' Redirect access to set singleton properties '''
return setattr(self.instance, name)
| Python | 0 |
12efb71143a18e191e05a1b5f0e6d7c59854e0ba | fix brampton scraper class name | ca_on_brampton/__init__.py | ca_on_brampton/__init__.py | # coding: utf-8
from utils import CanadianJurisdiction
class Brampton(CanadianJurisdiction):
jurisdiction_id = u'ocd-jurisdiction/country:ca/csd:3521010/council'
geographic_code = 3521010
division_name = u'Brampton'
name = u'Brampton City Council'
url = 'http://www.brampton.ca'
| # coding: utf-8
from utils import CanadianJurisdiction
class London(CanadianJurisdiction):
jurisdiction_id = u'ocd-jurisdiction/country:ca/csd:3521010/council'
geographic_code = 3521010
division_name = u'Brampton'
name = u'Brampton City Council'
url = 'http://www.brampton.ca'
| Python | 0.000004 |
a4fbc3372a446861f086d847186726b80443f212 | add utils for printing results; add ndiff table | causalinference/results.py | causalinference/results.py | import numpy as np
from scipy.stats import norm
class Results(object):
def __init__(self, causal):
self.causal = causal
self.table_width = 80
def _varnames(self, varnums):
return ['X'+str(varnum+1) for varnum in varnums]
def _make_row(self, entries):
col_width = self.table_width // len(entries)
first_col_width = col_width + self.table_width % len(entries)
return ('%'+str(first_col_width)+'s' + ('%'+str(col_width)+'.3f')*(len(entries)-1)) % entries
def ndiff(self):
varnames = self._varnames(xrange(self.causal.K))
X_t_mean = self.causal.X_t.mean(0)
X_t_sd = np.sqrt(self.causal.X_t.var(0))
X_c_mean = self.causal.X_c.mean(0)
X_c_sd = np.sqrt(self.causal.X_c.var(0))
for i in xrange(self.causal.K):
print self._make_row((varnames[i], X_t_mean[i], X_t_sd[i], X_c_mean[i], X_c_sd[i], self.causal.ndiff[i]))
def propensity(self):
if not hasattr(self.causal, 'pscore'):
self.causal.propensity()
print 'Coefficients:', self.causal.pscore['coeff']
print 'Log-likelihood:', self.causal.pscore['loglike']
def summary(self):
header = ('%8s'+'%12s'*4+'%24s') % ('', 'est', 'std err', 'z', 'P>|z|', '[95% Conf. Int.]')
print header
print '-' * len(header)
tuples = (('ATE', self.causal.ate, self.causal.ate_se),
('ATT', self.causal.att, self.causal.att_se),
('ATC', self.causal.atc, self.causal.atc_se))
for (name, coef, se) in tuples:
t = coef / se
p = 1 - norm.cdf(np.abs(t))
lw = coef - 1.96*se
up = coef + 1.96*se
print self._make_row((name, coef, se, t, p, lw, up))
| import numpy as np
from scipy.stats import norm
class Results(object):
def __init__(self, causal):
self.causal = causal
def ndiff(self):
print self.causal.ndiff
def propensity(self):
if not hasattr(self.causal, 'pscore'):
self.causal.propensity()
print 'Coefficients:', self.causal.pscore['coeff']
print 'Log-likelihood:', self.causal.pscore['loglike']
def summary(self):
header = ('%8s'+'%12s'*4+'%24s') % ('', 'coef', 'std err', 'z', 'P>|z|', '[95% Conf. Int.]')
print header
print '-' * len(header)
tuples = (('ATE', self.causal.ate, self.causal.ate_se),
('ATT', self.causal.att, self.causal.att_se),
('ATC', self.causal.atc, self.causal.atc_se))
for (name, coef, se) in tuples:
t = coef / se
p = 1 - norm.cdf(np.abs(t))
lw = coef - 1.96*se
up = coef + 1.96*se
print ('%8s'+'%12.3f'*6) % (name, coef, se, t, p, lw, up)
| Python | 0 |
d0ce2b074ffd603c507069d8a5ab1189fad0ca56 | Update a version number from trunk r9016 | pywikibot/families/wikia_family.py | pywikibot/families/wikia_family.py | # -*- coding: utf-8 -*-
__version__ = '$Id$'
import family
# The Wikia Search family
# user-config.py: usernames['wikia']['wikia'] = 'User name'
class Family(family.Family):
def __init__(self):
family.Family.__init__(self)
self.name = u'wikia'
self.langs = {
u'wikia': None,
}
def hostname(self, code):
return u'www.wikia.com'
def version(self, code):
return "1.16.2"
def scriptpath(self, code):
return ''
def apipath(self, code):
return '/api.php'
| # -*- coding: utf-8 -*-
__version__ = '$Id$'
import family
# The Wikia Search family
# user-config.py: usernames['wikia']['wikia'] = 'User name'
class Family(family.Family):
def __init__(self):
family.Family.__init__(self)
self.name = u'wikia'
self.langs = {
u'wikia': None,
}
def hostname(self, code):
return u'www.wikia.com'
def version(self, code):
return "1.15.1"
def scriptpath(self, code):
return ''
def apipath(self, code):
return '/api.php'
| Python | 0 |
9161e2dfe0edd27004ccd964a39c092275e9e5ab | Add derivation outlines | eqs_backend/eqs_backend.py | eqs_backend/eqs_backend.py | # Copyright (c) 2016, Herman Bergwerf. All rights reserved.
# Use of this source code is governed by an AGPL-3.0-style license
# that can be found in the LICENSE file.
from flask import Flask, request
from neo4j.v1 import GraphDatabase, basic_auth
from .helpers import *
# TODO: consider using http://flask-restful.readthedocs.io/en/latest/
# http://blog.miguelgrinberg.com/post/designing-a-restful-api-using-flask-restful
# Define Flask server instance.
server = Flask(__name__)
driver = GraphDatabase.driver(
'bolt://0.0.0.0',
auth=basic_auth(
'neo4j',
'test'))
def setupDb():
"""
Setup empty database.
"""
db.run('CREATE (:ContextRoot)')
db.run('CREATE CONSTRAINT ON (node:Context) ASSERT node.label IS UNIQUE')
db.run('CREATE CONSTRAINT ON (node:Variable) ASSERT node.label IS UNIQUE')
def openDb():
"""
Open Neo4j session.
"""
return driver.session()
@server.route('/equation/', methods=['GET'])
def listEquations():
"""
REST interface for retrieving equations.
"""
return '{}'
@server.route('/search/')
def textSearch():
"""
Fulltext search interface to search for:
- contexts
- equation labels
- variables and aliases
"""
return '{}'
@server.route('/derivation/', methods=['POST'])
def appendDerivation():
"""
# Add derivation
A derivation has the following structure:
- One source relation: the derivation loads an external equation as base,
the source can be either a variable defenition or another derivation.
- A number of substitutions: in the derivation other equations or variable
definitions can be used for substitution.
- Rewritten expression: the expression that is equal to the source equation
after all substitutions are applied.
A derivation does not neccesarily have to substitute other equations. It can
simply be a rewritten form of the source equation. Note that SymPy can
assist in creating derivations. The main point is providing a more flexible
environment for adding custom derivations, and controlling which steps are
shown to the user.
"""
data = request.get_json()
if isDictAndContains(data, ['source', 'subs', 'expr']):
db = openDb()
# Retrieve source equation.
# Execute substitutions.
# Check output expression.
# Write expression to database.
db.close()
return dumpMessage('processed')
else:
return dumpMessage('failed', 'Incomplete data.')
@server.route('/variable/', methods=['POST'])
def addVariable():
"""
Add new variable within the given context.
"""
data = request.get_json()
if isDictAndContains(data, ['label', 'latex', 'parent', 'expr']):
db = openDb()
# Run query.
db.run('''
MATCH (parent:Context {{label:'{}'}})
CREATE (node:Variable {{label:'{}', latex:'{}', expr:'{}'}})
CREATE (node)-[:BelongsTo]->(parent)
'''.format(data['parent'], data['label'], data['latex'], data['expr']))
db.close()
return dumpMessage('processed')
else:
return dumpMessage('failed', 'Incomplete data.')
@server.route('/context/', methods=['POST'])
def appendContext():
"""
Append context to the given parent context.
If no parent is defined the context is appended to the root context.
"""
data = request.get_json()
if isDictAndContains(data, ['label']):
db = openDb()
# Find parent query.
parent = "Context {label:'{}'}".format(data[
'parent']) if 'parent' in data else 'ContextRoot'
# Run query.
db.run('''
MATCH (parent:{})
CREATE (node:Context {{label:'{}'}})
CREATE (node)-[:BelongsTo]->(parent)
'''.format(parent, data['label']))
db.close()
return dumpMessage('processed')
else:
return dumpMessage('failed', 'No context label provided.')
| # Copyright (c) 2016, Herman Bergwerf. All rights reserved.
# Use of this source code is governed by an AGPL-3.0-style license
# that can be found in the LICENSE file.
from flask import Flask, request
from neo4j.v1 import GraphDatabase, basic_auth
from .helpers import *
# Define Flask server instance.
server = Flask(__name__)
driver = GraphDatabase.driver(
'bolt://0.0.0.0',
auth=basic_auth(
'neo4j',
'test'))
def setupDb():
"""
Setup empty database.
"""
db.run('CREATE (:ContextRoot)')
db.run('CREATE CONSTRAINT ON (node:Context) ASSERT node.label IS UNIQUE')
db.run('CREATE CONSTRAINT ON (node:Variable) ASSERT node.label IS UNIQUE')
def openDb():
"""
Open Neo4j session.
"""
return driver.session()
@server.route('/equation/', methods=['GET'])
def listEquations():
"""
REST interface for retrieving equations.
"""
return '{}'
@server.route('/search/')
def textSearch():
"""
Fulltext search interface to search for:
- contexts
- equation labels
- variables and aliases
"""
return '{}'
@server.route('/derive/', methods=['POST'])
def appendDerivation():
"""
Append derivation to exiting equation.
"""
return '{}'
@server.route('/variable/', methods=['POST'])
def addVariable():
"""
Add new variable within the given context.
"""
data = request.get_json()
if isDictAndContains(data, ['label', 'latex', 'parent', 'expr']):
db = openDb()
# Run query.
db.run('''
MATCH (parent:Context {{label:'{}'}})
CREATE (node:Variable {{label:'{}', latex:'{}', expr:'{}'}})
CREATE (node)-[:BelongsTo]->(parent)
'''.format(data['parent'], data['label'], data['latex'], data['expr']))
db.close()
return dumpMessage('processed')
else:
return dumpMessage('failed', 'Incomplete data.')
@server.route('/context/', methods=['POST'])
def appendContext():
"""
Append context to the given parent context.
If no parent is defined the context is appended to the root context.
"""
data = request.get_json()
if isDictAndContains(data, ['label']):
db = openDb()
# Find parent query.
parent = "Context {label:'{}'}".format(data[
'parent']) if 'parent' in data else 'ContextRoot'
# Run query.
db.run('''
MATCH (parent:{})
CREATE (node:Context {{label:'{}'}})
CREATE (node)-[:BelongsTo]->(parent)
'''.format(parent, data['label']))
db.close()
return dumpMessage('processed')
else:
return dumpMessage('failed', 'No context label provided.')
| Python | 0.000005 |
27049d58b322bb50554198ecc64eab7731b86149 | add support for group metadata | zarr/meta.py | zarr/meta.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division
import json
import numpy as np
from zarr.compat import PY2, text_type
from zarr.errors import MetadataError
ZARR_FORMAT = 2
def decode_array_metadata(b):
s = text_type(b, 'ascii')
meta = json.loads(s)
zarr_format = meta.get('zarr_format', None)
if zarr_format != ZARR_FORMAT:
raise MetadataError('unsupported zarr format: %s' % zarr_format)
try:
meta = dict(
zarr_format=meta['zarr_format'],
shape=tuple(meta['shape']),
chunks=tuple(meta['chunks']),
dtype=decode_dtype(meta['dtype']),
compression=meta['compression'],
compression_opts=meta['compression_opts'],
fill_value=meta['fill_value'],
order=meta['order'],
)
except Exception as e:
raise MetadataError('error decoding metadata: %s' % e)
else:
return meta
def encode_array_metadata(meta):
meta = dict(
zarr_format=ZARR_FORMAT,
shape=meta['shape'],
chunks=meta['chunks'],
dtype=encode_dtype(meta['dtype']),
compression=meta['compression'],
compression_opts=meta['compression_opts'],
fill_value=meta['fill_value'],
order=meta['order'],
)
s = json.dumps(meta, indent=4, sort_keys=True, ensure_ascii=True)
b = s.encode('ascii')
return b
def encode_dtype(d):
if d.fields is None:
return d.str
else:
return d.descr
def _decode_dtype_descr(d):
# need to convert list of lists to list of tuples
if isinstance(d, list):
# recurse to handle nested structures
if PY2: # pragma: no cover
# under PY2 numpy rejects unicode field names
d = [(f.encode('ascii'), _decode_dtype_descr(v))
for f, v in d]
else:
d = [(f, _decode_dtype_descr(v)) for f, v in d]
return d
def decode_dtype(d):
d = _decode_dtype_descr(d)
return np.dtype(d)
def decode_group_metadata(b):
s = text_type(b, 'ascii')
meta = json.loads(s)
zarr_format = meta.get('zarr_format', None)
if zarr_format != ZARR_FORMAT:
raise MetadataError('unsupported zarr format: %s' % zarr_format)
try:
meta = dict(
zarr_format=meta['zarr_format'],
)
except Exception as e:
raise MetadataError('error decoding metadata: %s' % e)
else:
return meta
def encode_group_metadata(meta=None):
meta = dict(
zarr_format=ZARR_FORMAT,
)
s = json.dumps(meta, indent=4, sort_keys=True, ensure_ascii=True)
b = s.encode('ascii')
return b
| # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division
import json
import numpy as np
from zarr.compat import PY2, text_type
from zarr.errors import MetadataError
def decode_metadata(b):
s = text_type(b, 'ascii')
meta = json.loads(s)
zarr_format = meta.get('zarr_format', None)
if zarr_format != 1:
raise MetadataError('unsupported zarr format: %s' % zarr_format)
try:
meta = dict(
zarr_format=meta['zarr_format'],
shape=tuple(meta['shape']),
chunks=tuple(meta['chunks']),
dtype=decode_dtype(meta['dtype']),
compression=meta['compression'],
compression_opts=meta['compression_opts'],
fill_value=meta['fill_value'],
order=meta['order'],
)
except Exception as e:
raise MetadataError('error decoding metadata: %s' % e)
else:
return meta
def encode_metadata(meta):
meta = dict(
zarr_format=1,
shape=meta['shape'],
chunks=meta['chunks'],
dtype=encode_dtype(meta['dtype']),
compression=meta['compression'],
compression_opts=meta['compression_opts'],
fill_value=meta['fill_value'],
order=meta['order'],
)
s = json.dumps(meta, indent=4, sort_keys=True, ensure_ascii=True)
b = s.encode('ascii')
return b
def encode_dtype(d):
if d.fields is None:
return d.str
else:
return d.descr
def _decode_dtype_descr(d):
# need to convert list of lists to list of tuples
if isinstance(d, list):
# recurse to handle nested structures
if PY2: # pragma: no cover
# under PY2 numpy rejects unicode field names
d = [(f.encode('ascii'), _decode_dtype_descr(v))
for f, v in d]
else:
d = [(f, _decode_dtype_descr(v)) for f, v in d]
return d
def decode_dtype(d):
d = _decode_dtype_descr(d)
return np.dtype(d)
| Python | 0 |
1b2f9e8cff542868765f61d1af0eca004c1de791 | support skipping rows in the base processor | datapackage_pipelines_mojp/common/processors/base_processors.py | datapackage_pipelines_mojp/common/processors/base_processors.py | from itertools import chain
from datapackage_pipelines.wrapper import ingest, spew
from datapackage_pipelines_mojp import settings as mojp_settings
class BaseProcessor(object):
"""
all mojp processor should extend this class
it is pluggable into our unit tests to allow mocks and automated tests of processors
"""
def __init__(self, parameters, datapackage, resources, settings=None):
self._parameters = parameters
self._datapackage = datapackage
self._resources = resources
self._settings = mojp_settings if not settings else settings
@classmethod
def main(cls):
# can be used like this in datapackage processor files:
# if __main__ == '__main__':
# Processor.main()
spew(*cls(*ingest()).spew())
def spew(self):
self._datapackage, self._resources = self._process(self._datapackage, self._resources)
return self._datapackage, self._resources
def _process(self, datapackage, resources):
return datapackage, resources
def _get_settings(self, key=None, default=None):
if key:
ret = getattr(self._settings, key, default)
if default is None and ret is None:
raise Exception("unknown key: {}".format(key))
else:
return ret
else:
return self._settings
class AddResourcesProcessor(BaseProcessor):
def _get_resource_descriptors(self):
return []
def _get_resources_iterator(self):
return ()
def _process(self, datapackage, resources):
datapackage["resources"] += self._get_resource_descriptors()
resources = chain(resources, self._get_resources_iterator())
return super(AddResourcesProcessor, self)._process(datapackage, resources)
class FilterResourcesProcessor(BaseProcessor):
def _filter_datapackage(self, datapackage):
datapackage["resources"] = self._filter_resource_descriptors(datapackage["resources"])
return datapackage
def _filter_resource_descriptors(self, descriptors):
return [self._filter_resource_descriptor(descriptor) for descriptor in descriptors]
def _filter_resource_descriptor(self, descriptor):
return descriptor
def _filter_resources(self, resources, datapackage):
for i, resource in enumerate(resources):
resource_descriptor = datapackage["resources"][i]
yield self._filter_resource(resource, resource_descriptor)
def _filter_resource(self, resource, descriptor):
for row in resource:
filtered_row = self._filter_row(row, descriptor)
if filtered_row is not None:
yield filtered_row
def _filter_row(self, row, resource_descriptor):
return row
def _process(self, datapackage, resources):
datapackage = self._filter_datapackage(datapackage)
resources = self._filter_resources(resources, datapackage)
return super(FilterResourcesProcessor, self)._process(datapackage, resources)
class BaseDownloadProcessor(AddResourcesProcessor):
def _get_resource_descriptors(self):
return [{"name": self._get_source_name(),
"path": "{}.csv".format(self._get_source_name()),
"schema": self._get_schema()}]
def _get_resources_iterator(self):
if self._parameters.get("mock"):
return [self._mock_download()]
else:
return [self._download()]
def _get_schema(self):
raise NotImplementedError()
def _download(self):
raise NotImplementedError()
def _mock_download(self):
raise NotImplementedError()
def _get_source_name(self):
raise NotImplementedError()
| from itertools import chain
from datapackage_pipelines.wrapper import ingest, spew
from datapackage_pipelines_mojp import settings as mojp_settings
class BaseProcessor(object):
"""
all mojp processor should extend this class
it is pluggable into our unit tests to allow mocks and automated tests of processors
"""
def __init__(self, parameters, datapackage, resources, settings=None):
self._parameters = parameters
self._datapackage = datapackage
self._resources = resources
self._settings = mojp_settings if not settings else settings
@classmethod
def main(cls):
# can be used like this in datapackage processor files:
# if __main__ == '__main__':
# Processor.main()
spew(*cls(*ingest()).spew())
def spew(self):
self._datapackage, self._resources = self._process(self._datapackage, self._resources)
return self._datapackage, self._resources
def _process(self, datapackage, resources):
return datapackage, resources
def _get_settings(self, key=None, default=None):
if key:
ret = getattr(self._settings, key, default)
if default is None and ret is None:
raise Exception("unknown key: {}".format(key))
else:
return ret
else:
return self._settings
class AddResourcesProcessor(BaseProcessor):
def _get_resource_descriptors(self):
return []
def _get_resources_iterator(self):
return ()
def _process(self, datapackage, resources):
datapackage["resources"] += self._get_resource_descriptors()
resources = chain(resources, self._get_resources_iterator())
return super(AddResourcesProcessor, self)._process(datapackage, resources)
class FilterResourcesProcessor(BaseProcessor):
def _filter_datapackage(self, datapackage):
datapackage["resources"] = self._filter_resource_descriptors(datapackage["resources"])
return datapackage
def _filter_resource_descriptors(self, descriptors):
return [self._filter_resource_descriptor(descriptor) for descriptor in descriptors]
def _filter_resource_descriptor(self, descriptor):
return descriptor
def _filter_resources(self, resources, datapackage):
for i, resource in enumerate(resources):
resource_descriptor = datapackage["resources"][i]
yield self._filter_resource(resource, resource_descriptor)
def _filter_resource(self, resource, descriptor):
for row in resource:
yield self._filter_row(row, descriptor)
def _filter_row(self, row, resource_descriptor):
return row
def _process(self, datapackage, resources):
datapackage = self._filter_datapackage(datapackage)
resources = self._filter_resources(resources, datapackage)
return super(FilterResourcesProcessor, self)._process(datapackage, resources)
class BaseDownloadProcessor(AddResourcesProcessor):
def _get_resource_descriptors(self):
return [{"name": self._get_source_name(),
"path": "{}.csv".format(self._get_source_name()),
"schema": self._get_schema()}]
def _get_resources_iterator(self):
if self._parameters.get("mock"):
return [self._mock_download()]
else:
return [self._download()]
def _get_schema(self):
raise NotImplementedError()
def _download(self):
raise NotImplementedError()
def _mock_download(self):
raise NotImplementedError()
def _get_source_name(self):
raise NotImplementedError()
| Python | 0 |
69642fbfa143d475b3dcc548bffbda8a6dd6c680 | Enable template caching in production | rotd/settings/production.py | rotd/settings/production.py | # -*- coding: utf-8 -*-
from .base import *
from .util import get_env_setting
DEBUG = False
DOMAIN = get_env_setting('ROTD_DOMAIN')
ALLOWED_HOSTS = [
DOMAIN,
]
DATABASES = {
"default": {
"ENGINE": "django.db.backends.postgresql_psycopg2",
"NAME": get_env_setting('ROTD_DB_NAME'),
"USER": get_env_setting('ROTD_DB_USER'),
"PASSWORD": get_env_setting('ROTD_DB_PASSWORD'),
"HOST": "localhost",
"PORT": "",
},
}
SECRET_KEY = get_env_setting('ROTD_SECRET_KEY')
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = get_env_setting('ROTD_EMAIL_HOST')
EMAIL_HOST_PASSWORD = get_env_setting('ROTD_EMAIL_HOST_PASSWORD')
EMAIL_HOST_USER = get_env_setting('ROTD_EMAIL_HOST_USER')
EMAIL_PORT = get_env_setting('ROTD_EMAIL_PORT')
EMAIL_USE_TLS = True
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.abspath(os.path.join(BASE_DIR, 'templates'))],
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
'loaders': [
('django.template.loaders.cached.Loader', [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
]),
],
},
},
]
| # -*- coding: utf-8 -*-
from .base import *
from .util import get_env_setting
DEBUG = False
DOMAIN = get_env_setting('ROTD_DOMAIN')
ALLOWED_HOSTS = [
DOMAIN,
]
DATABASES = {
"default": {
"ENGINE": "django.db.backends.postgresql_psycopg2",
"NAME": get_env_setting('ROTD_DB_NAME'),
"USER": get_env_setting('ROTD_DB_USER'),
"PASSWORD": get_env_setting('ROTD_DB_PASSWORD'),
"HOST": "localhost",
"PORT": "",
},
}
SECRET_KEY = get_env_setting('ROTD_SECRET_KEY')
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = get_env_setting('ROTD_EMAIL_HOST')
EMAIL_HOST_PASSWORD = get_env_setting('ROTD_EMAIL_HOST_PASSWORD')
EMAIL_HOST_USER = get_env_setting('ROTD_EMAIL_HOST_USER')
EMAIL_PORT = get_env_setting('ROTD_EMAIL_PORT')
EMAIL_USE_TLS = True
| Python | 0 |
faaf1d64fc8c5b15c346f70288235426f0647757 | use /usr/bin/env python to run the script | FulltoSNP.py | FulltoSNP.py | #!/usr/bin/env python
import sys
import re
import itertools
import math
from Bio import SeqIO
#SNP alignment from full Alignment nexus file
#Check for correct commandline arguments
if len(sys.argv) != 4:
print("Usage: FulltoSNP.py <nexus file> <output file> <threshold>")
sys.exit(0)
#Get filenames
InFileName = sys.argv[1]
OutFileName = sys.argv[2]
threshold = sys.argv[3]
PosOutFileName = sys.argv[2]+'positions'
record_dict = SeqIO.to_dict(SeqIO.parse(InFileName,"nexus"))
#seperate speciesnames from sequences
seqs = []
titles = []
for key in record_dict:
titles.append(key)
x = record_dict[key]
seqs.append(x.seq)
#transpose string lists
thresh = math.ceil(float(threshold) * len(seqs))
print(thresh)
seqsTran = zip(*seqs)
snps = []
#for every tuple check if value is the same, if so remove tuple
pos = 1
positions=[]
for s in seqsTran[:]:
if len(set(s))!=1 and s.count('-')<= thresh:
snps.append(s)
positions.append(pos)
pos=pos+1
print(len(positions))
seqsTran = []
results = zip(*snps)
for i in range(len(results)):
results[i] = ''.join(results[i])
SeqDict={}
print(len(results[0]))
for i in range(len(results)):
SeqDict[titles[i]]=results[i]
OutFile = open(OutFileName,'w')
#write file header
OutFile.write("#NEXUS" + "\n" + "Begin DATA;" + "\n\t" + "Dimensions ntax=" + str(len(SeqDict)) + " nchar=" + str(len(results[0])) + ";" + "\n\t" + "Format datatype=DNA gap=-;" + "\n\t" + "Matrix" + "\n")
#write all of the SNPs into the new file
for key in SeqDict:
newSeq = "".join(SeqDict[key])
OutFile.write(key + "\n" + newSeq + "\n")
OutFile.write(";" + "\n" + "END;")
OutFile.close()
OutFile2 = open(PosOutFileName,'w')
for i in positions:
OutFile2.write(str(i)+'\n')
OutFile2.close()
| #!/usr/bin/env python2.6
import sys
import re
import itertools
import math
from Bio import SeqIO
#SNP alignment from full Alignment nexus file
#Check for correct commandline arguments
if len(sys.argv) != 4:
print("Usage: FulltoSNP.py <nexus file> <output file> <threshold>")
sys.exit(0)
#Get filenames
InFileName = sys.argv[1]
OutFileName = sys.argv[2]
threshold = sys.argv[3]
PosOutFileName = sys.argv[2]+'positions'
record_dict = SeqIO.to_dict(SeqIO.parse(InFileName,"nexus"))
#seperate speciesnames from sequences
seqs = []
titles = []
for key in record_dict:
titles.append(key)
x = record_dict[key]
seqs.append(x.seq)
#transpose string lists
thresh = math.ceil(float(threshold) * len(seqs))
print(thresh)
seqsTran = zip(*seqs)
snps = []
#for every tuple check if value is the same, if so remove tuple
pos = 1
positions=[]
for s in seqsTran[:]:
if len(set(s))!=1 and s.count('-')<= thresh:
snps.append(s)
positions.append(pos)
pos=pos+1
print(len(positions))
seqsTran = []
results = zip(*snps)
for i in range(len(results)):
results[i] = ''.join(results[i])
SeqDict={}
print(len(results[0]))
for i in range(len(results)):
SeqDict[titles[i]]=results[i]
OutFile = open(OutFileName,'w')
#write file header
OutFile.write("#NEXUS" + "\n" + "Begin DATA;" + "\n\t" + "Dimensions ntax=" + str(len(SeqDict)) + " nchar=" + str(len(results[0])) + ";" + "\n\t" + "Format datatype=DNA gap=-;" + "\n\t" + "Matrix" + "\n")
#write all of the SNPs into the new file
for key in SeqDict:
newSeq = "".join(SeqDict[key])
OutFile.write(key + "\n" + newSeq + "\n")
OutFile.write(";" + "\n" + "END;")
OutFile.close()
OutFile2 = open(PosOutFileName,'w')
for i in positions:
OutFile2.write(str(i)+'\n')
OutFile2.close()
| Python | 0.000001 |
4b7e77b51318522db03f0b8230e9f7400cb8a312 | Add remaining test api keys to view tests | evexml/tests/test_views.py | evexml/tests/test_views.py | """evexml app unittests for views
"""
import json
from django.conf import settings
from django.test import TestCase
from django.shortcuts import reverse
class AddAPIViewTest(TestCase):
"""Tests for the view which displays the "Add API" form.
"""
@classmethod
def setUpClass(cls):
super(AddAPIViewTest, cls).setUpClass()
data_dir = getattr(settings, 'DATA_DIR')
with data_dir.joinpath('conf', 'test_secrets.json').open() as handle:
secrets = json.load(handle)
cls.testkeys = secrets['apikeys']
cls.url = reverse('eveapi_add')
def test_invalid_api(self):
"""Ensure an invalid api is rejected.
"""
response = self.client.post(self.url, data={
'key_id': '1',
'v_code': 'test'}, follow=True)
self.assertContains(response, 'problem')
# Mask: 4294967295
def test_api_full_all(self):
"""Ensure full and account-wide keypair is accepted.
"""
keypair = self.testkeys['full']['all']
response = self.client.post(self.url, data={
'key_id': keypair['key_id'],
'v_code': keypair['v_code']}, follow=True)
self.assertContains(response, 'success')
def test_api_full_char_corp(self):
"""Ensure full but corp character only keypair is rejected.
"""
keypair = self.testkeys['full']['char_corp']
response = self.client.post(self.url, data={
'key_id': keypair['key_id'],
'v_code': keypair['v_code']}, follow=True)
self.assertContains(response, 'problem')
def test_api_full_char_noncorp(self):
"""Ensure full but non-corp character only keypair is rejected.
"""
keypair = self.testkeys['full']['char_noncorp']
response = self.client.post(self.url, data={
'key_id': keypair['key_id'],
'v_code': keypair['v_code']}, follow=True)
self.assertContains(response, 'problem')
# Mask: 4294901631
def test_api_partial_all(self):
"""Ensure partial and account-wide keypair is rejected.
"""
keypair = self.testkeys['partial']['all']
response = self.client.post(self.url, data={
'key_id': keypair['key_id'],
'v_code': keypair['v_code']}, follow=True)
self.assertContains(response, 'problem')
def test_api_partial_char_corp(self):
"""Ensure partial and corp character only keypair is rejected.
"""
keypair = self.testkeys['partial']['char_corp']
response = self.client.post(self.url, data={
'key_id': keypair['key_id'],
'v_code': keypair['v_code']}, follow=True)
self.assertContains(response, 'problem')
def test_api_partial_char_noncorp(self):
"""Ensure partial and non-corp character only keypair is rejected.
"""
keypair = self.testkeys['partial']['char_noncorp']
response = self.client.post(self.url, data={
'key_id': keypair['key_id'],
'v_code': keypair['v_code']}, follow=True)
self.assertContains(response, 'problem')
# Mask: 0
def test_api_blank_all(self):
"""Ensure blank and account-wide keypair is rejected.
"""
keypair = self.testkeys['blank']['all']
response = self.client.post(self.url, data={
'key_id': keypair['key_id'],
'v_code': keypair['v_code']}, follow=True)
self.assertContains(response, 'problem')
def test_api_blank_char_corp(self):
"""Ensure blank and corp character only keypair is rejected.
"""
keypair = self.testkeys['blank']['char_corp']
response = self.client.post(self.url, data={
'key_id': keypair['key_id'],
'v_code': keypair['v_code']}, follow=True)
self.assertContains(response, 'problem')
def test_api_blank_char_noncorp(self):
"""Ensure full but non-corp character only keypair is rejected.
"""
keypair = self.testkeys['blank']['char_noncorp']
response = self.client.post(self.url, data={
'key_id': keypair['key_id'],
'v_code': keypair['v_code']}, follow=True)
self.assertContains(response, 'problem')
# Expires
def test_api_expires_all(self):
"""Ensure full and account-wide but expiring keypair is rejected.
"""
keypair = self.testkeys['full_expires']['all']
response = self.client.post(self.url, data={
'key_id': keypair['key_id'],
'v_code': keypair['v_code']}, follow=True)
self.assertContains(response, 'problem')
def test_api_expires_char_corp(self):
"""Ensure full but corp character only, expiring keypair is rejected.
"""
keypair = self.testkeys['full_expires']['char_corp']
response = self.client.post(self.url, data={
'key_id': keypair['key_id'],
'v_code': keypair['v_code']}, follow=True)
self.assertContains(response, 'problem')
def test_api_expires_char_noncorp(self):
"""Ensure full but non-corp character, expiring keypair is rejected.
"""
keypair = self.testkeys['full_expires']['char_noncorp']
response = self.client.post(self.url, data={
'key_id': keypair['key_id'],
'v_code': keypair['v_code']}, follow=True)
self.assertContains(response, 'problem')
| import json
from django.conf import settings
from django.test import TestCase
from django.shortcuts import reverse
class AddAPIViewTest(TestCase):
"""Tests for the view which displays the "Add API" form.
"""
@classmethod
def setUpClass(cls):
super(AddAPIViewTest, cls).setUpClass()
data_dir = getattr(settings, 'DATA_DIR')
with data_dir.joinpath('conf', 'test_secrets.json').open() as handle:
secrets = json.load(handle)
cls.testkeys = secrets['apikeys']
cls.url = reverse('eveapi_add')
def test_invalid_api(self):
"""Ensure an invalid api is rejected.
"""
response = self.client.post(self.url, data={
'key_id': '1',
'v_code': 'test'}, follow=True)
self.assertContains(response, 'problem')
# Mask: 4294967295
def test_api_full_all(self):
"""Ensure full and account-wide keypair is accepted.
"""
keypair = self.testkeys['full']['all']
response = self.client.post(self.url, data={
'key_id': keypair['key_id'],
'v_code': keypair['v_code']}, follow=True)
self.assertContains(response, 'success')
def test_api_full_char_corp(self):
"""Ensure full but corp character only keypair is rejected.
"""
keypair = self.testkeys['full']['char_corp']
response = self.client.post(self.url, data={
'key_id': keypair['key_id'],
'v_code': keypair['v_code']}, follow=True)
self.assertContains(response, 'problem')
def test_api_full_char_noncorp(self):
"""Ensure full but non-corp character only keypair is rejected.
"""
keypair = self.testkeys['full']['char_noncorp']
response = self.client.post(self.url, data={
'key_id': keypair['key_id'],
'v_code': keypair['v_code']}, follow=True)
self.assertContains(response, 'problem')
| Python | 0 |
d01bb6e89c6fcfe8a17d90f3ace175ad26f921b5 | Support CSV files beginning with a byte order mark | git-keeper-core/gkeepcore/local_csv_files.py | git-keeper-core/gkeepcore/local_csv_files.py | # Copyright 2016 Nathan Sommer and Ben Coleman
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Provides concrete classes for reading and writing local CSV files as well
as a function for getting rows from a local CSV file.
"""
import csv
from gkeepcore.csv_files import CSVReader, CSVWriter, CSVError
def csv_rows(file_path: str) -> list:
"""
Retrieve rows from a local CSV file.
:param file_path: path to the file
:return: rows as a list of lists
"""
try:
with open(file_path) as f:
rows = list(csv.reader(f))
except csv.Error:
raise CSVError('Error reading from {0}'.format(file_path))
return rows
class LocalCSVReader(CSVReader):
"""Allows reading from a local CSV file."""
def __init__(self, file_path):
"""
:param file_path: path to the CSV file to read
"""
try:
with open(file_path, encoding='utf-8-sig') as f:
self._rows = list(csv.reader(f))
except (csv.Error, OSError):
raise CSVError('Error reading from {0}'.format(file_path))
def get_rows(self) -> list:
"""
Retrieve the rows from the CSV file
:return: list of lists representing all rows from the file
"""
return self._rows
class LocalCSVWriter(CSVWriter):
"""Allows writing to a local CSV file."""
def __init__(self, file_path):
"""
:param file_path: path to the CSV file to write
"""
self._file_path = file_path
def write_rows(self, rows):
"""
Write rows to the file
:param rows: list of lists (or tuples) to write
"""
try:
with open(self._file_path, 'w') as f:
writer = csv.writer(f)
for row in rows:
writer.writerow(row)
except OSError as e:
raise CSVError('Error writing to {0}'
.format(self._file_path))
| # Copyright 2016 Nathan Sommer and Ben Coleman
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Provides concrete classes for reading and writing local CSV files as well
as a function for getting rows from a local CSV file.
"""
import csv
from gkeepcore.csv_files import CSVReader, CSVWriter, CSVError
def csv_rows(file_path: str) -> list:
"""
Retrieve rows from a local CSV file.
:param file_path: path to the file
:return: rows as a list of lists
"""
try:
with open(file_path) as f:
rows = list(csv.reader(f))
except csv.Error:
raise CSVError('Error reading from {0}'.format(file_path))
return rows
class LocalCSVReader(CSVReader):
"""Allows reading from a local CSV file."""
def __init__(self, file_path):
"""
:param file_path: path to the CSV file to read
"""
try:
with open(file_path) as f:
self._rows = list(csv.reader(f))
except (csv.Error, OSError):
raise CSVError('Error reading from {0}'.format(file_path))
def get_rows(self) -> list:
"""
Retrieve the rows from the CSV file
:return: list of lists representing all rows from the file
"""
return self._rows
class LocalCSVWriter(CSVWriter):
"""Allows writing to a local CSV file."""
def __init__(self, file_path):
"""
:param file_path: path to the CSV file to write
"""
self._file_path = file_path
def write_rows(self, rows):
"""
Write rows to the file
:param rows: list of lists (or tuples) to write
"""
try:
with open(self._file_path, 'w') as f:
writer = csv.writer(f)
for row in rows:
writer.writerow(row)
except OSError as e:
raise CSVError('Error writing to {0}'
.format(self._file_path))
| Python | 0 |
1657e46cd5c2a81df4cbb73b292b0bf9072d5c51 | Fix test: make sure that Isolation Forest actually make a categorical split | h2o-py/tests/testdir_tree/pyunit_tree_irf.py | h2o-py/tests/testdir_tree/pyunit_tree_irf.py | import h2o
from h2o.tree import H2OTree
from h2o.estimators import H2OIsolationForestEstimator
from tests import pyunit_utils
def check_tree(tree, tree_number, tree_class=None):
    """Sanity-check a fetched tree: identity fields match and every
    structural member is populated."""
    assert tree is not None
    assert len(tree) > 0
    assert tree._tree_number == tree_number
    assert tree._tree_class == tree_class
    # Each of these members must be present on a fully-built tree.
    for member in ('root_node', 'left_children', 'right_children',
                   'thresholds', 'nas', 'descriptions', 'node_ids',
                   'model_id', 'levels'):
        assert getattr(tree, member) is not None
    assert tree.root_node.na_direction is not None
    assert tree.root_node.id is not None
def irf_tree_Test():
    """Train an Isolation Forest on all-categorical data and inspect a tree."""
    # An all-categorical frame forces the forest to make categorical splits.
    cat_frame = h2o.create_frame(cols=10, categorical_fraction=1, seed=42)
    # check all columns are categorical
    assert set(cat_frame.types.values()) == set(['enum'])
    iso_model = H2OIsolationForestEstimator(seed=42)
    iso_model.train(training_frame=cat_frame)
    # Isolation Forest trees are classless, so tree_class is None.
    tree = H2OTree(iso_model, 5)
    check_tree(tree, 5, None)
    print(tree)
if __name__ == "__main__":
    # Standalone run: the pyunit harness manages the H2O cluster lifecycle.
    pyunit_utils.standalone_test(irf_tree_Test)
else:
    # Imported by the test runner: execute directly.
    irf_tree_Test()
| import h2o
from h2o.tree import H2OTree
from h2o.estimators import H2OIsolationForestEstimator
from tests import pyunit_utils
def check_tree(tree, tree_number, tree_class = None):
assert tree is not None
assert len(tree) > 0
assert tree._tree_number == tree_number
assert tree._tree_class == tree_class
assert tree.root_node is not None
assert tree.left_children is not None
assert tree.right_children is not None
assert tree.thresholds is not None
assert tree.nas is not None
assert tree.descriptions is not None
assert tree.node_ids is not None
assert tree.model_id is not None
assert tree.levels is not None
assert tree.root_node.na_direction is not None
assert tree.root_node.id is not None
def irf_tree_Test():
prostate = h2o.import_file(path=pyunit_utils.locate("smalldata/prostate/prostate.csv"))
prostate["RACE"] = prostate["RACE"].asfactor()
iso_model = H2OIsolationForestEstimator()
iso_model.train(training_frame = prostate, x = list(set(prostate.col_names) - set(["ID", "CAPSULE"])))
tree = H2OTree(iso_model, 5)
check_tree(tree, 5, None)
print(tree)
if __name__ == "__main__":
pyunit_utils.standalone_test(irf_tree_Test)
else:
irf_tree_Test()
| Python | 0.000076 |
0500105b9dc148855b7957963b3949d89a7cc3b4 | Remove routes for PayPal | gratipay/models/exchange_route.py | gratipay/models/exchange_route.py | from __future__ import absolute_import, division, print_function, unicode_literals
import balanced
import braintree
from postgres.orm import Model
class ExchangeRoute(Model):
    """A participant's route for moving money on a payment network.

    Wraps rows of the ``exchange_routes`` table. Known networks:
    balanced-cc, balanced-ba, braintree-cc, paypal.
    """

    typname = "exchange_routes"

    def __bool__(self):
        # An invalidated route is falsy; anything else (including a route
        # carrying a transient error string) is truthy.
        return self.error != 'invalidated'

    __nonzero__ = __bool__  # Python 2 alias for __bool__

    @classmethod
    def from_id(cls, id):
        """Return the route with the given primary key, or None."""
        return cls.db.one("""
            SELECT r.*::exchange_routes
              FROM exchange_routes r
             WHERE id = %(id)s
        """, locals())

    @classmethod
    def from_network(cls, participant, network):
        """Return the participant's current route on *network*, or None."""
        participant_id = participant.id
        r = cls.db.one("""
            SELECT r.*::exchange_routes
              FROM current_exchange_routes r
             WHERE participant = %(participant_id)s
               AND network = %(network)s
        """, locals())
        if r:
            # Cache the participant object to avoid a later lazy re-fetch.
            r.__dict__['participant'] = participant
        return r

    @classmethod
    def from_address(cls, participant, network, address):
        """Return the participant's route for *network*/*address*, or None."""
        participant_id = participant.id
        r = cls.db.one("""
            SELECT r.*::exchange_routes
              FROM exchange_routes r
             WHERE participant = %(participant_id)s
               AND network = %(network)s
               AND address = %(address)s
        """, locals())
        if r:
            r.__dict__['participant'] = participant
        return r

    @classmethod
    def associate_balanced(cls, participant, balanced_account, network, address):
        """Attach a Balanced card/bank account to the customer, then record
        the route locally via insert()."""
        if network == 'balanced-cc':
            obj = balanced.Card.fetch(address)
        else:
            assert network == 'balanced-ba', network  # sanity check
            obj = balanced.BankAccount.fetch(address)
        obj.associate_to_customer(balanced_account)
        return cls.insert(participant, network, address)

    @classmethod
    def insert(cls, participant, network, address, error='', fee_cap=None):
        """Insert a new route row and return it.

        Credit-card routes affect what tippees can receive, so giving
        amounts are recomputed for those.
        """
        participant_id = participant.id
        r = cls.db.one("""
            INSERT INTO exchange_routes
                        (participant, network, address, error, fee_cap)
                 VALUES (%(participant_id)s, %(network)s, %(address)s, %(error)s, %(fee_cap)s)
              RETURNING exchange_routes.*::exchange_routes
        """, locals())
        if network == 'balanced-cc':
            participant.update_giving_and_tippees()
        r.__dict__['participant'] = participant
        return r

    def invalidate(self):
        """Detach the route from its payment processor and retire it locally."""
        if self.network == 'balanced-ba':
            balanced.BankAccount.fetch(self.address).delete()
        elif self.network == 'balanced-cc':
            balanced.Card.fetch(self.address).unstore()
        elif self.network == 'braintree-cc':
            braintree.PaymentMethod.delete(self.address)
        # For Paypal, we remove the record entirely to prevent
        # an integrity error if the user tries to add the route again
        if self.network == 'paypal':
            self.db.run("DELETE FROM exchange_routes WHERE id=%s", (self.id,))
        else:
            self.update_error('invalidated')

    def update_error(self, new_error, propagate=True):
        """Record a new error state for the route.

        No-op if the route is already invalidated. When *propagate* is true
        and the error toggles on/off for a credit-card route, tippees'
        receiving amounts are recomputed.
        """
        id = self.id
        old_error = self.error
        if old_error == 'invalidated':
            return
        self.db.run("""
            UPDATE exchange_routes
               SET error = %(new_error)s
             WHERE id = %(id)s
        """, locals())
        self.set_attributes(error=new_error)

        # Update the receiving amounts of tippees if requested and necessary
        if not propagate or self.network != 'balanced-cc':
            return
        if self.participant.is_suspicious or bool(new_error) == bool(old_error):
            return
        self.participant.update_giving_and_tippees()
| from __future__ import absolute_import, division, print_function, unicode_literals
import balanced
import braintree
from postgres.orm import Model
class ExchangeRoute(Model):
typname = "exchange_routes"
def __bool__(self):
return self.error != 'invalidated'
__nonzero__ = __bool__
@classmethod
def from_id(cls, id):
return cls.db.one("""
SELECT r.*::exchange_routes
FROM exchange_routes r
WHERE id = %(id)s
""", locals())
@classmethod
def from_network(cls, participant, network):
participant_id = participant.id
r = cls.db.one("""
SELECT r.*::exchange_routes
FROM current_exchange_routes r
WHERE participant = %(participant_id)s
AND network = %(network)s
""", locals())
if r:
r.__dict__['participant'] = participant
return r
@classmethod
def from_address(cls, participant, network, address):
participant_id = participant.id
r = cls.db.one("""
SELECT r.*::exchange_routes
FROM exchange_routes r
WHERE participant = %(participant_id)s
AND network = %(network)s
AND address = %(address)s
""", locals())
if r:
r.__dict__['participant'] = participant
return r
@classmethod
def associate_balanced(cls, participant, balanced_account, network, address):
if network == 'balanced-cc':
obj = balanced.Card.fetch(address)
else:
assert network == 'balanced-ba', network # sanity check
obj = balanced.BankAccount.fetch(address)
obj.associate_to_customer(balanced_account)
return cls.insert(participant, network, address)
@classmethod
def insert(cls, participant, network, address, error='', fee_cap=None):
participant_id = participant.id
r = cls.db.one("""
INSERT INTO exchange_routes
(participant, network, address, error, fee_cap)
VALUES (%(participant_id)s, %(network)s, %(address)s, %(error)s, %(fee_cap)s)
RETURNING exchange_routes.*::exchange_routes
""", locals())
if network == 'balanced-cc':
participant.update_giving_and_tippees()
r.__dict__['participant'] = participant
return r
def invalidate(self):
if self.network == 'balanced-ba':
balanced.BankAccount.fetch(self.address).delete()
elif self.network == 'balanced-cc':
balanced.Card.fetch(self.address).unstore()
elif self.network == 'braintree-cc':
braintree.PaymentMethod.delete(self.address)
self.update_error('invalidated')
def update_error(self, new_error, propagate=True):
id = self.id
old_error = self.error
if old_error == 'invalidated':
return
self.db.run("""
UPDATE exchange_routes
SET error = %(new_error)s
WHERE id = %(id)s
""", locals())
self.set_attributes(error=new_error)
# Update the receiving amounts of tippees if requested and necessary
if not propagate or self.network != 'balanced-cc':
return
if self.participant.is_suspicious or bool(new_error) == bool(old_error):
return
self.participant.update_giving_and_tippees()
| Python | 0.000001 |
7e5477682dfc0d907fe55a489c75179a6e4c832b | fix Swale import script | polling_stations/apps/data_collection/management/commands/import_swale.py | polling_stations/apps/data_collection/management/commands/import_swale.py | from data_collection.management.commands import BaseCsvStationsShpDistrictsImporter
class Command(BaseCsvStationsShpDistrictsImporter):
    """Importer for Swale borough (E07000113): polling districts come from a
    shapefile, polling stations from the council's CSV polling scheme."""

    srid = 27700
    council_id = 'E07000113'
    districts_name = 'shp/Swale Polling Districts'
    stations_name = 'Swale 21 Feb 2017 Polling scheme station numbers.csv'
    elections = ['local.kent.2017-05-04']

    def district_record_to_dict(self, record):
        district_code = str(record[0]).strip()
        return {
            'internal_council_id': district_code,
            'name': str(record[1]).strip(),
        }

    def station_record_to_dict(self, record):
        # One CSV row may serve several districts ("DA and DB"):
        # emit one station dict per district code.
        return [
            {
                'internal_council_id': code,
                'postcode': '',
                'address': record.premises,
                'polling_district_id': code,
                'location': None,
            }
            for code in record.pd.split(" and ")
        ]
| from data_collection.management.commands import BaseShpStationsShpDistrictsImporter
class Command(BaseShpStationsShpDistrictsImporter):
srid = 27700
council_id = 'E07000113'
districts_name = 'shp/Swale Polling Districts'
stations_name = 'shp/Swale Polling Stations.shp'
#elections = ['local.kent.2017-05-04']
elections = []
def district_record_to_dict(self, record):
code = str(record[0]).strip()
return {
'internal_council_id': code,
'name': str(record[1]).strip(),
'polling_station_id': code,
}
def station_record_to_dict(self, record):
return {
'internal_council_id': str(record[0]).strip(),
'postcode': '',
'address': str(record[4]).strip(),
}
| Python | 0.000002 |
fc7f51877b6b991ad5a25afb755dd7a35e91dfea | Use get_or_create to avoid duplicate objects | cla_backend/apps/legalaid/migrations/0022_default_contact_for_research_methods.py | cla_backend/apps/legalaid/migrations/0022_default_contact_for_research_methods.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
import uuid
from cla_common.constants import RESEARCH_CONTACT_VIA
def create_default_contact_for_research_methods(apps, schema_editor):
    """Ensure one ContactResearchMethod row exists per contact method."""
    model = apps.get_model("legalaid", "ContactResearchMethod")
    for method_value, _label in RESEARCH_CONTACT_VIA:
        # get_or_create keeps the migration idempotent across re-runs;
        # a fresh UUID reference is only used when the row is created.
        model.objects.get_or_create(
            method=method_value, defaults={"reference": uuid.uuid4()}
        )
def rollback_default_contact_for_research_methods(apps, schema_editor):
    # Intentional no-op: seeded rows are left in place when migrating backwards.
    pass
class Migration(migrations.Migration):
    """Seed a default ContactResearchMethod row for each contact method.

    The forward function uses get_or_create, so re-running is idempotent.
    """

    dependencies = [("legalaid", "0021_auto_20190515_1042")]

    operations = [
        migrations.RunPython(
            create_default_contact_for_research_methods, rollback_default_contact_for_research_methods
        )
    ]
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
import uuid
from cla_common.constants import RESEARCH_CONTACT_VIA
def create_default_contact_for_research_methods(apps, schema_editor):
ContactResearchMethods = apps.get_model("legalaid", "ContactResearchMethod")
for value, name in RESEARCH_CONTACT_VIA:
ContactResearchMethods.objects.create(method=value, reference=uuid.uuid4()).save()
def rollback_default_contact_for_research_methods(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [("legalaid", "0021_auto_20190515_1042")]
operations = [
migrations.RunPython(
create_default_contact_for_research_methods, rollback_default_contact_for_research_methods
)
]
| Python | 0.000001 |
c8ccee311b5939b116987c1a6192cc2935f9ff3b | test bad query op | test/test_utility_functions.py | test/test_utility_functions.py | import testutils
import json
import psycopg2
class TestSplitQueries(testutils.BedquiltTestCase):
    """Tests for the bq_split_queries SQL function.

    bq_split_queries splits a query document into a plain containment-match
    document plus a list of SQL clauses for the special $-operators.
    """

    def _assert_examples(self, examples):
        """Run each (query, expected_match_doc, expected_special_clauses) triple."""
        for query, match, specials in examples:
            result = self._query("""
            select * from bq_split_queries('{}'::jsonb)
            """.format(json.dumps(query)))
            self.assertEqual(json.loads(result[0][0]), match)
            self.assertEqual(result[0][1], specials)

    def test_simple_queries_with_no_specials(self):
        """Plain documents pass through unchanged, with no special clauses."""
        examples = [
            ({'a': {'b': 1}},
             {'a': {'b': 1}},
             []),
            ({'a': 1, 'b': 2},
             {'a': 1, 'b': 2},
             [])
        ]
        self._assert_examples(examples)

    def test_advanced_queries(self):
        """$-operators become SQL clauses; plain sibling keys stay in the match doc."""
        examples = [
            (
                {
                    'a': {
                        'b': {
                            '$eq': 22
                        }
                    }
                },
                {},
                ["and bq_jdoc #> '{a,b}' = '22'::jsonb"]
            ),
            (
                {
                    'a': {
                        'b': {
                            '$eq': 22
                        }
                    },
                    'c': 44
                },
                {
                    'c': 44
                },
                ["and bq_jdoc #> '{a,b}' = '22'::jsonb"]
            ),
            (
                {
                    'a': {
                        'b': {
                            '$eq': 22
                        },
                        'c': 44
                    }
                },
                {
                    'a': {'c': 44}
                },
                ["and bq_jdoc #> '{a,b}' = '22'::jsonb"]
            )
        ]
        self._assert_examples(examples)

    def test_supported_ops(self):
        """Each supported $-operator maps to its SQL comparison operator."""
        examples = [
            (
                {'a': {'b': {'$eq': 42}}},
                {},
                ["and bq_jdoc #> '{a,b}' = '42'::jsonb"]
            ),
            (
                {'a': {'b': {'$noteq': 42}}},
                {},
                ["and bq_jdoc #> '{a,b}' != '42'::jsonb"]
            ),
            (
                {'a': {'b': {'$gte': 42}}},
                {},
                ["and bq_jdoc #> '{a,b}' >= '42'::jsonb"]
            ),
            (
                {'a': {'b': {'$gt': 42}}},
                {},
                ["and bq_jdoc #> '{a,b}' > '42'::jsonb"]
            ),
            (
                {'a': {'b': {'$lte': 42}}},
                {},
                ["and bq_jdoc #> '{a,b}' <= '42'::jsonb"]
            ),
            (
                {'a': {'b': {'$lt': 42}}},
                {},
                ["and bq_jdoc #> '{a,b}' < '42'::jsonb"]
            ),
            (
                {'a': {'b': {'$in': [22, 42]}}},
                {},
                ["and bq_jdoc #> '{a,b}' <@ '[22, 42]'::jsonb"]
            ),
        ]
        self._assert_examples(examples)

    def test_bad_op(self):
        """An unknown $-operator raises an error inside Postgres."""
        query = {
            'a': {'$totallynotavalidop': 42}
        }
        with self.assertRaises(psycopg2.InternalError):
            self.cur.execute("""
            select * from bq_split_queries('{}'::jsonb)
            """.format(json.dumps(query)))
        # The failed statement aborts the transaction; roll back so later
        # tests run against a clean connection.
        self.conn.rollback()
| import testutils
import json
import psycopg2
class TestSplitQueries(testutils.BedquiltTestCase):
def _assert_examples(self, examples):
for query, match, specials in examples:
result = self._query("""
select * from bq_split_queries('{}'::jsonb)
""".format(json.dumps(query)))
self.assertEqual(json.loads(result[0][0]), match)
self.assertEqual(result[0][1], specials)
def test_simple_queries_with_no_specials(self):
examples = [
({'a': {'b': 1}},
{'a': {'b': 1}},
[]),
({'a': 1, 'b': 2},
{'a': 1, 'b': 2},
[])
]
self._assert_examples(examples)
def test_advanced_queries(self):
examples = [
(
{
'a': {
'b': {
'$eq': 22
}
}
},
{},
["and bq_jdoc #> '{a,b}' = '22'::jsonb"]
),
(
{
'a': {
'b': {
'$eq': 22
}
},
'c': 44
},
{
'c': 44
},
["and bq_jdoc #> '{a,b}' = '22'::jsonb"]
),
(
{
'a': {
'b': {
'$eq': 22
},
'c': 44
}
},
{
'a': {'c': 44}
},
["and bq_jdoc #> '{a,b}' = '22'::jsonb"]
)
]
self._assert_examples(examples)
def test_supported_ops(self):
examples = [
(
{'a': {'b': {'$eq': 42}}},
{},
["and bq_jdoc #> '{a,b}' = '42'::jsonb"]
),
(
{'a': {'b': {'$noteq': 42}}},
{},
["and bq_jdoc #> '{a,b}' != '42'::jsonb"]
),
(
{'a': {'b': {'$gte': 42}}},
{},
["and bq_jdoc #> '{a,b}' >= '42'::jsonb"]
),
(
{'a': {'b': {'$gt': 42}}},
{},
["and bq_jdoc #> '{a,b}' > '42'::jsonb"]
),
(
{'a': {'b': {'$lte': 42}}},
{},
["and bq_jdoc #> '{a,b}' <= '42'::jsonb"]
),
(
{'a': {'b': {'$lt': 42}}},
{},
["and bq_jdoc #> '{a,b}' < '42'::jsonb"]
),
(
{'a': {'b': {'$in': [22, 42]}}},
{},
["and bq_jdoc #> '{a,b}' <@ '[22, 42]'::jsonb"]
),
]
self._assert_examples(examples)
| Python | 0.998676 |
d6a8e42cb3bd963632500541b5e4e71c700c246e | Fix migration | nodeconductor/cost_tracking/migrations/0006_add_pricelist_backend_ids.py | nodeconductor/cost_tracking/migrations/0006_add_pricelist_backend_ids.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Rename defaultpricelistitem.service_content_type, add backend id
    columns, and attach a resource_content_type FK to pricelistitem."""

    dependencies = [
        ('contenttypes', '0001_initial'),
        ('cost_tracking', '0005_expand_item_type_size'),
    ]

    operations = [
        migrations.RenameField(
            model_name='defaultpricelistitem',
            old_name='service_content_type',
            new_name='resource_content_type',
        ),
        migrations.AddField(
            model_name='defaultpricelistitem',
            name='backend_choice_id',
            field=models.CharField(max_length=255, blank=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='defaultpricelistitem',
            name='backend_option_id',
            field=models.CharField(max_length=255, blank=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='defaultpricelistitem',
            name='backend_product_id',
            field=models.CharField(max_length=255, blank=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='pricelistitem',
            name='resource_content_type',
            # default=1 only back-fills rows that exist when the column is
            # added (preserve_default=False); it must be the pk of an
            # existing ContentType row -- presumably pk 0 does not exist,
            # which is why 0 previously broke this migration.
            field=models.ForeignKey(related_name='+', default=1, to='contenttypes.ContentType'),
            preserve_default=False,
        ),
    ]
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '0001_initial'),
('cost_tracking', '0005_expand_item_type_size'),
]
operations = [
migrations.RenameField(
model_name='defaultpricelistitem',
old_name='service_content_type',
new_name='resource_content_type',
),
migrations.AddField(
model_name='defaultpricelistitem',
name='backend_choice_id',
field=models.CharField(max_length=255, blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='defaultpricelistitem',
name='backend_option_id',
field=models.CharField(max_length=255, blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='defaultpricelistitem',
name='backend_product_id',
field=models.CharField(max_length=255, blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='pricelistitem',
name='resource_content_type',
field=models.ForeignKey(related_name='+', default=0, to='contenttypes.ContentType'),
preserve_default=False,
),
]
| Python | 0 |
3d8f627a25cb83a202878897607e5095270c332d | Convert ruuvi_rx beacon timestamp to UTC time. (#54) | ruuvitag_sensor/ruuvi_rx.py | ruuvitag_sensor/ruuvi_rx.py | from datetime import datetime
from multiprocessing import Manager
from threading import Thread
import time
from concurrent.futures import ProcessPoolExecutor
from rx.subjects import Subject
from ruuvitag_sensor.ruuvi import RuuviTagSensor, RunFlag
def _run_get_data_background(macs, queue, shared_data, bt_device):
    """
    Background process function for RuuviTag Sensors
    """
    run_flag = RunFlag()

    def add_data(data):
        # Honour a stop request coming from the parent process via the
        # shared Manager dict.
        if not shared_data['run_flag']:
            run_flag.running = False
        # Stamp each sample with a UTC ISO-8601 timestamp so consumers
        # are timezone-agnostic.
        data[1]['time'] = datetime.utcnow().isoformat()
        queue.put(data)

    RuuviTagSensor.get_datas(add_data, macs, run_flag, bt_device)
class RuuviTagReactive(object):
    """
    Reactive wrapper and background process for RuuviTagSensor get_datas
    """

    @staticmethod
    def _data_update(subjects, queue, run_flag):
        """
        Get data from background process and notify all subscribed
        observers with the new data
        """
        while run_flag.running:
            # Drain everything currently queued before sleeping again.
            while not queue.empty():
                data = queue.get()
                for subject in [s for s in subjects if not s.is_disposed]:
                    subject.on_next(data)
            time.sleep(0.1)

    def __init__(self, macs=[], bt_device=''):
        """
        Start background process for get_datas and a notifier thread for
        all subscribed observers

        Args:
            macs (list): MAC addresses
            bt_device (string): Bluetooth device id
        """
        # NOTE(review): mutable default for ``macs`` -- appears read-only
        # here (only passed along), but confirm it is never mutated.
        self._run_flag = RunFlag()
        self._subjects = []
        m = Manager()
        q = m.Queue()
        # Use Manager dict to share data between processes
        self._shared_data = m.dict()
        self._shared_data['run_flag'] = True
        # Start data updater
        notify_thread = Thread(target=RuuviTagReactive._data_update, args=(self._subjects, q, self._run_flag))
        notify_thread.start()
        # Start background process
        executor = ProcessPoolExecutor(1)
        executor.submit(_run_get_data_background, macs, q, self._shared_data, bt_device)

    def get_subject(self):
        """
        Returns:
            subject : Reactive Extension Subject
        """
        if not self._run_flag.running:
            raise Exception('RuuviTagReactive stopped')
        subject = Subject()
        self._subjects.append(subject)
        return subject

    def stop(self):
        """
        Stop get_datas
        """
        # Stop the notifier thread, signal the worker process via the
        # shared dict, and dispose all handed-out subjects.
        self._run_flag.running = False
        self._shared_data['run_flag'] = False
        for s in self._subjects:
            s.dispose()
| from datetime import datetime
from multiprocessing import Manager
from threading import Thread
import time
from concurrent.futures import ProcessPoolExecutor
from rx.subjects import Subject
from ruuvitag_sensor.ruuvi import RuuviTagSensor, RunFlag
def _run_get_data_background(macs, queue, shared_data, bt_device):
"""
Background process function for RuuviTag Sensors
"""
run_flag = RunFlag()
def add_data(data):
if not shared_data['run_flag']:
run_flag.running = False
data[1]['time'] = str(datetime.now())
queue.put(data)
RuuviTagSensor.get_datas(add_data, macs, run_flag, bt_device)
class RuuviTagReactive(object):
"""
Reactive wrapper and background process for RuuviTagSensor get_datas
"""
@staticmethod
def _data_update(subjects, queue, run_flag):
"""
Get data from backgound process and notify all subscribed observers with the new data
"""
while run_flag.running:
while not queue.empty():
data = queue.get()
for subject in [s for s in subjects if not s.is_disposed]:
subject.on_next(data)
time.sleep(0.1)
def __init__(self, macs=[], bt_device=''):
"""
Start background process for get_datas and async task for notifying all subscribed observers
Args:
macs (list): MAC addresses
bt_device (string): Bluetooth device id
"""
self._run_flag = RunFlag()
self._subjects = []
m = Manager()
q = m.Queue()
# Use Manager dict to share data between processes
self._shared_data = m.dict()
self._shared_data['run_flag'] = True
# Start data updater
notify_thread = Thread(target=RuuviTagReactive._data_update, args=(self._subjects, q, self._run_flag))
notify_thread.start()
# Start background process
executor = ProcessPoolExecutor(1)
executor.submit(_run_get_data_background, macs, q, self._shared_data, bt_device)
def get_subject(self):
"""
Returns:
subject : Reactive Extension Subject
"""
if not self._run_flag.running:
raise Exception('RuuviTagReactive stopped')
subject = Subject()
self._subjects.append(subject)
return subject
def stop(self):
"""
Stop get_datas
"""
self._run_flag.running = False
self._shared_data['run_flag'] = False
for s in self._subjects:
s.dispose()
| Python | 0.999997 |
0b14f93121f3feaa4433eaf8275f5ad40c646b48 | Update NumberPathShuffled.py | _includes/NumberPathShuffled.py | _includes/NumberPathShuffled.py | from random import shuffle
N = 100
shufflePeriod = 10000000  # reshuffle adjacency lists after this many explore() calls
print(N)

# connected[m] holds every other n in 1..N where one of m, n divides the
# other. Index 0 is an unused placeholder so connected[m] lines up with m.
connected = [[]]
for i in range(N):
    connected.append([])
for m in range(1,N+1):
    # for n in range(1,N+1):
    for n in range(N,0,-1):  # descending order; lists get shuffled later anyway
        if ((not m == n) and (m%n == 0 or n%m == 0)):
            connected[m].append(n)
def explore(path):
    """Depth-first search extending *path* along divisor/multiple edges.

    Records the longest dead-end path in the globals longestLength /
    longestPath. Every shufflePeriod calls the adjacency lists are
    reshuffled to vary the visit order. (Python 2 file: uses print
    statements.)
    """
    global longestLength, longestPath, connected, shuffleCounter, shufflePeriod
    shuffleCounter += 1
    if shuffleCounter == shufflePeriod:
        shuffleCounter = 0
        for L in connected:
            shuffle(L)
        print "Shuffled still",longestLength,longestPath
    isExtendable = 0
    n = path[-1]
    # shuffledconnected = list(connected[n])
    # shuffle(shuffledconnected)
    for m in connected[n]:
    #for m in shuffledconnected:
        if not m in path:
            isExtendable = 1
            newPath = list(path)
            newPath.append(m)
            explore(newPath)
    if not isExtendable:
        # Dead end: keep the path if it is the longest found so far.
        if len(path) > longestLength:
            longestLength = len(path)
            longestPath = path
            print longestLength,longestPath
longestPath = []
longestLength = 0
#for n in range(1,N+1):
#    print(n)
#    explore([n])
shuffleCounter = 0
explore([81])  # search from a single fixed start instead of all 1..N
print("Longest path length is",longestLength)
print(longestPath)
| from random import shuffle
N = 100
shufflePeriod = 10000000
print(N)
connected = [[]]
for i in range(N):
connected.append([])
for m in range(1,N+1):
# for n in range(1,N+1):
for n in range(N,0,-1):
if ((not m == n) and (m%n == 0 or n%m == 0)):
connected[m].append(n)
def explore(path):
global longestLength, longestPath, connected, shuffleCounter, shufflePeriod
shuffleCounter += 1
if shuffleCounter == shufflePeriod:
shuffleCounter = 0
for L in connected:
shuffle(L)
print "Shuffled"
isExtendable = 0
n = path[-1]
# shuffledconnected = list(connected[n])
# shuffle(shuffledconnected)
for m in connected[n]:
#for m in shuffledconnected:
if not m in path:
isExtendable = 1
newPath = list(path)
newPath.append(m)
explore(newPath)
if not isExtendable:
if len(path) > longestLength:
longestLength = len(path)
longestPath = path
print longestLength,longestPath
longestPath = []
longestLength = 0
#for n in range(1,N+1):
# print(n)
# explore([n])
shuffleCounter = 0
explore([81])
print("Longest path length is",longestLength)
print(longestPath)
| Python | 0.000001 |
936382b1744c2a9b5f3082abe9a3e0f2fbba58d0 | Return None when an error while reading config occurs | src/config.py | src/config.py | import yaml
SECTION_APP = "app"
SECTION_DEVICE = "device"
KEY_DEFAULT = "default"
def read_value(section, key):
    """Return the value stored at [section][key] in ./.adbons.yml.

    Returns None when the file is missing or unreadable, the YAML is
    invalid, or the section/key is absent.
    """
    try:
        with open(".adbons.yml", 'r') as ymlfile:
            config = yaml.safe_load(ymlfile)
        return config[section][key]
    # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt are
    # no longer swallowed; missing data still yields None (TypeError covers
    # an empty file, where safe_load returns None).
    except (OSError, yaml.YAMLError, KeyError, TypeError):
        return None
def write_value(section, key, value):
    """Set config[section][key] = value in ./.adbons.yml.

    The file and the section are created when missing. Unlike the previous
    implementation, an unreadable value for one section no longer resets
    the whole config (which silently dropped unrelated sections).
    """
    try:
        with open(".adbons.yml", 'r') as ymlfile:
            config = yaml.safe_load(ymlfile)
    # Narrowed from a bare ``except:``; a missing/unreadable file means we
    # start from scratch.
    except (OSError, yaml.YAMLError):
        config = None
    if not isinstance(config, dict):
        # safe_load returns None for an empty file; also guard against a
        # top-level YAML document that is not a mapping.
        config = {}
    if not isinstance(config.get(section), dict):
        config[section] = {}
    config[section][key] = value
    with open(".adbons.yml", 'w') as ymlfile:
        yaml.dump(config, ymlfile, default_flow_style=False)
| import yaml
SECTION_APP = "app"
SECTION_DEVICE = "device"
KEY_DEFAULT = "default"
def read_value(section, key):
with open(".adbons.yml", 'r') as ymlfile:
config = yaml.safe_load(ymlfile)
try:
return config[section][key]
except:
return ""
def write_value(section, key, value):
try:
with open(".adbons.yml", 'r+') as ymlfile:
config = yaml.safe_load(ymlfile)
if section not in config:
config[section] = {}
config[section][key] = value
except:
config = {}
config[section] = {}
config[section][key] = value
with open(".adbons.yml", 'w') as ymlfile:
yaml.dump(config, ymlfile, default_flow_style=False)
| Python | 0.000002 |
4b5cc8e2c75ae191bc134a7b3c62aa9c67ebe837 | Use six.iteritems instead of iteritems() in psutil_compat | salt/utils/psutil_compat.py | salt/utils/psutil_compat.py | # -*- coding: utf-8 -*-
'''
Version agnostic psutil hack to fully support both old (<2.0) and new (>=2.0)
psutil versions.
The old <1.0 psutil API is dropped in psutil 3.0
Should be removed once support for psutil <2.0 is dropped. (eg RHEL 6)
Built off of http://grodola.blogspot.com/2014/01/psutil-20-porting.html
'''
# Import Python libs
from __future__ import absolute_import
# Import Salt libs
import salt.ext.six as six
# No exception handling, as we want ImportError if psutil doesn't exist
import psutil
if psutil.version_info >= (2, 0):
    # psutil >= 2.0 already exposes the modern API; re-export it wholesale.
    from psutil import *  # pylint: disable=wildcard-import,unused-wildcard-import
else:
    # psutil < 2.0: rebuild the modern (2.x-style) names on top of the old API.

    # Import hack to work around bugs in old psutil's
    # Psuedo "from psutil import *"
    _globals = globals()
    for attr in psutil.__all__:
        # level=-1 (implicit relative imports) is Python 2-only.
        _temp = __import__('psutil', globals(), locals(), [attr], -1)
        try:
            _globals[attr] = getattr(_temp, attr)
        except AttributeError:
            pass

    # Import functions not in __all__
    from psutil import disk_partitions  # pylint: disable=unused-import
    from psutil import disk_usage  # pylint: disable=unused-import

    # Alias new module functions
    def boot_time():
        return psutil.BOOT_TIME

    def cpu_count():
        return psutil.NUM_CPUS

    # Alias renamed module functions
    pids = psutil.get_pid_list
    try:
        users = psutil.get_users
    except AttributeError:
        # No get_users on very old psutil: defer the failure to call time.
        users = lambda: (_ for _ in ()).throw(NotImplementedError('Your '
                        'psutil version is too old'))

    # Deprecated in 1.0.1, but not mentioned in blog post
    if psutil.version_info < (1, 0, 1):
        # NOTE(review): the trailing () binds the *result* of the call, not
        # the function itself, unlike the other aliases here -- confirm
        # this is intentional.
        net_io_counters = psutil.network_io_counters()

    class Process(psutil.Process):  # pylint: disable=no-init
        # Reimplement overloaded getters/setters: with args they set,
        # without args they get, matching the psutil 2.x call style.
        def cpu_affinity(self, *args, **kwargs):
            if args or kwargs:
                return self.set_cpu_affinity(*args, **kwargs)
            else:
                return self.get_cpu_affinity()

        def ionice(self, *args, **kwargs):
            if args or kwargs:
                return self.set_ionice(*args, **kwargs)
            else:
                return self.get_ionice()

        def nice(self, *args, **kwargs):
            if args or kwargs:
                return self.set_nice(*args, **kwargs)
            else:
                return self.get_nice()

        def rlimit(self, *args, **kwargs):
            '''
            set_rlimit and get_limit were not introduced until psutil v1.1.0
            '''
            if psutil.version_info >= (1, 1, 0):
                if args or kwargs:
                    return self.set_rlimit(*args, **kwargs)
                else:
                    return self.get_rlimit()
            else:
                # Silently unsupported on older psutil.
                pass

    # Alias renamed Process functions (new 2.x name -> old pre-2.0 name).
    _PROCESS_FUNCTION_MAP = {
        "children": "get_children",
        "connections": "get_connections",
        "cpu_percent": "get_cpu_percent",
        "cpu_times": "get_cpu_times",
        "io_counters": "get_io_counters",
        "memory_info": "get_memory_info",
        "memory_info_ex": "get_ext_memory_info",
        "memory_maps": "get_memory_maps",
        "memory_percent": "get_memory_percent",
        "num_ctx_switches": "get_num_ctx_switches",
        "num_fds": "get_num_fds",
        "num_threads": "get_num_threads",
        "open_files": "get_open_files",
        "threads": "get_threads",
        "cwd": "getcwd",
    }

    for new, old in six.iteritems(_PROCESS_FUNCTION_MAP):
        try:
            setattr(Process, new, psutil.Process.__dict__[old])
        except KeyError:
            # Old name absent in this psutil version: skip the alias.
            pass
| # -*- coding: utf-8 -*-
'''
Version agnostic psutil hack to fully support both old (<2.0) and new (>=2.0)
psutil versions.
The old <1.0 psutil API is dropped in psutil 3.0
Should be removed once support for psutil <2.0 is dropped. (eg RHEL 6)
Built off of http://grodola.blogspot.com/2014/01/psutil-20-porting.html
'''
from __future__ import absolute_import
# No exception handling, as we want ImportError if psutil doesn't exist
import psutil
if psutil.version_info >= (2, 0):
from psutil import * # pylint: disable=wildcard-import,unused-wildcard-import
else:
# Import hack to work around bugs in old psutil's
# Psuedo "from psutil import *"
_globals = globals()
for attr in psutil.__all__:
_temp = __import__('psutil', globals(), locals(), [attr], -1)
try:
_globals[attr] = getattr(_temp, attr)
except AttributeError:
pass
# Import functions not in __all__
from psutil import disk_partitions # pylint: disable=unused-import
from psutil import disk_usage # pylint: disable=unused-import
# Alias new module functions
def boot_time():
return psutil.BOOT_TIME
def cpu_count():
return psutil.NUM_CPUS
# Alias renamed module functions
pids = psutil.get_pid_list
try:
users = psutil.get_users
except AttributeError:
users = lambda: (_ for _ in ()).throw(NotImplementedError('Your '
'psutil version is too old'))
# Deprecated in 1.0.1, but not mentioned in blog post
if psutil.version_info < (1, 0, 1):
net_io_counters = psutil.network_io_counters()
class Process(psutil.Process): # pylint: disable=no-init
# Reimplement overloaded getters/setters
def cpu_affinity(self, *args, **kwargs):
if args or kwargs:
return self.set_cpu_affinity(*args, **kwargs)
else:
return self.get_cpu_affinity()
def ionice(self, *args, **kwargs):
if args or kwargs:
return self.set_ionice(*args, **kwargs)
else:
return self.get_ionice()
def nice(self, *args, **kwargs):
if args or kwargs:
return self.set_nice(*args, **kwargs)
else:
return self.get_nice()
def rlimit(self, *args, **kwargs):
'''
set_rlimit and get_limit were not introduced until psutil v1.1.0
'''
if psutil.version_info >= (1, 1, 0):
if args or kwargs:
return self.set_rlimit(*args, **kwargs)
else:
return self.get_rlimit()
else:
pass
# Alias renamed Process functions
_PROCESS_FUNCTION_MAP = {
"children": "get_children",
"connections": "get_connections",
"cpu_percent": "get_cpu_percent",
"cpu_times": "get_cpu_times",
"io_counters": "get_io_counters",
"memory_info": "get_memory_info",
"memory_info_ex": "get_ext_memory_info",
"memory_maps": "get_memory_maps",
"memory_percent": "get_memory_percent",
"num_ctx_switches": "get_num_ctx_switches",
"num_fds": "get_num_fds",
"num_threads": "get_num_threads",
"open_files": "get_open_files",
"threads": "get_threads",
"cwd": "getcwd",
}
for new, old in _PROCESS_FUNCTION_MAP.iteritems():
try:
setattr(Process, new, psutil.Process.__dict__[old])
except KeyError:
pass
| Python | 0 |
89a1a37e91ace4af2983e63ef68ff1d22811aa32 | Fix syntax error | hackeriet/cardreaderd/__init__.py | hackeriet/cardreaderd/__init__.py | #!/usr/bin/env python
from hackeriet import mifare
from hackeriet.mqtt import MQTT
from hackeriet.door import users
import os, logging
logging.basicConfig(level=logging.INFO, format='%(asctime)-15s %(message)s')
door_name = os.getenv("DOOR_NAME", 'hackeriet')
door_topic = "hackeriet/door/%s/open" % door_name
door_timeout = int(os.getenv("DOOR_TIMEOUT", 2))
mqtt = MQTT()
def main():
logging.debug('Starting main loop')
while True:
users.load()
# Read data from card reader
logging.debug('mifare: waiting for data...')
data = mifare.try_read()
if data:
logging.debug('mifare: data read')
user = users.auth(data[0:16])
if user:
ascii_user = user.encode('ascii', 'replace').decode('ascii')
logging.info('auth: card read for user %s' % ascii_user)
mqtt(door_topic, user)
else:
logging.debug('auth: card data does not belong to a user: %s' % data[0:16])
# Avoid spewing messages every single ms while a card is in front of the reader
time.sleep(door_timeout)
else:
logging.debug('mifare: no data read in last attempt')
if __name__ == "__main__":
main()
| #!/usr/bin/env python
from hackeriet import mifare
from hackeriet.mqtt import MQTT
from hackeriet.door import users
import os, logging
logging.basicConfig(level=logging.INFO, format='%(asctime)-15s %(message)s')
door_name = os.getenv("DOOR_NAME", 'hackeriet')
door_topic = "hackeriet/door/%s/open" % door_name
door_timeout = int(os.getenv("DOOR_TIMEOUT", 2))
mqtt = MQTT()
def main():
logging.debug('Starting main loop')
while True:
users.load()
# Read data from card reader
logging.debug('mifare: waiting for data...')
data = mifare.try_read()
if data:
logging.debug('mifare: data read')
user = users.auth(data[0:16])
if user:
ascii_user = user.encode('ascii', 'replace').decode('ascii')
logging.info('auth: card read for user %s' % ascii_user)
mqtt(door_topic, user)
else:
logging.debug('auth: card data does not belong to a user: %s' % data[0:16])
# Avoid spewing messages every single ms while a card is in front of the reader
time.sleep(door_timeout)
else
logging.debug('mifare: no data read in last attempt')
if __name__ == "__main__":
main()
| Python | 0.000585 |
857a251c7491b626bf948b58806b917ab20e3d1b | Make concat_example always choose the device on to_gpu | chainer/dataset/convert.py | chainer/dataset/convert.py | import numpy
import six
from chainer import cuda
def concat_examples(batch, device=None, padding=None):
"""Concatenates a list of examples into array(s).
Dataset iterator yields a list of examples. If each example is an array,
this function concatenates them along the newly-inserted first axis (called
`batch dimension`) into one array. The basic behavior is same for examples
consisting of multiple arrays, i.e., corresponding arrays of all examples
are concatenated.
For instance, consider each example consists of two arrays ``(x, y)``.
Then, this function concatenates ``x`` 's into one array, and ``y`` 's
into another array, and returns a tuple of these two arrays. Another
example: consider each example is a dictionary of two arrays. Two arrays
have keys ``'x'`` and ``'y'``. Then, this function concatenates ``x`` 's
into one array, and ``y`` 's into another array, and returns a dictionary
with two arrays ``x`` and ``y``.
When the arrays to concatenate have different shapes, the behavior depends
on the ``padding`` value. If ``padding`` is None (default), it raises an
error. Otherwise, it builds an array of the minimum shape that the contents
of all arrays can be substituted to. The padding value is then used to the
extra elements of the resulting arrays.
TODO(beam2d): Add an example.
Args:
batch (list): A list of examples. This is typically given by a dataset
iterator.
device (int): Device ID to which each array is sent. Negative value
indicates the host memory (CPU). If it is omitted, all arrays are
left in the original device.
padding: Padding value for extra elements. If this is None (default),
an error is raised on shape mismatch. Otherwise, an array of
minimum dimensionalities that can accomodate all arrays is created,
and elements outside of the examples are padded by this value.
Returns:
Array, a tuple of arrays, or a dictionary of arrays. The type depends
on the type of each example in the batch.
"""
if len(batch) == 0:
raise ValueError('batch is empty')
if device is None:
def to_device(x):
return x
elif device < 0:
to_device = cuda.to_cpu
else:
to_device = lambda x: cuda.to_gpu(x, device)
first_elem = batch[0]
if isinstance(first_elem, tuple):
result = []
if not isinstance(padding, tuple):
padding = [padding] * len(first_elem)
for i in six.moves.range(len(first_elem)):
result.append(to_device(_concat_arrays(
[example[i] for example in batch], padding[i])))
return tuple(result)
elif isinstance(first_elem, dict):
result = {}
if not isinstance(padding, dict):
padding = {key: padding for key in first_elem}
for key in first_elem:
result[key] = to_device(_concat_arrays(
[example[key] for example in batch], padding[key]))
return result
else:
return to_device(_concat_arrays(batch, padding))
def _concat_arrays(arrays, padding):
if padding is not None:
return _concate_arrays_with_padding(arrays, padding)
xp = cuda.get_array_module(arrays[0])
with cuda.get_device(arrays[0]):
return xp.concatenate([array[None] for array in arrays])
def _concate_arrays_with_padding(arrays, padding):
shape = numpy.array(arrays[0].shape, dtype=int)
for array in arrays[1:]:
if numpy.any(shape != array.shape):
if padding is None:
raise ValueError('shape mismatch within a batch')
else:
numpy.maximum(shape, array.shape, shape)
shape = tuple(numpy.insert(shape, 0, len(arrays)))
xp = cuda.get_array_module(arrays[0])
with cuda.get_device(arrays[0]):
result = xp.full(shape, padding, dtype=arrays[0].dtype)
for i in six.moves.range(len(arrays)):
src = arrays[i]
slices = tuple(slice(dim) for dim in src.shape)
result[(i,) + slices] = src
return result
| import numpy
import six
from chainer import cuda
def concat_examples(batch, device=None, padding=None):
"""Concatenates a list of examples into array(s).
Dataset iterator yields a list of examples. If each example is an array,
this function concatenates them along the newly-inserted first axis (called
`batch dimension`) into one array. The basic behavior is same for examples
consisting of multiple arrays, i.e., corresponding arrays of all examples
are concatenated.
For instance, consider each example consists of two arrays ``(x, y)``.
Then, this function concatenates ``x`` 's into one array, and ``y`` 's
into another array, and returns a tuple of these two arrays. Another
example: consider each example is a dictionary of two arrays. Two arrays
have keys ``'x'`` and ``'y'``. Then, this function concatenates ``x`` 's
into one array, and ``y`` 's into another array, and returns a dictionary
with two arrays ``x`` and ``y``.
When the arrays to concatenate have different shapes, the behavior depends
on the ``padding`` value. If ``padding`` is None (default), it raises an
error. Otherwise, it builds an array of the minimum shape that the contents
of all arrays can be substituted to. The padding value is then used to the
extra elements of the resulting arrays.
TODO(beam2d): Add an example.
Args:
batch (list): A list of examples. This is typically given by a dataset
iterator.
device (int): Device ID to which each array is sent. Negative value
indicates the host memory (CPU). If it is omitted, all arrays are
left in the original device.
padding: Padding value for extra elements. If this is None (default),
an error is raised on shape mismatch. Otherwise, an array of
minimum dimensionalities that can accomodate all arrays is created,
and elements outside of the examples are padded by this value.
Returns:
Array, a tuple of arrays, or a dictionary of arrays. The type depends
on the type of each example in the batch.
"""
if len(batch) == 0:
raise ValueError('batch is empty')
if device is None:
def to_device(x):
return x
elif device < 0:
to_device = cuda.to_cpu
else:
to_device = cuda.to_gpu
first_elem = batch[0]
if isinstance(first_elem, tuple):
result = []
if not isinstance(padding, tuple):
padding = [padding] * len(first_elem)
for i in six.moves.range(len(first_elem)):
result.append(to_device(_concat_arrays(
[example[i] for example in batch], padding[i])))
return tuple(result)
elif isinstance(first_elem, dict):
result = {}
if not isinstance(padding, dict):
padding = {key: padding for key in first_elem}
for key in first_elem:
result[key] = to_device(_concat_arrays(
[example[key] for example in batch], padding[key]))
return result
else:
return to_device(_concat_arrays(batch, padding))
def _concat_arrays(arrays, padding):
if padding is not None:
return _concate_arrays_with_padding(arrays, padding)
xp = cuda.get_array_module(arrays[0])
with cuda.get_device(arrays[0]):
return xp.concatenate([array[None] for array in arrays])
def _concate_arrays_with_padding(arrays, padding):
shape = numpy.array(arrays[0].shape, dtype=int)
for array in arrays[1:]:
if numpy.any(shape != array.shape):
if padding is None:
raise ValueError('shape mismatch within a batch')
else:
numpy.maximum(shape, array.shape, shape)
shape = tuple(numpy.insert(shape, 0, len(arrays)))
xp = cuda.get_array_module(arrays[0])
with cuda.get_device(arrays[0]):
result = xp.full(shape, padding, dtype=arrays[0].dtype)
for i in six.moves.range(len(arrays)):
src = arrays[i]
slices = tuple(slice(dim) for dim in src.shape)
result[(i,) + slices] = src
return result
| Python | 0.000016 |
b7f790d03511c30bfab87f1db0afb30317a7ff2e | Add retry logic for 50x responses | acapi/resources/acquiadata.py | acapi/resources/acquiadata.py | """ Acquia Cloud API data resource. """
import json
import logging
import requests
import requests_cache
import time
from platform import python_version
from pprint import pformat
from ..version import __version__
LOGGER = logging.getLogger('acapi.resources.acquiadata')
class AcquiaData(object):
"""Acquia Cloud API abstract network resource."""
#: User Agent string
USER_AGENT = 'Acquia Cloud API Client/{mver} (Python {pver})'.format(mver=__version__,
pver=python_version())
def __init__(self, uri, auth, data=None):
""" Constructor.
Parameters
----------
uri : str
The base URI for the resource.
auth : tuple
The authentication credentials to use for the request.
data : dict
Raw data from ACAPI.
"""
self.uri = uri
self.auth = auth
self.data = data
self.last_response = None
def create_task(self, uri, data):
""" Create a new task object from a responses response object.
Parameters
----------
uri: str
The URI for the action that triggered the task.
data: dict
The task data returned by the triggering request.
Returns
-------
Task
The Task object.
"""
# We have to do this here to avoid circular dependencies
from .task import Task
task = Task(uri, self.auth, data=data)
return task
def get_last_response(self):
""" Fetch the last response object. """
return self.last_response
def request(self, uri=None, method='GET', data=None, params=None, decode_json=True):
"""Perform a HTTP requests.
Parameters
----------
uri : str
The URI to use for the request.
method : str
The HTTP method to use for the request.
auth : tuple
The authentication credentials to use for the request.
data : dict
Any data to send as part of a post request body.
params : dict
Query string parameters.
Returns
-------
dict
Decoded JSON response data as a dict object.
"""
self.last_response = None
if None == uri:
uri = self.uri
headers = {'User-Agent': self.USER_AGENT}
uri = '{}.json'.format(uri)
if 'GET' == method:
attempt = 0
while attempt <= 5:
resp = requests.get(uri, auth=self.auth, headers=headers, params=params)
if resp.status_code not in range(500, 505):
# No need to retry for if not a server error type.
break
attempt += 1
params['acapi_retry'] = attempt
time.sleep((attempt ** 2.0) / 10)
# We need to unset the property or it sticks around.
if 'acapi_retry' in params:
del params['acapi_retry']
if 'POST' == method:
jdata = json.dumps(data)
resp = requests.post(uri, auth=self.auth, headers=headers, params=params, data=jdata)
# This is a sledgehammer but fine grained invalidation is messy.
requests_cache.clear()
if 'DELETE' == method:
resp = requests.delete(uri, auth=self.auth, headers=headers, params=params)
# Quickest and easiest way to do this.
requests_cache.clear()
if hasattr(resp, 'from_cache') and resp.from_cache:
LOGGER.info("%s %s returned from cache", method, uri)
self.last_response = resp
if resp.status_code != requests.codes.ok:
try:
raise resp.raise_for_status()
except requests.exceptions.HTTPError as exp:
LOGGER.info("Failed request response headers: \n%s",
pformat(exp.response.headers, indent=2))
raise
if decode_json:
return resp.json()
return resp.content
| """ Acquia Cloud API data resource. """
import json
import logging
import requests
import requests_cache
from platform import python_version
from pprint import pformat
from ..version import __version__
LOGGER = logging.getLogger('acapi.resources.acquiadata')
class AcquiaData(object):
"""Acquia Cloud API abstract network resource."""
#: User Agent string
USER_AGENT = 'Acquia Cloud API Client/{mver} (Python {pver})'.format(mver=__version__,
pver=python_version())
def __init__(self, uri, auth, data=None):
""" Constructor.
Parameters
----------
uri : str
The base URI for the resource.
auth : tuple
The authentication credentials to use for the request.
data : dict
Raw data from ACAPI.
"""
self.uri = uri
self.auth = auth
self.data = data
self.last_response = None
def create_task(self, uri, data):
""" Create a new task object from a responses response object.
Parameters
----------
uri: str
The URI for the action that triggered the task.
data: dict
The task data returned by the triggering request.
Returns
-------
Task
The Task object.
"""
# We have to do this here to avoid circular dependencies
from .task import Task
task = Task(uri, self.auth, data=data)
return task
def get_last_response(self):
""" Fetch the last response object. """
return self.last_response
def request(self, uri=None, method='GET', data=None, params=None, decode_json=True):
"""Perform a HTTP requests.
Parameters
----------
uri : str
The URI to use for the request.
method : str
The HTTP method to use for the request.
auth : tuple
The authentication credentials to use for the request.
data : dict
Any data to send as part of a post request body.
params : dict
Query string parameters.
Returns
-------
dict
Decoded JSON response data as a dict object.
"""
self.last_response = None
if None == uri:
uri = self.uri
headers = {'User-Agent': self.USER_AGENT}
uri = '{}.json'.format(uri)
if 'GET' == method:
resp = requests.get(uri, auth=self.auth, headers=headers, params=params)
if 'POST' == method:
jdata = json.dumps(data)
resp = requests.post(uri, auth=self.auth, headers=headers, params=params, data=jdata)
# This is a sledgehammer but fine grained invalidation is messy.
requests_cache.clear()
if 'DELETE' == method:
resp = requests.delete(uri, auth=self.auth, headers=headers, params=params)
# Quickest and easiest way to do this.
requests_cache.clear()
if hasattr(resp, 'from_cache') and resp.from_cache:
LOGGER.info("%s %s returned from cache", method, uri)
self.last_response = resp
if resp.status_code != requests.codes.ok:
try:
raise resp.raise_for_status()
except requests.exceptions.HTTPError as exp:
LOGGER.info("Failed request response headers: \n%s",
pformat(exp.response.headers, indent=2))
raise
if decode_json:
return resp.json()
return resp.content
| Python | 0.000001 |
15403668edf9b81b9dbb2c3b0075416e422ce55c | bump version to dev55 | symposion/__init__.py | symposion/__init__.py | __version__ = "1.0b1.dev55"
| __version__ = "1.0b1.dev54"
| Python | 0 |
c25cf82668817996b45d824cff59eed3b37b9686 | Allow QVR Pro port to be optional on config (#33901) | homeassistant/components/qvr_pro/__init__.py | homeassistant/components/qvr_pro/__init__.py | """Support for QVR Pro NVR software by QNAP."""
import logging
from pyqvrpro import Client
from pyqvrpro.client import AuthenticationError, InsufficientPermissionsError
from requests.exceptions import ConnectionError as RequestsConnectionError
import voluptuous as vol
from homeassistant.components.camera import DOMAIN as CAMERA_DOMAIN
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_PORT, CONF_USERNAME
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.discovery import load_platform
from .const import (
CONF_EXCLUDE_CHANNELS,
DOMAIN,
SERVICE_START_RECORD,
SERVICE_STOP_RECORD,
)
DEFAULT_PORT = 8080
SERVICE_CHANNEL_GUID = "guid"
_LOGGER = logging.getLogger(__name__)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_EXCLUDE_CHANNELS, default=[]): vol.All(
cv.ensure_list_csv, [cv.positive_int]
),
}
)
},
extra=vol.ALLOW_EXTRA,
)
SERVICE_CHANNEL_RECORD_SCHEMA = vol.Schema(
{vol.Required(SERVICE_CHANNEL_GUID): cv.string}
)
def setup(hass, config):
"""Set up the QVR Pro component."""
conf = config[DOMAIN]
user = conf[CONF_USERNAME]
password = conf[CONF_PASSWORD]
host = conf[CONF_HOST]
port = conf[CONF_PORT]
excluded_channels = conf[CONF_EXCLUDE_CHANNELS]
try:
qvrpro = Client(user, password, host, port=port)
channel_resp = qvrpro.get_channel_list()
except InsufficientPermissionsError:
_LOGGER.error("User must have Surveillance Management permission")
return False
except AuthenticationError:
_LOGGER.error("Authentication failed")
return False
except RequestsConnectionError:
_LOGGER.error("Error connecting to QVR server")
return False
channels = []
for channel in channel_resp["channels"]:
if channel["channel_index"] + 1 in excluded_channels:
continue
channels.append(channel)
hass.data[DOMAIN] = {"channels": channels, "client": qvrpro}
load_platform(hass, CAMERA_DOMAIN, DOMAIN, {}, config)
# Register services
def handle_start_record(call):
guid = call.data[SERVICE_CHANNEL_GUID]
qvrpro.start_recording(guid)
def handle_stop_record(call):
guid = call.data[SERVICE_CHANNEL_GUID]
qvrpro.stop_recording(guid)
hass.services.register(
DOMAIN,
SERVICE_START_RECORD,
handle_start_record,
schema=SERVICE_CHANNEL_RECORD_SCHEMA,
)
hass.services.register(
DOMAIN,
SERVICE_STOP_RECORD,
handle_stop_record,
schema=SERVICE_CHANNEL_RECORD_SCHEMA,
)
return True
| """Support for QVR Pro NVR software by QNAP."""
import logging
from pyqvrpro import Client
from pyqvrpro.client import AuthenticationError, InsufficientPermissionsError
from requests.exceptions import ConnectionError as RequestsConnectionError
import voluptuous as vol
from homeassistant.components.camera import DOMAIN as CAMERA_DOMAIN
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_PORT, CONF_USERNAME
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.discovery import load_platform
from .const import (
CONF_EXCLUDE_CHANNELS,
DOMAIN,
SERVICE_START_RECORD,
SERVICE_STOP_RECORD,
)
SERVICE_CHANNEL_GUID = "guid"
_LOGGER = logging.getLogger(__name__)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_PORT): cv.port,
vol.Optional(CONF_EXCLUDE_CHANNELS, default=[]): vol.All(
cv.ensure_list_csv, [cv.positive_int]
),
}
)
},
extra=vol.ALLOW_EXTRA,
)
SERVICE_CHANNEL_RECORD_SCHEMA = vol.Schema(
{vol.Required(SERVICE_CHANNEL_GUID): cv.string}
)
def setup(hass, config):
"""Set up the QVR Pro component."""
conf = config[DOMAIN]
user = conf[CONF_USERNAME]
password = conf[CONF_PASSWORD]
host = conf[CONF_HOST]
port = conf.get(CONF_PORT)
excluded_channels = conf[CONF_EXCLUDE_CHANNELS]
try:
qvrpro = Client(user, password, host, port=port)
channel_resp = qvrpro.get_channel_list()
except InsufficientPermissionsError:
_LOGGER.error("User must have Surveillance Management permission")
return False
except AuthenticationError:
_LOGGER.error("Authentication failed")
return False
except RequestsConnectionError:
_LOGGER.error("Error connecting to QVR server")
return False
channels = []
for channel in channel_resp["channels"]:
if channel["channel_index"] + 1 in excluded_channels:
continue
channels.append(channel)
hass.data[DOMAIN] = {"channels": channels, "client": qvrpro}
load_platform(hass, CAMERA_DOMAIN, DOMAIN, {}, config)
# Register services
def handle_start_record(call):
guid = call.data[SERVICE_CHANNEL_GUID]
qvrpro.start_recording(guid)
def handle_stop_record(call):
guid = call.data[SERVICE_CHANNEL_GUID]
qvrpro.stop_recording(guid)
hass.services.register(
DOMAIN,
SERVICE_START_RECORD,
handle_start_record,
schema=SERVICE_CHANNEL_RECORD_SCHEMA,
)
hass.services.register(
DOMAIN,
SERVICE_STOP_RECORD,
handle_stop_record,
schema=SERVICE_CHANNEL_RECORD_SCHEMA,
)
return True
| Python | 0 |
508dca3ee509b1a7b8a5c79a0b00ade6dc959bb8 | Disable user related views for now | hubology/__init__.py | hubology/__init__.py | from flask import Flask
from flask import request, jsonify
from flask import render_template, current_app
from functools import wraps
import logging
import json
import urllib
import urllib2
import uuid
from flask.ext.login import LoginManager, current_user
# from hubology.models import HubUser
def geocode_location(location_name):
try:
location = None
if location_name not in ('', None):
response = urllib2.urlopen("https://maps.googleapis.com/maps/api/geocode/json?%s" %
urllib.urlencode({'address': location_name, 'sensor':'false'}))
data = response.read()
geo_info = json.loads(data)
results = geo_info.get('results')
if results is not None and len(results) > 0:
geometry = results[0].get('geometry')
if geometry is not None:
location = geometry.get('location')
return location
except:
logging.exception("problem geocoding location")
return None
app = Flask(__name__)
app.secret_key = str(uuid.uuid4())
login_manager = LoginManager()
login_manager.init_app(app)
# @login_manager.user_loader
# def load_user(userid):
# return HubUser.find(userid)
login_manager.login_view = "/sign-in"
login_manager.login_message = u"Please sign in to access hub-ology."
#Setup 404 handler
@app.errorhandler(404)
def page_not_found(e):
return render_template('404.html'), 404
#Setup 500 handler
@app.errorhandler(500)
def internal_server_error(e):
if current_user:
from hubology.views.sign_out import sign_out
sign_out()
return render_template('500.html'), 500
def templated(template=None):
def decorator(f):
@wraps(f)
def decorated_function(*args, **kwargs):
template_name = template
if template_name is None:
template_name = request.endpoint \
.replace('.', '/') + '.html'
ctx = f(*args, **kwargs)
if ctx is None:
ctx = {}
elif not isinstance(ctx, dict):
return ctx
return render_template(template_name, **ctx)
return decorated_function
return decorator
@app.route('/')
@templated('index.html')
def index():
#render the main site page
return dict()
#Import other views
import hubology.views.about
import hubology.views.aboutlogo
import hubology.views.educators
import hubology.views.mentors
import hubology.views.developers
import hubology.views.designers
import hubology.views.inspire
import hubology.views.educate
import hubology.views.do
# import hubology.views.sign_in
# import hubology.views.sign_out
import hubology.views.hub
import hubology.views.map
# import hubology.views.people
import hubology.views.profile
# import hubology.views.delete_profile
| from flask import Flask
from flask import request, jsonify
from flask import render_template, current_app
from functools import wraps
import logging
import json
import urllib
import urllib2
import uuid
from flask.ext.login import LoginManager, current_user
from hubology.models import HubUser
def geocode_location(location_name):
try:
location = None
if location_name not in ('', None):
response = urllib2.urlopen("https://maps.googleapis.com/maps/api/geocode/json?%s" %
urllib.urlencode({'address': location_name, 'sensor':'false'}))
data = response.read()
geo_info = json.loads(data)
results = geo_info.get('results')
if results is not None and len(results) > 0:
geometry = results[0].get('geometry')
if geometry is not None:
location = geometry.get('location')
return location
except:
logging.exception("problem geocoding location")
return None
app = Flask(__name__)
app.secret_key = str(uuid.uuid4())
login_manager = LoginManager()
login_manager.init_app(app)
@login_manager.user_loader
def load_user(userid):
return HubUser.find(userid)
login_manager.login_view = "/sign-in"
login_manager.login_message = u"Please sign in to access hub-ology."
#Setup 404 handler
@app.errorhandler(404)
def page_not_found(e):
return render_template('404.html'), 404
#Setup 500 handler
@app.errorhandler(500)
def internal_server_error(e):
if current_user:
from hubology.views.sign_out import sign_out
sign_out()
return render_template('500.html'), 500
def templated(template=None):
def decorator(f):
@wraps(f)
def decorated_function(*args, **kwargs):
template_name = template
if template_name is None:
template_name = request.endpoint \
.replace('.', '/') + '.html'
ctx = f(*args, **kwargs)
if ctx is None:
ctx = {}
elif not isinstance(ctx, dict):
return ctx
return render_template(template_name, **ctx)
return decorated_function
return decorator
@app.route('/')
@templated('index.html')
def index():
#render the main site page
return dict()
#Import other views
import hubology.views.about
import hubology.views.aboutlogo
import hubology.views.educators
import hubology.views.mentors
import hubology.views.developers
import hubology.views.designers
import hubology.views.inspire
import hubology.views.educate
import hubology.views.do
# import hubology.views.sign_in
# import hubology.views.sign_out
import hubology.views.hub
import hubology.views.map
import hubology.views.people
import hubology.views.profile
import hubology.views.delete_profile
| Python | 0 |
ec14293f02de84a12ce602d6a0dfbb3c21203bc4 | fix data types from ENV | channelstream/cli/utils.py | channelstream/cli/utils.py | import argparse
import copy
import logging
import json
import pkg_resources
import jinja2
import os
from channelstream.cli import CONFIGURABLE_PARAMS, SHARED_DEFAULTS
from channelstream.utils import set_config_types
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
def main():
config = copy.deepcopy(SHARED_DEFAULTS)
parser = argparse.ArgumentParser(add_help=True)
parser.add_argument(
"operation", help="Operation", default=None, choices=["make_config"]
)
parser.add_argument("-j", "--json", dest="json", help="Config JSON", default=None)
parser.add_argument(
"-o", "--output", dest="output", help="Output file", required=True
)
args = parser.parse_args()
if args.json:
data_json = json.loads(args.json)
for key in CONFIGURABLE_PARAMS:
conf_value = data_json.get(key)
if conf_value:
config[key] = conf_value
else:
for key in CONFIGURABLE_PARAMS:
conf_value = os.environ.get(f"channelstream_{key}".upper())
if conf_value is not None:
config[key] = conf_value
config = set_config_types(config)
if args.operation == "make_config":
template_path = os.path.join("templates", "ini", "channelstream.ini.jinja2")
template_str = pkg_resources.resource_string("channelstream", template_path)
template = jinja2.Template(template_str.decode("utf8"))
template_vars = config
compiled = template.render(**template_vars)
with open(args.output, "w") as f:
f.write(compiled)
log.info("Config written")
| import argparse
import copy
import logging
import json
import pkg_resources
import jinja2
import os
from channelstream.cli import CONFIGURABLE_PARAMS, SHARED_DEFAULTS
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
def main():
config = copy.deepcopy(SHARED_DEFAULTS)
parser = argparse.ArgumentParser(add_help=True)
parser.add_argument(
"operation", help="Operation", default=None, choices=["make_config"]
)
parser.add_argument("-j", "--json", dest="json", help="Config JSON", default=None)
parser.add_argument(
"-o", "--output", dest="output", help="Output file", required=True
)
args = parser.parse_args()
if args.json:
data_json = json.loads(args.json)
for key in CONFIGURABLE_PARAMS:
conf_value = data_json.get(key)
if conf_value:
config[key] = conf_value
else:
for key in CONFIGURABLE_PARAMS:
conf_value = os.environ.get(f"channelstream_{key}".upper())
if conf_value is not None:
config[key] = conf_value
if args.operation == "make_config":
template_path = os.path.join("templates", "ini", "channelstream.ini.jinja2")
template_str = pkg_resources.resource_string("channelstream", template_path)
template = jinja2.Template(template_str.decode("utf8"))
template_vars = config
compiled = template.render(**template_vars)
with open(args.output, "w") as f:
f.write(compiled)
log.info("Config written")
| Python | 0.000002 |
ac1f44247a2c3b943641e076154bacab3299ceec | Remove unused user.(show|hide)PastEvents (jsonrpc) | indico/MaKaC/services/implementation/user.py | indico/MaKaC/services/implementation/user.py | # This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from flask import session
from indico.modules.users import User
from indico.util.i18n import _
from indico.util.redis import avatar_links
from MaKaC.services.interface.rpc.common import ServiceError
from MaKaC.services.implementation.base import LoggedOnlyService, AdminService, ParameterManager
from MaKaC.user import AvatarHolder
class UserBaseService(LoggedOnlyService):
def _checkParams(self):
self._pm = ParameterManager(self._params)
userId = self._pm.extract("userId", None)
if userId is not None:
ah = AvatarHolder()
self._target = ah.getById(userId)
else:
raise ServiceError("ERR-U5", _("User id not specified"))
class UserModifyBase(UserBaseService):
def _checkProtection(self):
LoggedOnlyService._checkProtection(self)
if self._aw.getUser():
if not self._target.canModify(self._aw):
raise ServiceError("ERR-U6", _("You are not allowed to perform this request"))
else:
raise ServiceError("ERR-U7", _("You are currently not authenticated. Please log in again."))
class UserGetEmail(LoggedOnlyService):
def _checkParams(self):
LoggedOnlyService._checkParams(self)
self._target = self.getAW().getUser()
def _getAnswer(self):
if self._target:
return self._target.getEmail()
else:
raise ServiceError("ERR-U4", "User is not logged in")
class UserRefreshRedisLinks(AdminService):
def _checkParams(self):
AdminService._checkParams(self)
self._pm = ParameterManager(self._params)
user_id = self._pm.extract("userId", pType=int, allowEmpty=True)
self._user = User.get(user_id) if user_id is not None else session.user
def _getAnswer(self):
avatar_links.delete_avatar(self._user) # clean start
avatar_links.init_links(self._user)
methodMap = {
"data.email.get": UserGetEmail,
"refreshRedisLinks": UserRefreshRedisLinks
}
| # This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from flask import session
from indico.modules.users import User
from indico.util.i18n import _
from indico.util.redis import avatar_links
from MaKaC.services.interface.rpc.common import ServiceError
from MaKaC.services.implementation.base import LoggedOnlyService, AdminService, ParameterManager
from MaKaC.user import AvatarHolder
class UserBaseService(LoggedOnlyService):
def _checkParams(self):
self._pm = ParameterManager(self._params)
userId = self._pm.extract("userId", None)
if userId is not None:
ah = AvatarHolder()
self._target = ah.getById(userId)
else:
raise ServiceError("ERR-U5", _("User id not specified"))
class UserModifyBase(UserBaseService):
def _checkProtection(self):
LoggedOnlyService._checkProtection(self)
if self._aw.getUser():
if not self._target.canModify(self._aw):
raise ServiceError("ERR-U6", _("You are not allowed to perform this request"))
else:
raise ServiceError("ERR-U7", _("You are currently not authenticated. Please log in again."))
class UserGetEmail(LoggedOnlyService):
def _checkParams(self):
LoggedOnlyService._checkParams(self)
self._target = self.getAW().getUser()
def _getAnswer(self):
if self._target:
return self._target.getEmail()
else:
raise ServiceError("ERR-U4", "User is not logged in")
class UserShowPastEvents(UserModifyBase):
def _getAnswer(self):
self._target.getPersonalInfo().setShowPastEvents(True)
return True
class UserHidePastEvents(UserModifyBase):
def _getAnswer(self):
self._target.getPersonalInfo().setShowPastEvents(False)
return True
class UserRefreshRedisLinks(AdminService):
def _checkParams(self):
AdminService._checkParams(self)
self._pm = ParameterManager(self._params)
user_id = self._pm.extract("userId", pType=int, allowEmpty=True)
self._user = User.get(user_id) if user_id is not None else session.user
def _getAnswer(self):
avatar_links.delete_avatar(self._user) # clean start
avatar_links.init_links(self._user)
methodMap = {
"data.email.get": UserGetEmail,
"showPastEvents": UserShowPastEvents,
"hidePastEvents": UserHidePastEvents,
"refreshRedisLinks": UserRefreshRedisLinks
}
| Python | 0.000001 |
ae780b08e27f8567b028dd3411de8829f4f1bfed | Add an option for the number of dimensions in the external FSI config. | SU2_PY/FSI/io/FSI_config.py | SU2_PY/FSI/io/FSI_config.py | #!/usr/bin/env python
# -*-coding:utf-8 -*
# \file FSI_config.py
# \brief Python class for handling configuration file for FSI computation.
# \author THOMAS David, University of Liege, Belgium. Department of Aerospace and Mechanical Engineering
# \version BETA
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
import os, sys, shutil, copy
from ..util import switch
# ----------------------------------------------------------------------
# FSI Configuration Class
# ----------------------------------------------------------------------
class FSIConfig:
"""
Class that contains all the parameters coming from the FSI configuration file.
Read the file and store all the options into a dictionary.
"""
def __init__(self,FileName):
self.ConfigFileName = FileName
self._ConfigContent = {}
self.readConfig()
def __str__(self):
tempString = str()
for key, value in self._ConfigContent.items():
tempString += "{} = {}\n".format(key,value)
return tempString
def __getitem__(self,key):
return self._ConfigContent[key]
def __setitem__(self, key, value):
self._ConfigContent[key] = value
def readConfig(self):
input_file = open(self.ConfigFileName)
while 1:
line = input_file.readline()
if not line:
break
# remove line returns
line = line.strip('\r\n')
# make sure it has useful data
if (not "=" in line) or (line[0] == '%'):
continue
# split across equal sign
line = line.split("=",1)
this_param = line[0].strip()
this_value = line[1].strip()
for case in switch(this_param):
#integer values
if case("NDIM") : pass
#if case("MESH_DEF_LIN_ITER") : pass
#if case("MESH_DEF_NONLIN_ITER") : pass
if case("RESTART_ITER") : pass
if case("NB_EXT_ITER") : pass
if case("NB_FSI_ITER") :
self._ConfigContent[this_param] = int(this_value)
break
#float values
if case("AITKEN_PARAM") : pass
if case("START_TIME") : pass
if case("UNST_TIMESTEP") : pass
if case("UNST_TIME") : pass
if case("FSI_TOLERANCE") :
self._ConfigContent[this_param] = float(this_value)
break
#string values
if case("CFD_CONFIG_FILE_NAME") : pass
if case("CSD_SOLVER") : pass
if case("CSD_CONFIG_FILE_NAME") : pass
if case("RESTART_SOL") : pass
if case("MATCHING_MESH") : pass
if case("DISP_PRED") : pass
if case("AITKEN_RELAX") : pass
if case("UNSTEADY_SIMULATION") : pass
if case("INTERNAL_FLOW") :
#if case("MESH_DEF_METHOD") : pass
self._ConfigContent[this_param] = this_value
break
if case():
print(this_param + " is an invalid option !")
break
#end for
#def dump()
| #!/usr/bin/env python
# -*-coding:utf-8 -*
# \file FSI_config.py
# \brief Python class for handling configuration file for FSI computation.
# \author THOMAS David, University of Liege, Belgium. Department of Aerospace and Mechanical Engineering
# \version BETA
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
import os, sys, shutil, copy
from ..util import switch
# ----------------------------------------------------------------------
# FSI Configuration Class
# ----------------------------------------------------------------------
class FSIConfig:
"""
Class that contains all the parameters coming from the FSI configuration file.
Read the file and store all the options into a dictionary.
"""
def __init__(self,FileName):
self.ConfigFileName = FileName
self._ConfigContent = {}
self.readConfig()
def __str__(self):
tempString = str()
for key, value in self._ConfigContent.items():
tempString += "{} = {}\n".format(key,value)
return tempString
def __getitem__(self,key):
return self._ConfigContent[key]
def __setitem__(self, key, value):
self._ConfigContent[key] = value
def readConfig(self):
input_file = open(self.ConfigFileName)
while 1:
line = input_file.readline()
if not line:
break
# remove line returns
line = line.strip('\r\n')
# make sure it has useful data
if (not "=" in line) or (line[0] == '%'):
continue
# split across equal sign
line = line.split("=",1)
this_param = line[0].strip()
this_value = line[1].strip()
for case in switch(this_param):
#integer values
#if case("NDIM") : pass
#if case("MESH_DEF_LIN_ITER") : pass
#if case("MESH_DEF_NONLIN_ITER") : pass
if case("RESTART_ITER") : pass
if case("NB_EXT_ITER") : pass
if case("NB_FSI_ITER") :
self._ConfigContent[this_param] = int(this_value)
break
#float values
if case("AITKEN_PARAM") : pass
if case("START_TIME") : pass
if case("UNST_TIMESTEP") : pass
if case("UNST_TIME") : pass
if case("FSI_TOLERANCE") :
self._ConfigContent[this_param] = float(this_value)
break
#string values
if case("CFD_CONFIG_FILE_NAME") : pass
if case("CSD_SOLVER") : pass
if case("CSD_CONFIG_FILE_NAME") : pass
if case("RESTART_SOL") : pass
if case("MATCHING_MESH") : pass
if case("DISP_PRED") : pass
if case("AITKEN_RELAX") : pass
if case("UNSTEADY_SIMULATION") : pass
if case("INTERNAL_FLOW") :
#if case("MESH_DEF_METHOD") : pass
self._ConfigContent[this_param] = this_value
break
if case():
print(this_param + " is an invalid option !")
break
#end for
#def dump()
| Python | 0 |
23fd2953a41d8b087fa5252df2de0baf36244e43 | remove stupid debug string | doc/readthedoc/conf.py | doc/readthedoc/conf.py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
from recommonmark.parser import CommonMarkParser
sys.path.insert(0, os.path.abspath('_build_temp/python'))
# -- Project information -----------------------------------------------------
project = 'FATE'
copyright = '2020, FederatedAI'
author = 'FederatedAI'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autosummary',
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'autodocsumm',
'recommonmark'
]
autosummary_generate = True
source_parsers = {
'.md': CommonMarkParser,
}
source_suffix = ['.rst', '.md']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_context = {
'css_files': [
'_static/theme_overrides.css', # override wide tables in RTD theme
],
}
add_module_names = False
master_doc = 'index'
# hack to replace rst file link to html link
def ultimateReplace(app, docname, source):
result = source[0]
result = result.replace(".rst", ".html")
source[0] = result
def setup(app):
if not os.path.exists("_build_temp"):
import shutil
import tempfile
from pathlib import Path
with tempfile.TemporaryDirectory() as d:
shutil.copytree("../..", Path(d).joinpath("_build_temp"))
shutil.copytree(Path(d).joinpath("_build_temp"), "_build_temp")
app.add_config_value('ultimate_replacements', {}, True)
app.connect('source-read', ultimateReplace)
| # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
from recommonmark.parser import CommonMarkParser
sys.path.insert(0, os.path.abspath('_build_temp/python'))
print("sage sage sage")
# -- Project information -----------------------------------------------------
project = 'FATE'
copyright = '2020, FederatedAI'
author = 'FederatedAI'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autosummary',
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'autodocsumm',
'recommonmark'
]
autosummary_generate = True
source_parsers = {
'.md': CommonMarkParser,
}
source_suffix = ['.rst', '.md']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_context = {
'css_files': [
'_static/theme_overrides.css', # override wide tables in RTD theme
],
}
add_module_names = False
master_doc = 'index'
# hack to replace rst file link to html link
def ultimateReplace(app, docname, source):
result = source[0]
result = result.replace(".rst", ".html")
source[0] = result
def setup(app):
if not os.path.exists("_build_temp"):
import shutil
import tempfile
from pathlib import Path
with tempfile.TemporaryDirectory() as d:
shutil.copytree("../..", Path(d).joinpath("_build_temp"))
shutil.copytree(Path(d).joinpath("_build_temp"), "_build_temp")
app.add_config_value('ultimate_replacements', {}, True)
app.connect('source-read', ultimateReplace)
| Python | 0.001727 |
f2181d50fb17be9e1db6129300d720139ca00636 | use absolute imports for compatibility with python 2.5 | scrapy/selector/__init__.py | scrapy/selector/__init__.py | """
XPath selectors
Two backends are currently available: libxml2 and lxml
To select the backend explicitly use the SELECTORS_BACKEND variable in your
project. Otherwise, libxml2 will be tried first. If libxml2 is not available,
lxml will be used.
"""
from scrapy.conf import settings
if settings['SELECTORS_BACKEND'] == 'lxml':
from scrapy.selector.lxmlsel import *
elif settings['SELECTORS_BACKEND'] == 'libxml2':
from scrapy.selector.libxml2sel import *
elif settings['SELECTORS_BACKEND'] == 'dummy':
from scrapy.selector.dummysel import *
else:
try:
import libxml2
except ImportError:
try:
import lxml
except ImportError:
from scrapy.selector.dummysel import *
else:
from scrapy.selector.lxmlsel import *
else:
from scrapy.selector.libxml2sel import *
| """
XPath selectors
Two backends are currently available: libxml2 and lxml
To select the backend explicitly use the SELECTORS_BACKEND variable in your
project. Otherwise, libxml2 will be tried first. If libxml2 is not available,
lxml will be used.
"""
from scrapy.conf import settings
if settings['SELECTORS_BACKEND'] == 'lxml':
from .lxmlsel import *
elif settings['SELECTORS_BACKEND'] == 'libxml2':
from .libxml2sel import *
elif settings['SELECTORS_BACKEND'] == 'dummy':
from .dummysel import *
else:
try:
import libxml2
except ImportError:
try:
import lxml
except ImportError:
from .dummysel import *
else:
from .lxmlsel import *
else:
from .libxml2sel import *
| Python | 0 |
f3da704e0c603574d7ff56b8b4d66ac2c34d015a | Output image fix | Server/src/server/reporters/tiled_brick_position_reporter.py | Server/src/server/reporters/tiled_brick_position_reporter.py | import cv2
from reporter import Reporter
class TiledBrickPositionReporter(Reporter):
def __init__(self, valid_locations, board_recognizer, board_descriptor, tile_brick_detector, camera):
"""
:param valid_locations Locations to search for brick in
:param board_recognizer Board recognizer
:param board_descriptor Board descriptor
:param tile_brick_detector Tile brick detector
:param camera Camera
"""
self.valid_locations = valid_locations
self.board_recognizer = board_recognizer
self.board_descriptor = board_descriptor
self.tile_brick_detector = tile_brick_detector
self.camera = camera
def run(self):
"""
Waits for brick to be positioned at any of the valid positions.
Callback function: (tile) -> ()
"""
while not self.stopped:
image = self.camera.read()
if image is None:
continue
self.board_descriptor.snapshot = self.board_recognizer.find_board(image, self.board_descriptor)
if self.board_descriptor.is_recognized():
cv2.imwrite("output_board_recognized.png", self.board_descriptor.snapshot.board_image)
tile = self.tile_brick_detector.find_brick_among_tiles(self.board_descriptor, self.valid_locations)
if tile is not None:
cv2.imwrite("output_brick_recognized.png", image)
self.callback_function(tile)
self.stop()
else:
cv2.imwrite("output_board_not_recognized.png", image)
| import cv2
from reporter import Reporter
class TiledBrickPositionReporter(Reporter):
def __init__(self, valid_locations, board_recognizer, board_descriptor, tile_brick_detector, camera):
"""
:param valid_locations Locations to search for brick in
:param board_recognizer Board recognizer
:param board_descriptor Board descriptor
:param tile_brick_detector Tile brick detector
:param camera Camera
"""
self.valid_locations = valid_locations
self.board_recognizer = board_recognizer
self.board_descriptor = board_descriptor
self.tile_brick_detector = tile_brick_detector
self.camera = camera
def run(self):
"""
Waits for brick to be positioned at any of the valid positions.
Callback function: (tile) -> ()
"""
while not self.stopped:
image = self.camera.read()
if image is None:
continue
self.board_descriptor.snapshot = self.board_recognizer.find_board(image, self.board_descriptor)
if self.board_descriptor.is_recognized():
cv2.imwrite("output_board_recognized.png", self.board_descriptor.snapshot.board_image)
tile = self.tile_brick_detector.find_brick_among_tiles(self.board_descriptor, self.valid_locations)
if tile is not None:
cv2.imwrite("output_brick_recognized.png", image)
self.callback_function(tile)
self.stop()
else:
cv2.imwrite("output_board_not_recognized.png", self.board_descriptor.snapshot.board_image)
| Python | 0.999992 |
d2cadcb9be08730f5ccefec5f3e0316265ebf307 | Check request ID value | integration-tests/features/src/json_utils.py | integration-tests/features/src/json_utils.py | """Functions for handling JSON responses returned by various API endpoints."""
import string
from src.attribute_checks import *
def get_value_using_path(obj, path):
"""Get the attribute value using the XMLpath-like path specification.
Return any attribute stored in the nested object and list hierarchy using
the 'path' where path consists of:
keys (selectors)
indexes (in case of arrays)
separated by slash, ie. "key1/0/key_x".
Usage:
get_value_using_path({"x" : {"y" : "z"}}, "x")) -> {"y" : "z"}
get_value_using_path({"x" : {"y" : "z"}}, "x/y")) -> "z"
get_value_using_path(["x", "y", "z"], "0")) -> "x"
get_value_using_path(["x", "y", "z"], "1")) -> "y"
get_value_using_path({"key1" : ["x", "y", "z"],
"key2" : ["a", "b", "c", "d"]}, "key1/1")) -> "y"
get_value_using_path({"key1" : ["x", "y", "z"],
"key2" : ["a", "b", "c", "d"]}, "key2/1")) -> "b"
"""
keys = path.split("/")
for key in keys:
if key.isdigit():
obj = obj[int(key)]
else:
obj = obj[key]
return obj
def check_timestamp_in_json_response(context, attribute):
"""Check if the timestamp stored in given attribute is correct."""
timestamp = context.response.json().get(attribute)
check_timestamp(timestamp)
def check_request_id_value_in_json_response(context, attribute_name):
"""Check the request ID attribute in the JSON response.
Check if ID is stored in a format like: '71769af6-0a39-4242-94be-1f84f04c8a56'
"""
response = context.response
assert response is not None
json_data = response.json()
assert json_data is not None
check_attribute_presence(json_data, attribute_name)
id_attribute = json_data[attribute_name]
assert id_attribute is not None
assert check_uuid(id_attribute)
def check_id_value_in_json_response(context, id_attribute_name):
"""Check the ID attribute in the JSON response.
Check if ID is stored in a format like: '477e85660c504b698beae2b5f2a28b4e'
ie. it is a string with 32 characters containing 32 hexadecimal digits
"""
response = context.response
assert response is not None
json_data = response.json()
assert json_data is not None
check_attribute_presence(json_data, id_attribute_name)
id_attribute = json_data[id_attribute_name]
assert id_attribute is not None
assert isinstance(id_attribute, str) and len(id_attribute) == 32
assert all(char in string.hexdigits for char in id_attribute)
def is_empty_json_response(context):
"""Check if the JSON response is empty (but not None)."""
return context.response.json() == {}
| """Functions for handling JSON responses returned by various API endpoints."""
import string
from src.attribute_checks import *
def get_value_using_path(obj, path):
"""Get the attribute value using the XMLpath-like path specification.
Return any attribute stored in the nested object and list hierarchy using
the 'path' where path consists of:
keys (selectors)
indexes (in case of arrays)
separated by slash, ie. "key1/0/key_x".
Usage:
get_value_using_path({"x" : {"y" : "z"}}, "x")) -> {"y" : "z"}
get_value_using_path({"x" : {"y" : "z"}}, "x/y")) -> "z"
get_value_using_path(["x", "y", "z"], "0")) -> "x"
get_value_using_path(["x", "y", "z"], "1")) -> "y"
get_value_using_path({"key1" : ["x", "y", "z"],
"key2" : ["a", "b", "c", "d"]}, "key1/1")) -> "y"
get_value_using_path({"key1" : ["x", "y", "z"],
"key2" : ["a", "b", "c", "d"]}, "key2/1")) -> "b"
"""
keys = path.split("/")
for key in keys:
if key.isdigit():
obj = obj[int(key)]
else:
obj = obj[key]
return obj
def check_timestamp_in_json_response(context, attribute):
"""Check if the timestamp stored in given attribute is correct."""
timestamp = context.response.json().get(attribute)
check_timestamp(timestamp)
def check_id_value_in_json_response(context, id_attribute_name):
"""Check the ID attribute in the JSON response.
Check if ID is stored in a format like: '477e85660c504b698beae2b5f2a28b4e'
ie. it is a string with 32 characters containing 32 hexadecimal digits
"""
response = context.response
assert response is not None
json_data = response.json()
assert json_data is not None
check_attribute_presence(json_data, id_attribute_name)
id_attribute = json_data[id_attribute_name]
assert id_attribute is not None
assert isinstance(id_attribute, str) and len(id_attribute) == 32
assert all(char in string.hexdigits for char in id_attribute)
def is_empty_json_response(context):
"""Check if the JSON response is empty (but not None)."""
return context.response.json() == {}
| Python | 0 |
de4e5a34aaa322b2ce83161dd4bce7897953ab73 | add Unix socket support to API collector | intelmq/bots/collectors/api/collector_api.py | intelmq/bots/collectors/api/collector_api.py | # SPDX-FileCopyrightText: 2018 tavi.poldma
#
# SPDX-License-Identifier: AGPL-3.0-or-later
# -*- coding: utf-8 -*-
"""
API Collector bot
"""
from threading import Thread
from typing import Optional
import os
import socket
from intelmq.lib.bot import CollectorBot
from intelmq.lib.exceptions import MissingDependencyError
try:
import tornado.web
from tornado.ioloop import IOLoop
from tornado.netutil import bind_unix_socket
from tornado.httpserver import HTTPServer
except ImportError:
IOLoop = None
else:
class Application(tornado.web.Application):
def __init__(self, request_handler, *args, **kwargs):
self.request_handler = request_handler
super().__init__(*args, **kwargs)
class MainHandler(tornado.web.RequestHandler):
def post(self):
data = self.request.body
self.application.request_handler(data)
class APICollectorBot(CollectorBot):
"""Collect data by exposing a HTTP API interface"""
name: str = "API"
port: int = 5000
__collector_empty_process: bool = True
provider: str = "APICollector"
__is_multithreadable: bool = False
use_socket = False
socket_path = '/tmp/imq_api_default_socket'
_server: Optional[HTTPServer] = None
_unix_socket: Optional[socket.socket] = None
def init(self):
if IOLoop is None:
raise MissingDependencyError("tornado")
app = Application(self.request_handler, [
("/intelmq/push", MainHandler),
])
if self.use_socket:
self.server = HTTPServer(app)
self._unix_socket = bind_unix_socket(self.socket_path)
self.server.add_socket(self._unix_socket)
else:
self.server = app.listen(self.port)
self.eventLoopThread = Thread(target=IOLoop.current().start)
self.eventLoopThread.daemon = True
self.eventLoopThread.start()
def request_handler(self, data):
report = self.new_report()
report.add("raw", data)
self.send_message(report)
def process(self):
pass
def shutdown(self):
if self.server:
# Closes the server and the socket, prevents address already in use
self.server.stop()
if IOLoop.current():
IOLoop.current().stop()
BOT = APICollectorBot
| # SPDX-FileCopyrightText: 2018 tavi.poldma
#
# SPDX-License-Identifier: AGPL-3.0-or-later
# -*- coding: utf-8 -*-
"""
API Collector bot
"""
from threading import Thread
from intelmq.lib.bot import CollectorBot
from intelmq.lib.exceptions import MissingDependencyError
try:
import tornado.web
from tornado.ioloop import IOLoop
except ImportError:
IOLoop = None
else:
class Application(tornado.web.Application):
def __init__(self, request_handler, *args, **kwargs):
self.request_handler = request_handler
super().__init__(*args, **kwargs)
class MainHandler(tornado.web.RequestHandler):
def post(self):
data = self.request.body
self.application.request_handler(data)
class APICollectorBot(CollectorBot):
"""Collect data by exposing a HTTP API interface"""
name: str = "API"
port: int = 5000
__collector_empty_process: bool = True
provider: str = "APICollector"
__is_multithreadable: bool = False
def init(self):
if IOLoop is None:
raise MissingDependencyError("tornado")
app = Application(self.request_handler, [
("/intelmq/push", MainHandler),
])
self.server = app.listen(self.port)
self.eventLoopThread = Thread(target=IOLoop.current().start)
self.eventLoopThread.daemon = True
self.eventLoopThread.start()
def request_handler(self, data):
report = self.new_report()
report.add("raw", data)
self.send_message(report)
def process(self):
pass
def shutdown(self):
if self.server:
# Closes the server and the socket, prevents address already in use
self.server.stop()
if IOLoop.current():
IOLoop.current().stop()
BOT = APICollectorBot
| Python | 0 |
5b9c9ab8f8aef01c53b761714bb6b7072fa01aa4 | clean up commandArgs construction for HadoopJob in Python client | genie-client/src/main/python/pygenie/jobs/hadoop.py | genie-client/src/main/python/pygenie/jobs/hadoop.py | """
genie.jobs.hadoop
This module implements creating Hadoop jobs.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import os
from .core import GenieJob
from .utils import (add_to_repr,
arg_string)
logger = logging.getLogger('com.netflix.genie.jobs.hadoop')
class HadoopJob(GenieJob):
"""Hadoop job."""
def __init__(self, conf=None):
super(HadoopJob, self).__init__(conf=conf)
self._properties = dict()
self._property_file = None
self._script = None
@property
def cmd_args(self):
"""
The constructed command line arguments using the job's definition. If the
command line arguments are set explicitly (by calling
:py:meth:`command_arguments`) this will be the same.
"""
if self._command_arguments is not None:
return self._command_arguments
props_str = ' '.join([
'-D{name}={value}'.format(name=k, value=v) \
for k, v in self._properties.iteritems()
])
prop_file_str = '-conf {}'.format(os.path.basename(self._property_file)) \
if self._property_file \
else ''
return '{prop_file} {props} {cmd}' \
.format(prop_file=prop_file_str,
props=props_str,
cmd=self._script or '') \
.strip()
def command(self, script):
"""Alias for :py:meth:`HadoopJob.script`"""
return self.script(script)
@add_to_repr('append')
def property(self, name, value):
"""
Sets a property for the job.
Using the name and value passed in, the following will be constructed in
the command-line when executing:
'-Dname=value'
Example:
>>> job = HadoopJob() \\
... .property('mapred.foo', 'fizz') \\
... .property('mapred.bar', 'buzz')
Args:
name (str): The property name.
value (str): The property value.
Returns:
:py:class:`HadoopJob`: self
"""
self._properties[name] = value
return self
@arg_string
@add_to_repr('overwrite')
def property_file(self, _property_file):
"""
Sets a configuration/property file for the job.
Using the value passed in, the following will be constructed in the
command-line when executing:
'-conf file'
Example:
>>> job = HadoopJob() \\
... .property_file('/Users/jsmith/my_properties.conf')
Args:
_property_file (str): The path to the property file.
Returns:
:py:class:`HadoopJob`: self
"""
self._add_dependency(_property_file)
return self
@arg_string
@add_to_repr('overwrite')
def script(self, _script):
"""
Sets the script to run for the job.
Example:
>>> job = HadoopJob() \\
... .script("/Users/jdoe/my_job.jar")
>>> job = HadoopJob() \\
... .script("version")
>>> job = HadoopJob() \\
... .script("fs -ls /dir/")
Args:
script (str): A path to a script file or the code to run.
Returns:
:py:class:`HadoopJob`: self
"""
| """
genie.jobs.hadoop
This module implements creating Hadoop jobs.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import os
from .core import GenieJob
from .utils import (add_to_repr,
arg_string)
logger = logging.getLogger('com.netflix.genie.jobs.hadoop')
class HadoopJob(GenieJob):
"""Hadoop job."""
def __init__(self, conf=None):
super(HadoopJob, self).__init__(conf=conf)
self._properties = dict()
self._property_file = None
self._script = None
@property
def cmd_args(self):
"""
The constructed command line arguments using the job's definition. If the
command line arguments are set explicitly (by calling
:py:meth:`command_arguments`) this will be the same.
"""
if self._command_arguments is not None:
return self._command_arguments
props_str = ' '.join([
'-D{name}={value}'.format(name=k, value=v) \
for k, v in self._properties.iteritems()
])
prop_file_str = '-conf {}'.format(os.path.basename(self._property_file)) \
if self._property_file \
else ''
return '{prop_file} {props} {cmd}' \
.format(prop_file=prop_file_str,
props=props_str,
cmd=self._script) \
.strip()
def command(self, script):
"""Alias for :py:meth:`HadoopJob.script`"""
return self.script(script)
@add_to_repr('append')
def property(self, name, value):
"""
Sets a property for the job.
Using the name and value passed in, the following will be constructed in
the command-line when executing:
'-Dname=value'
Example:
>>> job = HadoopJob() \\
... .property('mapred.foo', 'fizz') \\
... .property('mapred.bar', 'buzz')
Args:
name (str): The property name.
value (str): The property value.
Returns:
:py:class:`HadoopJob`: self
"""
self._properties[name] = value
return self
@arg_string
@add_to_repr('overwrite')
def property_file(self, _property_file):
"""
Sets a configuration/property file for the job.
Using the value passed in, the following will be constructed in the
command-line when executing:
'-conf file'
Example:
>>> job = HadoopJob() \\
... .property_file('/Users/jsmith/my_properties.conf')
Args:
_property_file (str): The path to the property file.
Returns:
:py:class:`HadoopJob`: self
"""
self._add_dependency(_property_file)
return self
@arg_string
@add_to_repr('overwrite')
def script(self, _script):
"""
Sets the script to run for the job.
Example:
>>> job = HadoopJob() \\
... .script("/Users/jdoe/my_job.jar")
>>> job = HadoopJob() \\
... .script("version")
>>> job = HadoopJob() \\
... .script("fs -ls /dir/")
Args:
script (str): A path to a script file or the code to run.
Returns:
:py:class:`HadoopJob`: self
"""
| Python | 0 |
7dc01fa4593e81448db2749d460737cbfa57b63d | Return normalized version | wger/__init__.py | wger/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
:copyright: 2011, 2012 by OpenSlides team, see AUTHORS.
:license: GNU GPL, see LICENSE for more details.
"""
VERSION = (1, 9, 0, 'beta', 1)
RELEASE = False
def get_version(version=None, release=None):
"""Derives a PEP386-compliant version number from VERSION."""
if version is None:
version = VERSION
if release is None:
release = RELEASE
assert len(version) == 5
assert version[3] in ('alpha', 'beta', 'rc', 'final')
# Now build the two parts of the version number:
# main = X.Y[.Z]
# sub = .devN - for pre-alpha releases
# | {a|b|c}N - for alpha, beta and rc releases
main_parts = 2 if version[2] == 0 else 3
main = '.'.join(str(x) for x in version[:main_parts])
if version[3] != 'final':
mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'rc'}
sub = mapping[version[3]] + str(version[4])
else:
sub = ''
if not release:
sub += '.dev0'
return main + sub
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
:copyright: 2011, 2012 by OpenSlides team, see AUTHORS.
:license: GNU GPL, see LICENSE for more details.
"""
VERSION = (1, 9, 0, 'beta', 1)
RELEASE = False
def get_version(version=None, release=None):
"""Derives a PEP386-compliant version number from VERSION."""
if version is None:
version = VERSION
if release is None:
release = RELEASE
assert len(version) == 5
assert version[3] in ('alpha', 'beta', 'rc', 'final')
# Now build the two parts of the version number:
# main = X.Y[.Z]
# sub = .devN - for pre-alpha releases
# | {a|b|c}N - for alpha, beta and rc releases
main_parts = 2 if version[2] == 0 else 3
main = '.'.join(str(x) for x in version[:main_parts])
if version[3] != 'final':
mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'rc'}
sub = mapping[version[3]] + str(version[4])
else:
sub = ''
if not release:
sub += '-dev'
return main + sub
| Python | 0.000005 |
a7a14619f7662ccb510b6a0031a58647cf0b34e7 | Remove duplicated path for build script | whack/builder.py | whack/builder.py | import os
import subprocess
from . import downloads
from .tempdir import create_temporary_dir
from .common import WHACK_ROOT
from .files import mkdir_p, write_file
from .errors import FileNotFoundError
def build(package_request, package_dir):
with create_temporary_dir() as build_dir:
_build_in_dir(package_request, build_dir, package_dir)
def _build_in_dir(package_request, build_dir, package_dir):
params = package_request.params()
package_request.write_source_to(build_dir)
build_script = "whack/build"
build_script_path = os.path.join(build_dir, build_script)
if not os.path.exists(build_script_path):
message = "{0} script not found in package source {1}".format(
build_script, package_request.source_uri
)
raise FileNotFoundError(message)
build_env = _params_to_build_env(params)
_fetch_downloads(build_dir, build_env)
mkdir_p(package_dir)
build_command = [
"whack-run",
os.path.abspath(package_dir), # package_dir is mounted at WHACK_ROOT
build_script_path, # build_script is executed
WHACK_ROOT # WHACK_ROOT is passed as the first argument to build_script
]
subprocess.check_call(build_command, cwd=build_dir, env=build_env)
write_file(
os.path.join(package_dir, ".whack-package-name"),
package_request.name()
)
def _fetch_downloads(build_dir, build_env):
downloads_file_path = os.path.join(build_dir, "whack/downloads")
downloads.fetch_downloads(downloads_file_path, build_env, build_dir)
def _params_to_build_env(params):
build_env = os.environ.copy()
for name, value in (params or {}).iteritems():
build_env[name.upper()] = str(value)
return build_env
| import os
import subprocess
from . import downloads
from .tempdir import create_temporary_dir
from .common import WHACK_ROOT
from .files import mkdir_p, write_file
from .errors import FileNotFoundError
def build(package_request, package_dir):
with create_temporary_dir() as build_dir:
_build_in_dir(package_request, build_dir, package_dir)
def _build_in_dir(package_request, build_dir, package_dir):
params = package_request.params()
package_request.write_source_to(build_dir)
build_script = os.path.join(build_dir, "whack/build")
if not os.path.exists(build_script):
message = "whack/build script not found in package source {0}".format(
package_request.source_uri
)
raise FileNotFoundError(message)
build_env = _params_to_build_env(params)
_fetch_downloads(build_dir, build_env)
mkdir_p(package_dir)
build_command = [
"whack-run",
os.path.abspath(package_dir), # package_dir is mounted at WHACK_ROOT
build_script, # build_script is executed
WHACK_ROOT # WHACK_ROOT is passed as the first argument to build_script
]
subprocess.check_call(build_command, cwd=build_dir, env=build_env)
write_file(
os.path.join(package_dir, ".whack-package-name"),
package_request.name()
)
def _fetch_downloads(build_dir, build_env):
downloads_file_path = os.path.join(build_dir, "whack/downloads")
downloads.fetch_downloads(downloads_file_path, build_env, build_dir)
def _params_to_build_env(params):
build_env = os.environ.copy()
for name, value in (params or {}).iteritems():
build_env[name.upper()] = str(value)
return build_env
| Python | 0.000001 |
4c017462c41ad080c1f6a98f8be7ef843f379253 | Fix test name | tests/search_backend_sphinx.py | tests/search_backend_sphinx.py | from wolis.test_case import WolisTestCase
from wolis import utils
class SearchBackendSphinxTest(WolisTestCase):
@utils.restrict_database('mysql*', 'postgres')
@utils.restrict_phpbb_version('>=3.1.0')
def test_set_search_backend(self):
self.login('morpheus', 'morpheus')
self.acp_login('morpheus', 'morpheus')
self.change_acp_knob(
link_text='Search settings',
check_page_text='Here you can define what search backend will be used',
name='config[search_type]',
value='phpbb_search_fulltext_sphinx',
confirm=True,
)
if __name__ == '__main__':
import unittest
unittest.main()
| from wolis.test_case import WolisTestCase
from wolis import utils
class SearchBackendMysqlTest(WolisTestCase):
@utils.restrict_database('mysql*', 'postgres')
@utils.restrict_phpbb_version('>=3.1.0')
def test_set_search_backend(self):
self.login('morpheus', 'morpheus')
self.acp_login('morpheus', 'morpheus')
self.change_acp_knob(
link_text='Search settings',
check_page_text='Here you can define what search backend will be used',
name='config[search_type]',
value='phpbb_search_fulltext_sphinx',
confirm=True,
)
if __name__ == '__main__':
import unittest
unittest.main()
| Python | 0.001029 |
58c6cf44fd73aa4d33d48f1defe2ec65e6f20c50 | Add debugging info. | docs/src/conf.py | docs/src/conf.py | # -*- coding: utf-8 -*-
import os
import shutil
import logging
from datetime import datetime
from subprocess import call, Popen, PIPE
log = logging.getLogger(__name__)
try:
import simplejson as json
except ImportError:
import json
def fake_ignore(cwd, contents):
for entry in contents:
log.info('Copying %s/%s to its final destination...', cwd, entry)
return []
def prepare(globs, locs):
git = Popen('which git 2> %s' % os.devnull, shell=True, stdout=PIPE
).stdout.read().strip()
doxygen = Popen('which doxygen 2> %s' % os.devnull, shell=True, stdout=PIPE
).stdout.read().strip()
cwd = os.getcwd()
root = os.path.abspath(os.path.join(cwd, '..', '..'))
print "Running from %s..." % (root, )
os.chdir(root)
buildenv = os.path.join(root, 'vendor', 'erebot', 'buildenv')
generic_doc = os.path.join(root, 'docs', 'src', 'generic')
origin = Popen([git, 'config', '--local', 'remote.origin.url'],
stdout=PIPE).stdout.read().strip()
project = origin.rpartition('/')[2]
if project.endswith('.git'):
project = project[:-4]
locs['project'] = project
git_tag = Popen(['git', 'describe', '--tags', '--exact', '--first-parent'],
stdout=PIPE).communicate()[0].strip()
if git_tag:
locs['version'] = locs['release'] = git_tag
else:
locs['version'] = locs['release'] = 'latest'
for repository, path in (
('git://github.com/Erebot/Erebot_Buildenv.git', buildenv),
('git://github.com/Erebot/Erebot_Module_Skeleton_Doc.git', generic_doc)
):
if not os.path.isdir(path):
os.makedirs(path)
print "Cloning %s into %s..." % (repository, path)
call([git, 'clone', repository, path])
else:
os.chdir(path)
print "Updating clone of %s in %s..." % (repository, path)
call([git, 'checkout', 'master'])
call([git, 'pull'])
os.chdir(root)
composer = json.load(open(os.path.join(root, 'composer.json'), 'r'))
# Run doxygen
call([doxygen, os.path.join(root, 'Doxyfile')], env={
'COMPONENT_NAME': locs['project'],
'COMPONENT_VERSION': locs['version'],
'COMPONENT_BRIEF': composer.get('description', ''),
})
# Copy doxygen output to Sphinx's output folder
try:
shutil.copytree(
os.path.join(root, 'docs', 'api', 'html'),
os.path.join(root, 'docs', 'enduser', 'html', 'api'),
ignore=fake_ignore,
)
except OSError:
pass
os.chdir(cwd)
real_conf = os.path.join(buildenv, 'sphinx', 'conf.py')
print "Including real configuration file (%s)..." % (real_conf, )
execfile(real_conf, globs, locs)
locs['copyright'] = u'2012-%d, XRL Team. All rights reserved' % \
datetime.now().year
prepare(globals(), locals())
| # -*- coding: utf-8 -*-
import os
import shutil
from datetime import datetime
from subprocess import call, Popen, PIPE
try:
import simplejson as json
except ImportError:
import json
def prepare(globs, locs):
git = Popen('which git 2> %s' % os.devnull, shell=True, stdout=PIPE
).stdout.read().strip()
doxygen = Popen('which doxygen 2> %s' % os.devnull, shell=True, stdout=PIPE
).stdout.read().strip()
cwd = os.getcwd()
root = os.path.abspath(os.path.join(cwd, '..', '..'))
print "Running from %s..." % (root, )
os.chdir(root)
buildenv = os.path.join(root, 'vendor', 'erebot', 'buildenv')
generic_doc = os.path.join(root, 'docs', 'src', 'generic')
origin = Popen([git, 'config', '--local', 'remote.origin.url'],
stdout=PIPE).stdout.read().strip()
project = origin.rpartition('/')[2]
if project.endswith('.git'):
project = project[:-4]
locs['project'] = project
git_tag = Popen(['git', 'describe', '--tags', '--exact', '--first-parent'],
stdout=PIPE).communicate()[0].strip()
if git_tag:
locs['version'] = locs['release'] = git_tag
else:
locs['version'] = locs['release'] = 'latest'
for repository, path in (
('git://github.com/Erebot/Erebot_Buildenv.git', buildenv),
('git://github.com/Erebot/Erebot_Module_Skeleton_Doc.git', generic_doc)
):
if not os.path.isdir(path):
os.makedirs(path)
print "Cloning %s into %s..." % (repository, path)
call([git, 'clone', repository, path])
else:
os.chdir(path)
print "Updating clone of %s in %s..." % (repository, path)
call([git, 'checkout', 'master'])
call([git, 'pull'])
os.chdir(root)
composer = json.load(open(os.path.join(root, 'composer.json'), 'r'))
# Run doxygen
call([doxygen, os.path.join(root, 'Doxyfile')], env={
'COMPONENT_NAME': locs['project'],
'COMPONENT_VERSION': locs['version'],
'COMPONENT_BRIEF': composer.get('description', ''),
})
# Copy doxygen output to Sphinx's output folder
try:
shutil.copytree(
os.path.join(root, 'docs', 'api', 'html'),
os.path.join(root, 'docs', 'enduser', 'html', 'api'),
)
except OSError:
pass
os.chdir(cwd)
real_conf = os.path.join(buildenv, 'sphinx', 'conf.py')
print "Including real configuration file (%s)..." % (real_conf, )
execfile(real_conf, globs, locs)
locs['copyright'] = u'2012-%d, XRL Team. All rights reserved' % \
datetime.now().year
prepare(globals(), locals())
| Python | 0 |
b3ef8f04fa7abd688d7c8669b4f1dfeda2a55c81 | test fixed | tests/test_resource_manager.py | tests/test_resource_manager.py | # from . import ManagerTestBase
# from flask import json
# import datetime
# from app import db
# from stargate.resource_info import resource_info
# from stargate.const import ResourceInfoConst
# from app.models import TestPrimaryKey
# from app import init_app, db
# from functools import partial
# class TestResourceManager(ManagerTestBase):
# @classmethod
# def setUpClass(self):
# super(TestResourceManager, self).setUpClass()
# def test_collection_name(self):
# response = self.client.get('/api/mycustomcollection', headers={"Content-Type": "application/json"})
# self.assertEqual(response._status_code, 200)
# response = self.client.get('/api/mycustomcollection', headers={"Content-Type": "application/json"})
# self.assertEqual(response._status_code, 200)
# def test_url_prefix(self):
# response = self.client.get('/v1/city', headers={"Content-Type": "application/json"})
# self.assertEqual(response._status_code, 200)
# def test_resource_fields(self):
# response = self.client.get('/api/location', headers={"Content-Type": "application/json"})
# data = json.loads(response.get_data())
# data = data['data']
# for key in data:
# keys = list(key['attributes'].keys())
# self.assertCountEqual(keys, ['latitude','longitude'])
# def test_resource_exclude(self):
# response = self.client.get('/v1/city', headers={"Content-Type": "application/json"})
# data = json.loads(response.get_data())
# data = data['data']
# for key in data:
# keys = list(key['attributes'].keys())
# self.assertNotIn(['latitude','longitude'], keys)
# def test_view_decorators(self):
# response = self.client.get('/api/testprimarykey', headers={"Content-Type": "application/json", "X_AUTH_KEY":"1234567"})
# self.assertEqual(response._status_code, 200)
# func = partial(self.client.get, '/api/testprimarykey', headers={"Content-Type": "application/json"})
# self.assertRaises(ValueError, func)
# def test_resource_http_methods(self):
# response = self.client.get('/api/mycustomcollection', headers={"Content-Type": "application/json"})
# self.assertEqual(response._status_code, 200)
# response = self.client.post('/api/mycustomcollection', headers={"Content-Type": "application/json"})
# self.assertEqual(response._status_code, 405)
# def test_custom_primary_key_field(self):
# primary_key = resource_info(ResourceInfoConst.PRIMARY_KEY, TestPrimaryKey)
# self.assertEqual(primary_key, 'ser_id') | from . import ManagerTestBase
from flask import json
import datetime
from app import db
from stargate.resource_info import resource_info
from stargate.const import ResourceInfoConst
from app.models import TestPrimaryKey
from app import init_app, db
from functools import partial
class TestResourceManager(ManagerTestBase):
@classmethod
def setUpClass(self):
super(TestResourceManager, self).setUpClass()
def test_collection_name(self):
response = self.client.get('/api/mycustomcollection', headers={"Content-Type": "application/json"})
self.assertEqual(response._status_code, 200)
response = self.client.get('/api/mycustomcollection', headers={"Content-Type": "application/json"})
self.assertEqual(response._status_code, 200)
def test_url_prefix(self):
response = self.client.get('/v1/city', headers={"Content-Type": "application/json"})
self.assertEqual(response._status_code, 200)
def test_resource_fields(self):
response = self.client.get('/api/location', headers={"Content-Type": "application/json"})
data = json.loads(response.get_data())
data = data['data']
for key in data:
keys = list(key['attributes'].keys())
self.assertCountEqual(keys, ['latitude','longitude'])
def test_resource_exclude(self):
response = self.client.get('/v1/city', headers={"Content-Type": "application/json"})
data = json.loads(response.get_data())
data = data['data']
for key in data:
keys = list(key['attributes'].keys())
self.assertNotIn(['latitude','longitude'], keys)
def test_view_decorators(self):
response = self.client.get('/api/testprimarykey', headers={"Content-Type": "application/json", "X_AUTH_KEY":"1234567"})
self.assertEqual(response._status_code, 200)
func = partial(self.client.get, '/api/testprimarykey', headers={"Content-Type": "application/json"})
self.assertRaises(ValueError, func)
def test_resource_http_methods(self):
response = self.client.get('/api/mycustomcollection', headers={"Content-Type": "application/json"})
self.assertEqual(response._status_code, 200)
response = self.client.post('/api/mycustomcollection', headers={"Content-Type": "application/json"})
self.assertEqual(response._status_code, 405)
def test_custom_primary_key_field(self):
primary_key = resource_info(ResourceInfoConst.PRIMARY_KEY, TestPrimaryKey)
self.assertEqual(primary_key, 'ser_id') | Python | 0 |
e721511a24f98e57e8bfeb45a953d7d42cf78f33 | increase the max length of a link that is to be shortenend to 500 characters | teeny_weeny/models.py | teeny_weeny/models.py | from django.db import models
from django.utils import timezone
class ShortLink(models.Model):
short = models.CharField(max_length=128, unique=True)
link = models.URLField(max_length=500)
hit = models.BigIntegerField(default=0)
date = models.DateTimeField(default=timezone.now)
def __unicode__(self):
return u'%s' % (self.short)
| from django.db import models
from django.utils import timezone
class ShortLink(models.Model):
short = models.CharField(max_length=128, unique=True)
link = models.URLField()
hit = models.BigIntegerField(default=0)
date = models.DateTimeField(default=timezone.now)
def __unicode__(self):
return u'%s' % (self.short) | Python | 0.000061 |
517ffe9a3d2ca3608b8044e88d74d16fe5e65db1 | Use new Sphinx Autodoc mock import path (#17634) | docs/exts/docroles.py | docs/exts/docroles.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#
"""Document roles"""
from functools import partial
from docutils import nodes, utils
from sphinx.ext.autodoc.importer import import_module
from sphinx.ext.autodoc.mock import mock
class RoleException(Exception):
"""Exception for roles extension"""
def get_template_field(env, fullname):
"""
Gets template fields for specific operator class.
:param env: env config
:param fullname: Full path to operator class.
For example: ``airflow.providers.google.cloud.operators.vision.CloudVisionCreateProductSetOperator``
:return: List of template field
:rtype: list[str]
"""
modname, classname = fullname.rsplit(".", 1)
try:
with mock(env.config.autodoc_mock_imports):
mod = import_module(modname)
except ImportError:
raise RoleException(f"Error loading {modname} module.")
clazz = getattr(mod, classname)
if not clazz:
raise RoleException(f"Error finding {classname} class in {modname} module.")
template_fields = getattr(clazz, "template_fields")
if not template_fields:
raise RoleException(f"Could not find the template fields for {classname} class in {modname} module.")
return list(template_fields)
def template_field_role(
app,
typ,
rawtext,
text,
lineno,
inliner,
options=None,
content=None,
):
"""
A role that allows you to include a list of template fields in the middle of the text. This is especially
useful when writing guides describing how to use the operator.
The result is a list of fields where each field is shorted in the literal block.
Sample usage::
:template-fields:`airflow.operators.bash.BashOperator`
For further information look at:
* [http://docutils.sourceforge.net/docs/howto/rst-roles.html](Creating reStructuredText Interpreted
Text Roles)
"""
if options is None:
options = {}
if content is None:
content = []
text = utils.unescape(text)
try:
template_fields = get_template_field(app.env, text)
except RoleException as e:
msg = inliner.reporter.error(
f"invalid class name {text} \n{e}",
line=lineno,
)
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
node = nodes.inline(rawtext=rawtext)
for i, field in enumerate(template_fields):
if i != 0:
node += nodes.Text(", ")
node += nodes.literal(field, "", nodes.Text(field))
return [node], []
def setup(app):
"""Sets the extension up"""
from docutils.parsers.rst import roles
roles.register_local_role("template-fields", partial(template_field_role, app))
return {"version": "builtin", "parallel_read_safe": True, "parallel_write_safe": True}
| # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#
"""Document roles"""
from functools import partial
from docutils import nodes, utils
from sphinx.ext.autodoc.importer import import_module, mock
class RoleException(Exception):
"""Exception for roles extension"""
def get_template_field(env, fullname):
"""
Gets template fields for specific operator class.
:param env: env config
:param fullname: Full path to operator class.
For example: ``airflow.providers.google.cloud.operators.vision.CloudVisionCreateProductSetOperator``
:return: List of template field
:rtype: list[str]
"""
modname, classname = fullname.rsplit(".", 1)
try:
with mock(env.config.autodoc_mock_imports):
mod = import_module(modname)
except ImportError:
raise RoleException(f"Error loading {modname} module.")
clazz = getattr(mod, classname)
if not clazz:
raise RoleException(f"Error finding {classname} class in {modname} module.")
template_fields = getattr(clazz, "template_fields")
if not template_fields:
raise RoleException(f"Could not find the template fields for {classname} class in {modname} module.")
return list(template_fields)
def template_field_role(
app,
typ,
rawtext,
text,
lineno,
inliner,
options=None,
content=None,
):
"""
A role that allows you to include a list of template fields in the middle of the text. This is especially
useful when writing guides describing how to use the operator.
The result is a list of fields where each field is shorted in the literal block.
Sample usage::
:template-fields:`airflow.operators.bash.BashOperator`
For further information look at:
* [http://docutils.sourceforge.net/docs/howto/rst-roles.html](Creating reStructuredText Interpreted
Text Roles)
"""
if options is None:
options = {}
if content is None:
content = []
text = utils.unescape(text)
try:
template_fields = get_template_field(app.env, text)
except RoleException as e:
msg = inliner.reporter.error(
f"invalid class name {text} \n{e}",
line=lineno,
)
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
node = nodes.inline(rawtext=rawtext)
for i, field in enumerate(template_fields):
if i != 0:
node += nodes.Text(", ")
node += nodes.literal(field, "", nodes.Text(field))
return [node], []
def setup(app):
"""Sets the extension up"""
from docutils.parsers.rst import roles
roles.register_local_role("template-fields", partial(template_field_role, app))
return {"version": "builtin", "parallel_read_safe": True, "parallel_write_safe": True}
| Python | 0 |
027c9d24ecf00a8435ad012fdab9e64b4201ed42 | fix migration conflict, re #7128 | arches/app/models/migrations/7128_resource_instance_filter.py | arches/app/models/migrations/7128_resource_instance_filter.py | from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('models', '7442_delete_manifest_images_table'),
]
operations = [
migrations.RunSQL("""
UPDATE d_data_types
SET defaultconfig = defaultconfig || '{"searchString": "", "searchDsl": ""}'::jsonb
WHERE datatype = 'resource-instance' OR datatype = 'resource-instance-list';
UPDATE nodes
SET config = config || '{"searchString": "", "searchDsl": ""}'::jsonb
WHERE datatype = 'resource-instance' OR datatype = 'resource-instance-list';
UPDATE public.widgets
SET defaultconfig = defaultconfig || '{"defaultResourceInstance": []}'::jsonb
WHERE name = 'resource-instance-select-widget' or name = 'resource-instance-multiselect-widget';
""","""
UPDATE nodes
SET config = config - 'searchString' - 'searchDsl'
WHERE datatype = 'resource-instance' OR datatype = 'resource-instance-list';
UPDATE d_data_types
SET defaultconfig = defaultconfig - 'searchString' - 'searchDsl'
WHERE datatype = 'resource-instance' OR datatype = 'resource-instance-list';
UPDATE public.widgets
SET defaultconfig = defaultconfig - 'defaultResourceInstance'
WHERE name = 'resource-instance-select-widget' or name = 'resource-instance-multiselect-widget';
""")
]
| from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('models', '7262_report_template_data_fetch_bool'),
]
operations = [
migrations.RunSQL("""
UPDATE d_data_types
SET defaultconfig = defaultconfig || '{"searchString": "", "searchDsl": ""}'::jsonb
WHERE datatype = 'resource-instance' OR datatype = 'resource-instance-list';
UPDATE nodes
SET config = config || '{"searchString": "", "searchDsl": ""}'::jsonb
WHERE datatype = 'resource-instance' OR datatype = 'resource-instance-list';
UPDATE public.widgets
SET defaultconfig = defaultconfig || '{"defaultResourceInstance": []}'::jsonb
WHERE name = 'resource-instance-select-widget' or name = 'resource-instance-multiselect-widget';
""","""
UPDATE nodes
SET config = config - 'searchString' - 'searchDsl'
WHERE datatype = 'resource-instance' OR datatype = 'resource-instance-list';
UPDATE d_data_types
SET defaultconfig = defaultconfig - 'searchString' - 'searchDsl'
WHERE datatype = 'resource-instance' OR datatype = 'resource-instance-list';
UPDATE public.widgets
SET defaultconfig = defaultconfig - 'defaultResourceInstance'
WHERE name = 'resource-instance-select-widget' or name = 'resource-instance-multiselect-widget';
""")
]
| Python | 0 |
e987a010f2242735ad60008774d25c00b7f89f76 | Tweak CI report | CI/CITests.py | CI/CITests.py | import os
from OMPython import OMCSessionZMQ
class CITests():
'''
Python class used to run CI tests
'''
def __init__(self, rootPath):
'''
Constructor starts omc and loads MSL
'''
self.rootPath = rootPath
self.omc = OMCSessionZMQ()
os.chdir(self.rootPath)
self.omc.sendExpression("loadModel(Modelica)")
def loadLib(self, libName, libPath):
# Attempt to load the library
if self.omc.sendExpression('loadFile("%s")' % (self.rootPath + libPath)):
print "Load success: %s" % libName
else:
errmsg = libName + " was not loaded! Check the library path:\n" + libPath
raise Exception(errmsg)
def runSyntaxCheck(self, libName, libPath):
# Load library
self.loadLib(libName,libPath)
'''
Checks all of the models in the library and returns number of faild checks
'''
# Get the list of all classes in OpenIPSL
test_list = self.omc.sendExpression('getClassNames(%s,recursive=true)' % libName)
nFailed = 0
nPassed = 0
# Run the check for all classes that are model and print result msgs
for test in test_list:
if self.omc.sendExpression("isModel(%s)" % (test)): # Check if a class is a model
passMsg = self.omc.sendExpression("checkModel(%s)" % (test))
if "completed successfully." in passMsg:
nPassed += 1
else:
failMsg = self.omc.sendExpression("getErrorString()")
print failMsg
nFailed += 1
# Print a check summary
if nFailed == 0:
str1 = "== %s ----------------------" % libName
print "%s OK! == Models checked: %s" % (str1[:22], nPassed)
else:
print "==== Check Summary for %s ====" % libName
print "Number of models that passed the check is: %s" % nPassed
print "Number of models that failed the check is: %s" % nFailed
# Return test result
return (nFailed == 0)
| import os
from OMPython import OMCSessionZMQ
class CITests():
'''
Python class used to run CI tests
'''
def __init__(self, rootPath):
'''
Constructor starts omc and loads MSL
'''
self.rootPath = rootPath
self.omc = OMCSessionZMQ()
os.chdir(self.rootPath)
self.omc.sendExpression("loadModel(Modelica)")
def loadLib(self, libPath):
# Attempt to load the library
if self.omc.sendExpression('loadFile("%s")' % (self.rootPath + libPath)):
print "%s is successfully loaded." % libPath
else:
errmsg = libPath + " was not loaded! Check the library path."
raise Exception(errmsg)
def runSyntaxCheck(self, libName, libPath):
# Load library
self.loadLib(libPath)
'''
Checks all of the models in the library and returns number of faild checks
'''
# Get the list of all classes in OpenIPSL
test_list = self.omc.sendExpression('getClassNames(%s,recursive=true)' % libName)
nFailed = 0
nPassed = 0
# Run the check for all classes that are model and print result msgs
for test in test_list:
if self.omc.sendExpression("isModel(%s)" % (test)): # Check if a class is a model
passMsg = self.omc.sendExpression("checkModel(%s)" % (test))
if "completed successfully." in passMsg:
# print passMsg
nPassed += 1
else:
failMsg = self.omc.sendExpression("getErrorString()")
print failMsg
nFailed += 1
# Print a check summary
if nFailed == 0:
str1 = "== %s --------------------" % libName
print "%s OK! (%s models checked)" % (str1[:20], nPassed)
else:
print "==== Check Summary for %s ====" % libName
print "Number of models that passed the check is: %s" % nPassed
print "Number of models that failed the check is: %s" % nFailed
# Return test result
return (nFailed == 0)
| Python | 0 |
31a607f13536fcaefa8decffe1769d1dc66e78e4 | Use empty dict for default package description | whack/sources.py | whack/sources.py | import os
import json
import shutil
import tempfile
import uuid
import blah
from .hashes import Hasher
from .files import mkdir_p, copy_dir
class PackageSourceNotFound(Exception):
def __init__(self, package_name):
message = "Could not find source for package: {0}".format(package_name)
Exception.__init__(self, message)
class PackageSourceFetcher(object):
def fetch(self, package):
if blah.is_source_control_uri(package):
return self._fetch_package_from_source_control(package)
elif self._is_local_path(package):
return PackageSource(package)
else:
raise PackageSourceNotFound(package)
def _fetch_package_from_source_control(self, package):
package_source_dir = _temporary_path()
try:
blah.archive(package, package_source_dir)
return TemporaryPackageSource(package_source_dir)
except:
shutil.rmtree(package_source_dir)
raise
def _is_local_uri(self, uri):
return "://" not in uri
def _is_local_path(self, path):
return path.startswith("/") or path.startswith(".")
def _temporary_path():
return os.path.join(tempfile.gettempdir(), str(uuid.uuid4()))
class PackageSource(object):
def __init__(self, path):
self.path = path
self._description = _read_package_description(path)
def name(self):
return self._description.name()
def source_hash(self):
hasher = Hasher()
for source_path in self._source_paths():
absolute_source_path = os.path.join(self.path, source_path)
hasher.update_with_dir(absolute_source_path)
return hasher.ascii_digest()
def write_to(self, target_dir):
for source_dir in self._source_paths():
target_sub_dir = os.path.join(target_dir, source_dir)
mkdir_p(target_sub_dir)
copy_dir(os.path.join(self.path, source_dir), target_sub_dir)
def _source_paths(self):
return ["whack"]
def __enter__(self):
return self
def __exit__(self, *args):
pass
class TemporaryPackageSource(object):
def __init__(self, path):
self._path = path
def __enter__(self):
return PackageSource(self._path)
def __exit__(self, *args):
shutil.rmtree(self._path)
def _read_package_description(package_src_dir):
whack_json_path = os.path.join(package_src_dir, "whack/whack.json")
if os.path.exists(whack_json_path):
with open(whack_json_path, "r") as whack_json_file:
whack_json = json.load(whack_json_file)
else:
whack_json = {}
return DictBackedPackageDescription(whack_json)
class DictBackedPackageDescription(object):
def __init__(self, values):
self._values = values
def name(self):
return self._values.get("name", None)
| import os
import json
import shutil
import tempfile
import uuid
import blah
from .hashes import Hasher
from .files import mkdir_p, copy_dir
class PackageSourceNotFound(Exception):
def __init__(self, package_name):
message = "Could not find source for package: {0}".format(package_name)
Exception.__init__(self, message)
class PackageSourceFetcher(object):
def fetch(self, package):
if blah.is_source_control_uri(package):
return self._fetch_package_from_source_control(package)
elif self._is_local_path(package):
return PackageSource(package)
else:
raise PackageSourceNotFound(package)
def _fetch_package_from_source_control(self, package):
package_source_dir = _temporary_path()
try:
blah.archive(package, package_source_dir)
return TemporaryPackageSource(package_source_dir)
except:
shutil.rmtree(package_source_dir)
raise
def _is_local_uri(self, uri):
return "://" not in uri
def _is_local_path(self, path):
return path.startswith("/") or path.startswith(".")
def _temporary_path():
return os.path.join(tempfile.gettempdir(), str(uuid.uuid4()))
class PackageSource(object):
def __init__(self, path):
self.path = path
self._description = _read_package_description(path)
def name(self):
return self._description.name()
def source_hash(self):
hasher = Hasher()
for source_path in self._source_paths():
absolute_source_path = os.path.join(self.path, source_path)
hasher.update_with_dir(absolute_source_path)
return hasher.ascii_digest()
def write_to(self, target_dir):
for source_dir in self._source_paths():
target_sub_dir = os.path.join(target_dir, source_dir)
mkdir_p(target_sub_dir)
copy_dir(os.path.join(self.path, source_dir), target_sub_dir)
def _source_paths(self):
return ["whack"]
def __enter__(self):
return self
def __exit__(self, *args):
pass
class TemporaryPackageSource(object):
def __init__(self, path):
self._path = path
def __enter__(self):
return PackageSource(self._path)
def __exit__(self, *args):
shutil.rmtree(self._path)
def _read_package_description(package_src_dir):
whack_json_path = os.path.join(package_src_dir, "whack/whack.json")
if os.path.exists(whack_json_path):
with open(whack_json_path, "r") as whack_json_file:
whack_json = json.load(whack_json_file)
return DictBackedPackageDescription(whack_json)
else:
return DefaultPackageDescription()
class DefaultPackageDescription(object):
def name(self):
return None
class DictBackedPackageDescription(object):
def __init__(self, values):
self._values = values
def name(self):
return self._values.get("name", None)
| Python | 0 |
7725821156795b613340bd8098583fdbb189a6d3 | fix minor bug and update response msg | wildlife/rest.py | wildlife/rest.py | from wildlife import WildApp
import os
from flask import jsonify, make_response
from wildlife import kz_exceptions
import logging
import json
import exceptions
import functools
# change to current directory
os.chdir(os.path.dirname(os.path.realpath(__file__)))
conf_path = "./config/wildlife.yml"
app = WildApp("wildlife for zookeeper",
conf_path)
def cluster_znode_exception(func):
@functools.wraps(func)
def wrapper(cluster_name, znode):
try:
return func(cluster_name, znode)
except (kz_exceptions.ConnectionClosedError,
kz_exceptions.ConnectionDropped,
kz_exceptions.ConnectionLoss,
kz_exceptions.ConnectionLossException):
return make_response("Connection Exception When Interacts "
"with Cluster [%s].\n" % cluster_name,
408)
except kz_exceptions.NoNodeException:
return make_response("Cannot Find Znode [%s] in Cluster"
"[%s].\n" % (znode, cluster_name),
404)
except kz_exceptions.InvalidACLException:
return make_response("Invalid ACLs on Accessing Znode [%s] in "
"Cluster [%s].\n" % (znode, cluster_name),
401)
except kz_exceptions.NoAuthException:
return make_response("Please Provide ACLs to Access Znode [%s] in "
"Cluster [%s].\n" % (znode, cluster_name),
401)
except exceptions:
return make_response("Unable to Handle this Request.\n",
500)
return wrapper
@app.route("/")
def hello():
return make_response("Welcome to WildLife: The REST API for ZooKeeper!\n",
200)
@app.route("/wildlife", methods=["GET"])
def clusters():
return make_response(jsonify({"clusters": app.clusters.keys()}),
200)
@app.route("/wildlife/<cluster_name>", methods=["GET"])
def detail_cluster(cluster_name):
return make_response(jsonify(app.clusters[cluster_name].__dict__),
200)
@app.route("/wildlife/<cluster_name>/<znode>", methods=["GET"])
@cluster_znode_exception
def cluster_znode(cluster_name, znode):
_zclient_manager = app.managers[cluster_name]
_zclient = _zclient_manager._client
zdata = _zclient.get(znode)
return make_response(jsonify({"data": zdata[0],
"znodeStat": convert_zstat(zdata[1])
}),
200)
@app.route("/wildlife/<cluster_name>/<znode>/data", methods=["GET"])
@cluster_znode_exception
def cluster_znode_data(cluster_name, znode):
zdata = cluster_znode(cluster_name, znode)
zdata = json.loads(zdata)
return make_response(zdata["data"],
200)
@app.route("/wildlife/<cluster_name>/<znode>/children", methods=["GET"])
@cluster_znode_exception
def cluster_znode_children(cluster_name, znode):
_zclient_manager = app.managers[cluster_name]
_zclient = _zclient_manager._client
zchildren = _zclient.get_children(znode)
return make_response(str(zchildren),
200)
def convert_zstat(znodestat):
return {"czxid": znodestat.czxid,
"mzxid": znodestat.mzxid,
"ctime": znodestat.ctime,
"mtime": znodestat.mtime,
"version": znodestat.version,
"cversion": znodestat.cversion,
"aversion": znodestat.aversion,
"ephemeralOwner": znodestat.ephemeralOwner,
"dataLength": znodestat.dataLength,
"numChildren": znodestat.numChildren,
"pzxid": znodestat.pzxid}
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(levelname)s %(name)s: '
'(%(threadName)-10s) %(message)s')
app.run()
| from wildlife import WildApp
import os
from flask import jsonify, make_response
from wildlife import kz_exceptions
import logging
import json
import exceptions
import functools
# change to current directory
os.chdir(os.path.dirname(os.path.realpath(__file__)))
conf_path = "./config/wildlife.yml"
app = WildApp("wildlife for zookeeper",
conf_path)
def cluster_znode_exception(func):
@functools.wraps(func)
def wrapper(cluster_name, znode):
try:
func(cluster_name, znode)
except (kz_exceptions.ConnectionClosedError,
kz_exceptions.ConnectionDropped,
kz_exceptions.ConnectionLoss,
kz_exceptions.ConnectionLossException):
return make_response("Connection Exception When Interacts "
"with Cluster %s.\n" % cluster_name,
408)
except kz_exceptions.NoNodeError:
return make_response("Cannot Find Znode %s in Cluster"
"%s.\n" % (znode, cluster_name),
404)
except kz_exceptions.InvalidACLException:
return make_response("Invalid ACLs on Accessing Znode %s in "
"Cluster %s.\n" % (znode, cluster_name),
401)
except exceptions:
return make_response("Unable to Handle this Request.\n",
500)
return wrapper
@app.route("/")
def hello():
return make_response("Welcome to WildLife: The REST API for ZooKeeper!\n",
200)
@app.route("/wildlife", methods=["GET"])
def clusters():
return make_response(jsonify({"clusters": app.clusters.keys()}),
200)
@app.route("/wildlife/<cluster_name>", methods=["GET"])
def detail_cluster(cluster_name):
return make_response(jsonify(app.clusters[cluster_name].__dict__),
200)
@app.route("/wildlife/<cluster_name>/<znode>", methods=["GET"])
@cluster_znode_exception
def cluster_znode(cluster_name, znode):
_zclient_manager = app.managers[cluster_name]
_zclient = _zclient_manager._client
zdata = _zclient.get(znode)
return make_response(jsonify({"data": zdata[0],
"znodeStat": convert_zstat(zdata[1])
}),
200)
@app.route("/wildlife/<cluster_name>/<znode>/data", methods=["GET"])
@cluster_znode_exception
def cluster_znode_data(cluster_name, znode):
zdata = cluster_znode(cluster_name, znode)
zdata = json.loads(zdata)
return make_response(zdata["data"],
200)
@app.route("/wildlife/<cluster_name>/<znode>/children", methods=["GET"])
@cluster_znode_exception
def cluster_znode_children(cluster_name, znode):
_zclient_manager = app.managers[cluster_name]
_zclient = _zclient_manager._client
zchildren = _zclient.get_children(znode)
return make_response(str(zchildren),
200)
def convert_zstat(znodestat):
return {"czxid": znodestat.czxid,
"mzxid": znodestat.mzxid,
"ctime": znodestat.ctime,
"mtime": znodestat.mtime,
"version": znodestat.version,
"cversion": znodestat.cversion,
"aversion": znodestat.aversion,
"ephemeralOwner": znodestat.ephemeralOwner,
"dataLength": znodestat.dataLength,
"numChildren": znodestat.numChildren,
"pzxid": znodestat.pzxid}
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(levelname)s %(name)s: '
'(%(threadName)-10s) %(message)s')
app.run()
| Python | 0 |
33c51e6a0612aece239bf01236f110ef9fb40c86 | Add some uncovered code | wordcount_lib.py | wordcount_lib.py | def consume(filename):
chars = 0
words = 0
lines = 0
with open(filename, 'rt') as fp:
for line in fp:
lines += 1
words += len(line.strip().split())
chars += len(line)
return chars, words, lines
def daaaangerous(param=0):
print("I'm the most dangerous function West of the Missippi, no test "\
"will cover me!")
return 3 / param
| def consume(filename):
chars = 0
words = 0
lines = 0
with open(filename, 'rt') as fp:
for line in fp:
lines += 1
words += len(line.strip().split())
chars += len(line)
return chars, words, lines
| Python | 0.000002 |
610446ee84b02372bdd98e4530e9be9e6898c3ec | Fix #3 issue. | textmagic/rest/models/chats.py | textmagic/rest/models/chats.py | from . import Model, CollectionModel
class ChatMessage(Model):
"""
A Chat Message object model
.. attribute:: id
.. attribute:: direction
.. attribute:: sender
.. attribute:: messageTime
.. attribute:: text
.. attribute:: receiver
.. attribute:: deleted
.. attribute:: userId
.. attribute:: status
.. attribute:: total
.. attribute:: firstName
.. attribute:: lastName
"""
class ChatMessages(CollectionModel):
instance = ChatMessage
name = "chats"
searchable = False
class Chat(Model):
"""
A Chat object model
.. attribute:: id
.. attribute:: phone
.. attribute:: contact
Dictionary like this:
::
{
"id": 4329702,
"firstName": "Jonh",
"lastName": "Doe",
"companyName": "",
"phone": "19025555555",
"email": "",
"country": {
"id": "CA",
"name": "Canada"
},
"customFields": [
{
"value": "1970-01-01",
"id": 1111,
"name": "Birthday",
"createdAt": "2015-04-10T06:51:02+0000"
}
]
}
.. attribute:: unread
.. attribute:: updatedAt
"""
class Chats(CollectionModel):
name = "chats"
instance = Chat
searchable = False
def list(self, **kwargs):
"""
Returns a list of :class:`Chat` objects and a pager dict.
:Example:
chats, pager = client.chats.list()
:param int page: Fetch specified results page. Default=1
:param int limit: How many results on page. Default=10
"""
kwargs["search"] = False
return self.get_instances(kwargs)
def by_phone(self, phone, **kwargs):
"""
Fetch messages from chat with specified phone number.
:Example:
chat = client.chats.by_phone(phone="447624800500")
:param str phone: Phone number in E.164 format.
:param int page: Fetch specified results page. Default=1
:param int limit: How many results on page. Default=10
"""
chat_messages = ChatMessages(self.base_uri, self.auth)
return self.get_subresource_instances(uid=phone, instance=chat_messages, params=kwargs) | from . import Model, CollectionModel
class ChatMessage(Model):
"""
A Chat Message object model
.. attribute:: id
.. attribute:: direction
.. attribute:: sender
.. attribute:: messageTime
.. attribute:: text
.. attribute:: receiver
.. attribute:: deleted
.. attribute:: userId
.. attribute:: status
.. attribute:: total
.. attribute:: firstName
.. attribute:: lastName
"""
class ChatMessages(CollectionModel):
instance = ChatMessage
name = "chats"
searchable = False
class Chat(Model):
"""
A Chat object model
.. attribute:: id
.. attribute:: phone
.. attribute:: contact
Dictionary like this:
::
{
"id": 4329702,
"firstName": "Jonh",
"lastName": "Doe",
"companyName": "",
"phone": "19025555555",
"email": "",
"country": {
"id": "CA",
"name": "Canada"
},
"customFields": [
{
"value": "1970-01-01",
"id": 1111,
"name": "Birthday",
"createdAt": "2015-04-10T06:51:02+0000"
}
]
}
.. attribute:: unread
.. attribute:: updatedAt
"""
class Chats(CollectionModel):
name = "chats"
instance = Chat
searchable = False
def list(self, **kwargs):
"""
Returns a list of :class:`Chat` objects and a pager dict.
:Example:
chats, pager = client.chats.list()
:param int page: Fetch specified results page. Default=1
:param int limit: How many results on page. Default=10
"""
kwargs["search"] = False
return self.get_instances(kwargs)
def by_phone(self, phone=0, **kwargs):
"""
Fetch messages from chat with specified phone number.
:Example:
chat = client.chats.by_phone(phone="447624800500")
:param str phone: Phone number in E.164 format.
:param int page: Fetch specified results page. Default=1
:param int limit: How many results on page. Default=10
"""
chat_messages = ChatMessages(self.base_uri, self.auth)
return self.get_subresource_instances(uid=phone, instance=chat_messages, params=kwargs) | Python | 0 |
05cb079fd4e6b7a9bfd32c1470c9c638af5b7bc9 | Add comments clarifying implementation choices | importlib_metadata/_py39compat.py | importlib_metadata/_py39compat.py | """
Compatibility layer with Python 3.8/3.9
"""
from typing import TYPE_CHECKING, Any, Optional, Tuple
if TYPE_CHECKING: # -> prevent circular imports on runtime.
from . import Distribution, EntryPoint
else:
Distribution = EntryPoint = Any
def normalized_name(dist: Distribution) -> Optional[str]:
"""
Honor name normalization for distributions that don't provide ``_normalized_name``.
"""
try:
return dist._normalized_name
except AttributeError:
from . import Prepared # -> delay to prevent circular imports.
return Prepared.normalize(getattr(dist, "name", None) or dist.metadata['Name'])
def ep_matches(ep: EntryPoint, **params) -> Tuple[EntryPoint, bool]:
"""
Workaround for ``EntryPoint`` objects without the ``matches`` method.
For the sake of convenience, a tuple is returned containing not only the
boolean value corresponding to the predicate evalutation, but also a compatible
``EntryPoint`` object that can be safely used at a later stage.
For example, the following sequences of expressions should be compatible:
# Sequence 1: using the compatibility layer
candidates = (_py39compat.ep_matches(ep, **params) for ep in entry_points)
[ep for ep, predicate in candidates if predicate]
# Sequence 2: using Python 3.9+
[ep for ep in entry_points if ep.matches(**params)]
"""
try:
return ep, ep.matches(**params)
except AttributeError:
from . import EntryPoint # -> delay to prevent circular imports.
# Reconstruct the EntryPoint object to make sure it is compatible.
_ep = EntryPoint(ep.name, ep.value, ep.group)
return _ep, _ep.matches(**params)
| """
Compatibility layer with Python 3.8/3.9
"""
from typing import TYPE_CHECKING, Any, Optional, Tuple
if TYPE_CHECKING:
from . import Distribution, EntryPoint
else:
Distribution = EntryPoint = Any
def normalized_name(dist: Distribution) -> Optional[str]:
"""
Honor name normalization for distributions that don't provide ``_normalized_name``.
"""
try:
return dist._normalized_name
except AttributeError:
from . import Prepared
return Prepared.normalize(getattr(dist, "name", None) or dist.metadata['Name'])
def ep_matches(ep: EntryPoint, **params) -> Tuple[EntryPoint, bool]:
"""
Workaround for ``EntryPoint`` objects without the ``matches`` method.
For the sake of convenience, a tuple is returned containing not only the
boolean value corresponding to the predicate evalutation, but also a compatible
``EntryPoint`` object that can be safely used at a later stage.
For example, the following sequences of expressions should be compatible:
# Sequence 1: using the compatibility layer
candidates = (_py39compat.ep_matches(ep, **params) for ep in entry_points)
[ep for ep, predicate in candidates if predicate]
# Sequence 2: using Python 3.9+
[ep for ep in entry_points if ep.matches(**params)]
"""
try:
return ep, ep.matches(**params)
except AttributeError:
from . import EntryPoint
# Reconstruct the EntryPoint object to make sure it is compatible.
_ep = EntryPoint(ep.name, ep.value, ep.group)
return _ep, _ep.matches(**params)
| Python | 0 |
4bd53d96be49c01c04a30d2c064774bac23fc20a | Rewrite entry update in DatabaseStorage without explicit update call | speedinfo/storage/database/storage.py | speedinfo/storage/database/storage.py | # coding: utf-8
from django.db import IntegrityError
from django.db.models import ExpressionWrapper, F, FloatField, IntegerField
from django.forms import model_to_dict
from speedinfo.models import ViewProfiler
from speedinfo.storage.base import AbstractStorage
from speedinfo.storage.database.models import Storage
class DatabaseStorage(AbstractStorage):
def add(self, view_name, method, is_anon_call, is_cache_hit, sql_time, sql_count, view_execution_time):
try:
vp, created = Storage.objects.get_or_create(view_name=view_name, method=method)
except IntegrityError:
# IntegrityError raised in the case of concurrent access
# to get_or_create method from another application worker/thread
vp = Storage.objects.get(view_name=view_name, method=method)
vp.anon_calls = F("anon_calls") + (is_anon_call and 1 or 0)
vp.cache_hits = F("cache_hits") + (is_cache_hit and 1 or 0)
vp.sql_total_time = F("sql_total_time") + sql_time
vp.sql_total_count = F("sql_total_count") + sql_count
vp.total_calls = F("total_calls") + 1
vp.total_time = F("total_time") + view_execution_time
vp.save()
def fetch_all(self, ordering=None):
qs = Storage.objects.annotate(
anon_calls_ratio=ExpressionWrapper(100.0 * F("anon_calls") / F("total_calls"), output_field=FloatField()),
cache_hits_ratio=ExpressionWrapper(100.0 * F("cache_hits") / F("total_calls"), output_field=FloatField()),
sql_count_per_call=ExpressionWrapper(F("sql_total_count") / F("total_calls"), output_field=IntegerField()),
sql_time_ratio=ExpressionWrapper(100.0 * F("sql_total_time") / F("total_time"), output_field=FloatField()),
time_per_call=ExpressionWrapper(F("total_time") / F("total_calls"), output_field=FloatField()),
)
if ordering:
qs = qs.order_by(*ordering)
return [ViewProfiler(**model_to_dict(item)) for item in qs]
def reset(self):
Storage.objects.all().delete()
| # coding: utf-8
from django.db import IntegrityError
from django.db.models import ExpressionWrapper, F, FloatField, IntegerField
from django.forms import model_to_dict
from speedinfo.models import ViewProfiler
from speedinfo.storage.base import AbstractStorage
from speedinfo.storage.database.models import Storage
class DatabaseStorage(AbstractStorage):
def add(self, view_name, method, is_anon_call, is_cache_hit, sql_time, sql_count, view_execution_time):
try:
vp, created = Storage.objects.get_or_create(view_name=view_name, method=method)
except IntegrityError:
# IntegrityError raised in the case of concurrent access
# to get_or_create method from another application worker/thread
vp = Storage.objects.get(view_name=view_name, method=method)
Storage.objects.filter(pk=vp.pk).update(
anon_calls=F("anon_calls") + (is_anon_call and 1 or 0),
cache_hits=F("cache_hits") + (is_cache_hit and 1 or 0),
sql_total_time=F("sql_total_time") + sql_time,
sql_total_count=F("sql_total_count") + sql_count,
total_calls=F("total_calls") + 1,
total_time=F("total_time") + view_execution_time,
)
def fetch_all(self, ordering=None):
qs = Storage.objects.annotate(
anon_calls_ratio=ExpressionWrapper(100.0 * F("anon_calls") / F("total_calls"), output_field=FloatField()),
cache_hits_ratio=ExpressionWrapper(100.0 * F("cache_hits") / F("total_calls"), output_field=FloatField()),
sql_count_per_call=ExpressionWrapper(F("sql_total_count") / F("total_calls"), output_field=IntegerField()),
sql_time_ratio=ExpressionWrapper(100.0 * F("sql_total_time") / F("total_time"), output_field=FloatField()),
time_per_call=ExpressionWrapper(F("total_time") / F("total_calls"), output_field=FloatField()),
)
if ordering:
qs = qs.order_by(*ordering)
return [ViewProfiler(**model_to_dict(item)) for item in qs]
def reset(self):
Storage.objects.all().delete()
| Python | 0 |
5cd0ad7e865794401506dbc9358261b5fa020704 | Move and name region_lookup | saau/sections/age/median.py | saau/sections/age/median.py | import logging
from operator import itemgetter
from matplotlib.cm import get_cmap
import matplotlib as mpl
import cartopy.crs as ccrs
from ...utils.download.abs import get_generic_data, abs_data_to_dataframe
from ..image_provider import ImageProvider
from ...utils.header import render_header_to
DATASETID = 'ABS_CENSUS2011_B02'
FILENAME = 'median_ages.json'
class MedianAgeImageProvider(ImageProvider):
def has_required_data(self):
return self.data_dir_exists(FILENAME)
def obtain_data(self):
data = get_generic_data(
DATASETID,
and_=[
'FREQUENCY.A',
'REGIONTYPE.SA2',
'MEASURE.MAGE'
],
or_=[
'STATE.0',
'STATE.1',
'STATE.2',
'STATE.3',
'STATE.4',
'STATE.5',
'STATE.6',
'STATE.7',
'STATE.8',
'STATE.9'
]
)
assert data['series']
return self.save_json(FILENAME, data)
def region_lookup(self, sa3):
return self.services.sa3.get('SA3_CODE11', int(sa3))
def build_image(self):
colors = get_cmap('Purples')
age_data = abs_data_to_dataframe(self.load_json(FILENAME))
age_data = [
(
self.region_lookup(data_point.REGION),
data_point.Value
)
for _, data_point in age_data.iterrows()
]
values = list(map(itemgetter(1), age_data))
norm = mpl.colors.Normalize(
vmin=min(values),
vmax=max(values)
)
logging.info(
'%d -> %d',
min(values),
max(values)
)
aus_map = self.services.aus_map.get_map()
for shapes, mage in age_data:
aus_map.add_geometries(
[
shape.geometry
for shape in shapes.rec
if shape.geometry
],
crs=ccrs.PlateCarree(),
color=colors(norm(mage))
)
cax = aus_map.figure.add_axes([0.95, 0.2, 0.02, 0.6])
cb = mpl.colorbar.ColorbarBase(
cax,
cmap=colors,
norm=norm,
spacing='props'
)
cb.set_label('Average age')
return render_header_to(
aus_map,
19.25,
[
"<b>MAP</b>",
"SHOWING THE DISTRIBUTION OF",
"<b>MEDIAN AGE</b>",
"<i>Compiled using data from the 2011 Australian Census</i>"
]
)
| import logging
from operator import itemgetter
from matplotlib.cm import get_cmap
import matplotlib as mpl
import cartopy.crs as ccrs
from ...utils.download.abs import get_generic_data, abs_data_to_dataframe
from ..image_provider import ImageProvider
from ...utils.header import render_header_to
DATASETID = 'ABS_CENSUS2011_B02'
FILENAME = 'median_ages.json'
class MedianAgeImageProvider(ImageProvider):
def has_required_data(self):
return self.data_dir_exists(FILENAME)
def obtain_data(self):
data = get_generic_data(
DATASETID,
and_=[
'FREQUENCY.A',
'REGIONTYPE.SA2',
'MEASURE.MAGE'
],
or_=[
'STATE.0',
'STATE.1',
'STATE.2',
'STATE.3',
'STATE.4',
'STATE.5',
'STATE.6',
'STATE.7',
'STATE.8',
'STATE.9'
]
)
assert data['series']
return self.save_json(FILENAME, data)
def build_image(self):
colors = get_cmap('Purples')
age_data = abs_data_to_dataframe(self.load_json(FILENAME))
region_lookup = lambda sa3: self.services.sa3.get(
'SA3_CODE11', int(sa3)
)
age_data = [
(
region_lookup(data_point.REGION),
data_point.Value
)
for _, data_point in age_data.iterrows()
]
values = list(map(itemgetter(1), age_data))
norm = mpl.colors.Normalize(
vmin=min(values),
vmax=max(values)
)
logging.info(
'%d -> %d',
min(values),
max(values)
)
aus_map = self.services.aus_map.get_map()
for shapes, mage in age_data:
aus_map.add_geometries(
[
shape.geometry
for shape in shapes.rec
if shape.geometry
],
crs=ccrs.PlateCarree(),
color=colors(norm(mage))
)
cax = aus_map.figure.add_axes([0.95, 0.2, 0.02, 0.6])
cb = mpl.colorbar.ColorbarBase(
cax,
cmap=colors,
norm=norm,
spacing='props'
)
cb.set_label('Average age')
return render_header_to(
aus_map,
19.25,
[
"<b>MAP</b>",
"SHOWING THE DISTRIBUTION OF",
"<b>MEDIAN AGE</b>",
"<i>Compiled using data from the 2011 Australian Census</i>"
]
)
| Python | 0.000001 |
953d83119005075b9bc59d040389c209208263d5 | Integrate LLVM at llvm/llvm-project@7354a73945f1 | third_party/llvm/workspace.bzl | third_party/llvm/workspace.bzl | """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tfrt_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "7354a73945f1c123d66b01f51374ecbdba18fab3"
LLVM_SHA256 = "73a86e6f9d263a812bfdda5120b8f08467bd8ee39564b75da752854328a72803"
tfrt_http_archive(
name = name,
build_file = "//third_party/llvm:BUILD",
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-" + LLVM_COMMIT,
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
)
| """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tfrt_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "7f2b016b820487f2fb69b93e784fff5d8297dea0"
LLVM_SHA256 = "348e586173038ab248e76be34d4a3e5667d56429350150a4a8130fba5a318e05"
tfrt_http_archive(
name = name,
build_file = "//third_party/llvm:BUILD",
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-" + LLVM_COMMIT,
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
)
| Python | 0.000001 |
509a542fd5e3171979fb74aec9226c057d289623 | Integrate LLVM at llvm/llvm-project@04a5ca862bb9 | third_party/llvm/workspace.bzl | third_party/llvm/workspace.bzl | """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tfrt_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "04a5ca862bb989acdd2729d0991b4e5a104bf244"
LLVM_SHA256 = "10a0c150c477a36eff25d49f0f50379fddf626a7d87a2b1846fb101173c742c9"
tfrt_http_archive(
name = name,
build_file = "//third_party/llvm:BUILD",
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-" + LLVM_COMMIT,
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
)
| """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tfrt_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "b3a0bed5fb8766dcf27583ab1f73edc6e7232657"
LLVM_SHA256 = "0ee751d5754af930e05cea8b54b061e819e4254e06f64d211e07f2faf3395adf"
tfrt_http_archive(
name = name,
build_file = "//third_party/llvm:BUILD",
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-" + LLVM_COMMIT,
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
)
| Python | 0.000001 |
94fbcf6224624810a30a17cc9bc8d4c1f3458954 | Integrate LLVM at llvm/llvm-project@5c7b43aa8298 | third_party/llvm/workspace.bzl | third_party/llvm/workspace.bzl | """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tfrt_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "5c7b43aa8298a389b906d72c792941a0ce57782e"
LLVM_SHA256 = "e34534a864e2bedaff6811effb757d2eed3a50c9c1e540515ed1568addf1815d"
tfrt_http_archive(
name = name,
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-" + LLVM_COMMIT,
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
link_files = {
"//third_party/llvm:llvm.autogenerated.BUILD": "llvm/BUILD",
"//third_party/mlir:BUILD": "mlir/BUILD",
"//third_party/mlir:test.BUILD": "mlir/test/BUILD",
},
)
| """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tfrt_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "9ba661f91276dd8cc728f9b2e82905b78c0119b4"
LLVM_SHA256 = "f89c033b0e8e6d4e6ff5ce3883aadc82a502b063a830cd685672cec4bea3dfb1"
tfrt_http_archive(
name = name,
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-" + LLVM_COMMIT,
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
link_files = {
"//third_party/llvm:llvm.autogenerated.BUILD": "llvm/BUILD",
"//third_party/mlir:BUILD": "mlir/BUILD",
"//third_party/mlir:test.BUILD": "mlir/test/BUILD",
},
)
| Python | 0.000001 |
fda8088ec3330ec5bc6ea7769c79d2fb9f227728 | Fix bug with valid hostnames with dashes. I added underscores even though they aren't valid just for good measure | salmon/apps/monitor/urls.py | salmon/apps/monitor/urls.py | from django.conf.urls import patterns, url
from . import views
urlpatterns = patterns('',
url(r'^$', views.dashboard, name="dashboard"),
url(r'^(?P<name>[-\w\._]*)$', views.history, name="history"),
)
| from django.conf.urls import patterns, url
from . import views
urlpatterns = patterns('',
url(r'^$', views.dashboard, name="dashboard"),
url(r'^(?P<name>[\w\.]*)$', views.history, name="history"),
)
| Python | 0.000003 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.