commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13
values | lang stringclasses 23
values |
|---|---|---|---|---|---|---|---|---|
73aa6990f75b6152cf983a59a81e7966fcba4a32 | Add __all__ to __init.py__ | jackstanek/BotBot,jackstanek/BotBot | botbot/__init__.py | botbot/__init__.py | __version__ = '0.0.1'
__all__ = ["checker", "problems"]
| __version__ = '0.0.1'
| mit | Python |
fe88f37559e93c9357f116ff51324a2201c68b82 | Change stringly type to classly typed - preparation for DirectoryController | caio2k/RIDE,robotframework/RIDE,caio2k/RIDE,HelioGuilherme66/RIDE,robotframework/RIDE,HelioGuilherme66/RIDE,robotframework/RIDE,HelioGuilherme66/RIDE,HelioGuilherme66/RIDE,fingeronthebutton/RIDE,robotframework/RIDE,fingeronthebutton/RIDE,caio2k/RIDE,fingeronthebutton/RIDE | src/robotide/ui/images.py | src/robotide/ui/images.py | # Copyright 2008-2009 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import wx
from robot.parsing.model import Variable
from robotide.controller.settingcontrollers import VariableController
from robotide.controller.macrocontrollers import TestCaseController, UserKeywordController
from robotide.controller.filecontrollers import TestDataDirectoryController, TestCaseFileController, ResourceFileController
_SIZE = (16, 16)
_BASE = os.path.join(os.path.dirname(__file__), '..', 'widgets')
class TreeImageList(wx.ImageList):
def __init__(self):
wx.ImageList.__init__(self, *_SIZE)
self._images = {
TestDataDirectoryController: self.directory,
TestCaseFileController: _TreeImage(self, wx.ART_NORMAL_FILE),
TestCaseController: _TreeImage(self, 'robot.png'),
UserKeywordController: _TreeImage(self, 'process.png'),
ResourceFileController: _TreeImage(self, wx.ART_EXECUTABLE_FILE),
VariableController: _TreeImage(self, 'dollar.png')
}
@property
def directory(self):
return _TreeImage(self, wx.ART_FOLDER, wx.ART_FOLDER_OPEN)
def __getitem__(self, controller):
return self._images[controller.__class__]
class _TreeImage(object):
def __init__(self, image_list, normal, expanded=None):
self.normal = self._get_image(image_list, normal)
self.expanded = self._get_image(image_list, expanded) if expanded else self.normal
def _get_image(self, image_list, source):
if source.startswith('wx'):
img = wx.ArtProvider_GetBitmap(source, wx.ART_OTHER, _SIZE)
else:
path = os.path.join(_BASE, source)
img = wx.Image(path, wx.BITMAP_TYPE_PNG).ConvertToBitmap()
return image_list.Add(img)
| # Copyright 2008-2009 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import wx
from robot.parsing.model import Variable
_SIZE = (16, 16)
_BASE = os.path.join(os.path.dirname(__file__), '..', 'widgets')
class TreeImageList(wx.ImageList):
def __init__(self):
wx.ImageList.__init__(self, *_SIZE)
self._images = {
'TestDataDirectory': self.directory,
'TestCaseFile': _TreeImage(self, wx.ART_NORMAL_FILE),
'TestCase': _TreeImage(self, 'robot.png'),
'UserKeyword': _TreeImage(self, 'process.png'),
'ResourceFile': _TreeImage(self, wx.ART_EXECUTABLE_FILE),
'Variable': _TreeImage(self, 'dollar.png')
}
@property
def directory(self):
return _TreeImage(self, wx.ART_FOLDER, wx.ART_FOLDER_OPEN)
def __getitem__(self, controller):
return self._images[controller.data.__class__.__name__]
class _TreeImage(object):
def __init__(self, image_list, normal, expanded=None):
self.normal = self._get_image(image_list, normal)
self.expanded = self._get_image(image_list, expanded) if expanded else self.normal
def _get_image(self, image_list, source):
if source.startswith('wx'):
img = wx.ArtProvider_GetBitmap(source, wx.ART_OTHER, _SIZE)
else:
path = os.path.join(_BASE, source)
img = wx.Image(path, wx.BITMAP_TYPE_PNG).ConvertToBitmap()
return image_list.Add(img)
| apache-2.0 | Python |
30b9073d68b07003b89641ca5764305b8a7b1250 | fix not existing UnsupportedOSError | Widukind/dlstats,mmalter/dlstats,MichelJuillard/dlstats,MichelJuillard/dlstats,MichelJuillard/dlstats,Widukind/dlstats,mmalter/dlstats,mmalter/dlstats | dlstats/configuration.py | dlstats/configuration.py | import configobj
import validate
import os
def _get_filename():
"""Return the configuration file path."""
appname = 'dlstats'
if os.name == 'posix':
if "HOME" in os.environ:
if os.path.isfile(os.environ["HOME"]+'/.'+appname+'/main.conf'):
return os.environ["HOME"]+'/.'+appname+'/main.conf'
if os.path.isfile('/etc/'+appname+'/main.conf'):
return '/etc/'+appname+'/main.conf'
else:
raise FileNotFoundError('No configuration file found.')
elif os.name == 'mac':
return ("%s/Library/Application Support/%s" % (os.environ["HOME"], appname+'/main.conf'))
elif os.name == 'nt':
#TODO: Trouver une meilleure méthode
return ("%s/%s" % (os.environ["APPDATA"], appname+'/main.conf'))
else:
raise Exception("Not supported OS: %s" % os.name)
_configspec = """
[General]
logging_directory = string()
socket_directory = string()
[MongoDB]
host = ip_addr()
port = integer()
max_pool_size = integer()
socketTimeoutMS = integer()
connectTimeoutMS = integer()
waitQueueTimeout = integer()
waitQueueMultiple = integer()
auto_start_request = boolean()
use_greenlets = boolean()
[ElasticSearch]
host = integer()
port = integer()
[Fetchers]
[[Eurostat]]
url_table_of_contents = string()"""
try:
configuration_filename = _get_filename()
configuration = configobj.ConfigObj(configuration_filename,
configspec=_configspec.split('\n'))
validator = validate.Validator()
configuration.validate(validator)
except FileNotFoundError:
configuration = configobj.ConfigObj()
configuration['General'] = {'logging_directory': os.environ["HOME"], 'socket_directory': os.environ["HOME"]}
configuration['Fetchers'] = {'Eurostat':{'url_table_of_contents':'http://ec.europa.eu/eurostat/estat-navtree-portlet-prod/BulkDownloadListing?sort=1&file=table_of_contents.xml'}}
configuration['MongoDB'] = {'host':'127.0.0.1', 'port':27017}
configuration['ElasticSearch'] = {'host':'127.0.0.1', 'port':9200}
configuration = configuration.dict()
| import configobj
import validate
import os
def _get_filename():
"""Return the configuration file path."""
appname = 'dlstats'
if os.name == 'posix':
if "HOME" in os.environ:
if os.path.isfile(os.environ["HOME"]+'/.'+appname+'/main.conf'):
return os.environ["HOME"]+'/.'+appname+'/main.conf'
if os.path.isfile('/etc/'+appname+'/main.conf'):
return '/etc/'+appname+'/main.conf'
else:
raise FileNotFoundError('No configuration file found.')
elif os.name == 'mac':
return ("%s/Library/Application Support/%s" % (os.environ["HOME"], appname+'/main.conf'))
elif os.name == 'nt':
#TODO: Trouver une meilleure méthode
return ("%s/%s" % (os.environ["APPDATA"], appname+'/main.conf'))
else:
raise UnsupportedOSError(os.name)
_configspec = """
[General]
logging_directory = string()
socket_directory = string()
[MongoDB]
host = ip_addr()
port = integer()
max_pool_size = integer()
socketTimeoutMS = integer()
connectTimeoutMS = integer()
waitQueueTimeout = integer()
waitQueueMultiple = integer()
auto_start_request = boolean()
use_greenlets = boolean()
[ElasticSearch]
host = integer()
port = integer()
[Fetchers]
[[Eurostat]]
url_table_of_contents = string()"""
try:
configuration_filename = _get_filename()
configuration = configobj.ConfigObj(configuration_filename,
configspec=_configspec.split('\n'))
validator = validate.Validator()
configuration.validate(validator)
except FileNotFoundError:
configuration = configobj.ConfigObj()
configuration['General'] = {'logging_directory': os.environ["HOME"], 'socket_directory': os.environ["HOME"]}
configuration['Fetchers'] = {'Eurostat':{'url_table_of_contents':'http://ec.europa.eu/eurostat/estat-navtree-portlet-prod/BulkDownloadListing?sort=1&file=table_of_contents.xml'}}
configuration['MongoDB'] = {'host':'127.0.0.1', 'port':27017}
configuration['ElasticSearch'] = {'host':'127.0.0.1', 'port':9200}
configuration = configuration.dict()
| agpl-3.0 | Python |
a4cd7bf2979f489a2b010936ed31803d79eba7c1 | Bump version for development | wikimedia/operations-debs-python-kafka,mumrah/kafka-python,scrapinghub/kafka-python,mumrah/kafka-python,Aloomaio/kafka-python,scrapinghub/kafka-python,Aloomaio/kafka-python,dpkp/kafka-python,wikimedia/operations-debs-python-kafka,ohmu/kafka-python,DataDog/kafka-python,ohmu/kafka-python,Yelp/kafka-python,dpkp/kafka-python,Yelp/kafka-python | kafka/version.py | kafka/version.py | __version__ = '1.2.6.dev'
| __version__ = '1.2.5'
| apache-2.0 | Python |
1062b6b1a82e0916f81a1544b0df92dbd03d6185 | update event handler test py file | GoogleCloudPlatform/fourkeys,GoogleCloudPlatform/fourkeys,GoogleCloudPlatform/fourkeys | terraform/modules/fourkeys-images/files/event_handler/event_handler_test.py | terraform/modules/fourkeys-images/files/event_handler/event_handler_test.py | # Copyright 2020 Google, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hmac
from hashlib import sha1
import event_handler
import mock
import pytest
@pytest.fixture
def client():
event_handler.app.testing = True
return event_handler.app.test_client()
def test_unauthorized_source(client):
r = client.post("/", data="Hello")
assert r.status_code == 403
r = client.get("/", data="Hello")
assert r.status_code == 403
def test_missing_signature(client):
r = client.post("/", headers={"User-Agent": "GitHub-Hookshot"})
assert r.status_code == 403
@mock.patch("sources.get_secret", mock.MagicMock(return_value=b"foo"))
def test_unverified_signature(client):
r = client.post(
"/",
headers={
"User-Agent": "GitHub-Hookshot",
"X-Hub-Signature": "foobar",
},
)
assert r.status_code == 403
@mock.patch("sources.get_secret", mock.MagicMock(return_value=b"foo"))
@mock.patch(
"event_handler.publish_to_pubsub", mock.MagicMock(return_value=True)
)
def test_verified_signature(client):
signature = "sha1=" + hmac.new(b"foo", b"Hello", sha1).hexdigest()
r = client.post(
"/",
data="Hello",
headers={"User-Agent": "GitHub-Hookshot", "X-Hub-Signature": signature},
)
assert r.status_code == 204
@mock.patch("sources.get_secret", mock.MagicMock(return_value=b"foo"))
def test_data_sent_to_pubsub(client):
signature = "sha1=" + hmac.new(b"foo", b"Hello", sha1).hexdigest()
event_handler.publish_to_pubsub = mock.MagicMock(return_value=True)
headers = {
"User-Agent": "GitHub-Hookshot",
"Host": "localhost",
"Content-Length": "5",
"X-Hub-Signature": signature,
}
r = client.post("/", data="Hello", headers=headers)
event_handler.publish_to_pubsub.assert_called_with(
"github", b"Hello", headers
)
assert r.status_code == 204
| # Copyright 2020 Google, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hmac
from hashlib import sha1
import event_handler
import mock
import pytest
@pytest.fixture
def client():
event_handler.app.testing = True
return event_handler.app.test_client()
def test_unauthorized_source(client):
with pytest.raises(Exception) as e:
client.post("/")
assert "Source not authorized" in str(e.value)
def test_missing_signature(client):
with pytest.raises(Exception) as e:
client.post("/", headers={"User-Agent": "GitHub-Hookshot"})
assert "Github signature is empty" in str(e.value)
@mock.patch("sources.get_secret", mock.MagicMock(return_value=b"foo"))
def test_unverified_signature(client):
with pytest.raises(Exception) as e:
client.post(
"/",
headers={
"User-Agent": "GitHub-Hookshot",
"X-Hub-Signature": "foobar",
},
)
assert "Unverified Signature" in str(e.value)
@mock.patch("sources.get_secret", mock.MagicMock(return_value=b"foo"))
@mock.patch(
"event_handler.publish_to_pubsub", mock.MagicMock(return_value=True)
)
def test_verified_signature(client):
signature = "sha1=" + hmac.new(b"foo", b"Hello", sha1).hexdigest()
r = client.post(
"/",
data="Hello",
headers={"User-Agent": "GitHub-Hookshot", "X-Hub-Signature": signature},
)
assert r.status_code == 204
@mock.patch("sources.get_secret", mock.MagicMock(return_value=b"foo"))
def test_data_sent_to_pubsub(client):
signature = "sha1=" + hmac.new(b"foo", b"Hello", sha1).hexdigest()
event_handler.publish_to_pubsub = mock.MagicMock(return_value=True)
headers = {
"User-Agent": "GitHub-Hookshot",
"Host": "localhost",
"Content-Length": "5",
"X-Hub-Signature": signature,
}
r = client.post("/", data="Hello", headers=headers)
event_handler.publish_to_pubsub.assert_called_with(
"github", b"Hello", headers
)
assert r.status_code == 204
| apache-2.0 | Python |
d5f5033d0700f90625089e1a84112b19acf4912c | Improve search query performance | SCUEvals/scuevals-api,SCUEvals/scuevals-api | scuevals_api/resources/search.py | scuevals_api/resources/search.py | from flask_jwt_extended import jwt_required, get_jwt_identity
from flask_restful import Resource
from marshmallow import fields
from sqlalchemy import func
from sqlalchemy.orm import subqueryload
from scuevals_api.models import Role, Course, Department, School, Professor
from scuevals_api.roles import role_required
from scuevals_api.utils import use_args
class SearchResource(Resource):
args = {'q': fields.String(required=True), 'limit': fields.Integer()}
@jwt_required
@role_required(Role.Student)
@use_args(args)
def get(self, args):
jwt_data = get_jwt_identity()
if 'limit' not in args or args['limit'] > 50:
args['limit'] = 50
# strip any characters that would cause matching issues
q = args['q'].replace(',', '')
courses = Course.query.join(Course.department).filter(
Course.department.has(Department.school.has(School.university_id == jwt_data['university_id']))
).filter(
func.concat(Department.abbreviation, ' ', Course.number, ' ', Course.title).ilike('%{}%'.format(q))
).limit(args['limit'])
professors = Professor.query.filter(
func.concat(Professor.last_name, ' ', Professor.first_name).ilike('%{}%'.format(q)) |
func.concat(Professor.first_name, ' ', Professor.last_name).ilike('%{}%'.format(q))
).limit(args['limit'])
return {
'courses': [
{
**course.to_dict(),
'quarters': [section.quarter.id for section in course.sections]
}
for course in courses.all()
],
'professors': [
{
'id': professor.id,
'first_name': professor.first_name,
'last_name': professor.last_name
}
for professor in professors.all()
]
}
| from flask_jwt_extended import jwt_required, get_jwt_identity
from flask_restful import Resource
from marshmallow import fields
from sqlalchemy import func
from sqlalchemy.orm import subqueryload
from scuevals_api.models import Role, Course, Department, School, Professor
from scuevals_api.roles import role_required
from scuevals_api.utils import use_args
class SearchResource(Resource):
args = {'q': fields.String(required=True), 'limit': fields.Integer()}
@jwt_required
@role_required(Role.Student)
@use_args(args)
def get(self, args):
jwt_data = get_jwt_identity()
if 'limit' not in args or args['limit'] > 50:
args['limit'] = 50
# strip any characters that would cause matching issues
q = args['q'].replace(',', '')
courses = Course.query.options(
subqueryload(Course.department)
).join(Course.department, Department.school).filter(
School.university_id == jwt_data['university_id']
).filter(
func.concat(Department.abbreviation, ' ', Course.number, ' ', Course.title).ilike('%{}%'.format(q))
).limit(args['limit']).all()
professors = Professor.query.filter(
func.concat(Professor.last_name, ' ', Professor.first_name).ilike('%{}%'.format(q)) |
func.concat(Professor.first_name, ' ', Professor.last_name).ilike('%{}%'.format(q))
).limit(args['limit']).all()
return {
'courses': [
{
'id': course.id,
'department': course.department.abbreviation,
'number': course.number,
'title': course.title,
'quarters': [section.quarter.id for section in course.sections]
}
for course in courses
],
'professors': [
{
'id': professor.id,
'first_name': professor.first_name,
'last_name': professor.last_name
}
for professor in professors
]
}
| agpl-3.0 | Python |
c5970991ed2d3285e6a3ef9badb6e73756ff876b | Fix `assert_called` usage for Python 3.5 build | prkumar/uplink | tests/test_session.py | tests/test_session.py | # Local imports
from uplink import session
def test_base_url(uplink_builder_mock):
# Setup
uplink_builder_mock.base_url = "https://api.github.com"
sess = session.Session(uplink_builder_mock)
# Run & Verify
assert uplink_builder_mock.base_url == sess.base_url
def test_headers(uplink_builder_mock):
# Setup
sess = session.Session(uplink_builder_mock)
# Run
sess.headers["key"] = "value"
# Verify
assert uplink_builder_mock.add_hook.called
assert sess.headers == {"key": "value"}
def test_params(uplink_builder_mock):
# Setup
sess = session.Session(uplink_builder_mock)
# Run
sess.params["key"] = "value"
# Verify
uplink_builder_mock.add_hook.assert_called()
assert sess.params == {"key": "value"}
def test_auth(uplink_builder_mock):
# Setup
uplink_builder_mock.auth = ("username", "password")
sess = session.Session(uplink_builder_mock)
# Run & Verify
assert uplink_builder_mock.auth == sess.auth
def test_auth_set(uplink_builder_mock):
# Setup
sess = session.Session(uplink_builder_mock)
# Run
sess.auth = ("username", "password")
# Verify
assert ("username", "password") == uplink_builder_mock.auth
| # Local imports
from uplink import session
def test_base_url(uplink_builder_mock):
# Setup
uplink_builder_mock.base_url = "https://api.github.com"
sess = session.Session(uplink_builder_mock)
# Run & Verify
assert uplink_builder_mock.base_url == sess.base_url
def test_headers(uplink_builder_mock):
# Setup
sess = session.Session(uplink_builder_mock)
# Run
sess.headers["key"] = "value"
# Verify
uplink_builder_mock.add_hook.assert_called()
assert sess.headers == {"key": "value"}
def test_params(uplink_builder_mock):
# Setup
sess = session.Session(uplink_builder_mock)
# Run
sess.params["key"] = "value"
# Verify
uplink_builder_mock.add_hook.assert_called()
assert sess.params == {"key": "value"}
def test_auth(uplink_builder_mock):
# Setup
uplink_builder_mock.auth = ("username", "password")
sess = session.Session(uplink_builder_mock)
# Run & Verify
assert uplink_builder_mock.auth == sess.auth
def test_auth_set(uplink_builder_mock):
# Setup
sess = session.Session(uplink_builder_mock)
# Run
sess.auth = ("username", "password")
# Verify
assert ("username", "password") == uplink_builder_mock.auth
| mit | Python |
df71bbfd2d33ef76393fcd03355eefce023f9897 | replace deprecated get_model | iivvoo/resturo | resturo/models.py | resturo/models.py | from django.db import models
from django.conf import settings
from django.apps import apps
from django.core.exceptions import ImproperlyConfigured
class ModelResolver(object):
def __call__(self, name):
model_path = getattr(self, name)
try:
app_label, model_class_name = model_path.split('.')
except ValueError:
raise ImproperlyConfigured(
"{0} must be of the form 'app_label.model_name'".format(name))
model = apps.get_model(app_label, model_class_name)
if model is None:
raise ImproperlyConfigured(
"{0} refers to model '{1}' that has not been "
"installed".format(name, model_path))
return model
def __getattr__(self, name):
# resolveclass
if name == 'User':
model = settings.AUTH_USER_MODEL
else:
try:
model_path = settings.MODELS[name]
except (KeyError, AttributeError):
raise ImproperlyConfigured(
"no MODELS have been configured, {0} can't be resolved"
.format(name))
model = model_path
return model
modelresolver = ModelResolver()
class Organization(models.Model):
class Meta:
abstract = True
name = models.TextField()
members = models.ManyToManyField(modelresolver.User,
through=modelresolver.Membership,
related_name="organizations")
def __unicode__(self):
return self.name
__str__ = __unicode__
class Membership(models.Model):
class Meta:
abstract = True
user = models.ForeignKey(modelresolver.User)
organization = models.ForeignKey(modelresolver.Organization)
role = models.IntegerField(default=0)
| from django.db import models
from django.conf import settings
from django.db.models import get_model
from django.core.exceptions import ImproperlyConfigured
class ModelResolver(object):
def __call__(self, name):
model_path = getattr(self, name)
try:
app_label, model_class_name = model_path.split('.')
except ValueError:
raise ImproperlyConfigured(
"{0} must be of the form 'app_label.model_name'".format(name))
model = get_model(app_label, model_class_name)
if model is None:
raise ImproperlyConfigured(
"{0} refers to model '{1}' that has not been "
"installed".format(name, model_path))
return model
def __getattr__(self, name):
# resolveclass
if name == 'User':
model = settings.AUTH_USER_MODEL
else:
try:
model_path = settings.MODELS[name]
except (KeyError, AttributeError):
raise ImproperlyConfigured(
"no MODELS have been configured, {0} can't be resolved"
.format(name))
model = model_path
return model
modelresolver = ModelResolver()
class Organization(models.Model):
class Meta:
abstract = True
name = models.TextField()
members = models.ManyToManyField(modelresolver.User,
through=modelresolver.Membership,
related_name="organizations")
def __unicode__(self):
return self.name
__str__ = __unicode__
class Membership(models.Model):
class Meta:
abstract = True
user = models.ForeignKey(modelresolver.User)
organization = models.ForeignKey(modelresolver.Organization)
role = models.IntegerField(default=0)
| isc | Python |
56716020f42ddd1abaed1d387f1ab76c4bd9ff73 | add unit test for reset method | anchore/anchore-engine,anchore/anchore-engine,anchore/anchore-engine | tests/unit/anchore_engine/services/policy_engine/engine/policy/test_gate.py | tests/unit/anchore_engine/services/policy_engine/engine/policy/test_gate.py | import pytest
from anchore_engine.services.policy_engine.engine.policy.gates import (
PackageCheckGate,
BaseTrigger,
)
from anchore_engine.services.policy_engine.engine.policy.gates.dockerfile import (
EffectiveUserTrigger,
)
from anchore_engine.services.policy_engine.engine.policy.gates.npms import (
PkgMatchTrigger,
)
from anchore_engine.services.policy_engine.engine.policy.params import (
CommaDelimitedStringListParameter,
EnumStringParameter,
TriggerParameter,
)
class TestBaseTrigger:
"""
For the purposes of this test, we are using a few random trigger and gate (instead of testing every gate/trigger class combo)
To verify the parameters method works well.
This is specific to the random trigger that was selected, essentially verifying that
the parameters method does what it's supposed to, which is retrieving a dict of
data attributes and their values.
Note: for the gate parameter it is crucial to use a gate that has __lifecycle_state__ == LifecycleStates.eol for this test.
Otherwise, the BaseTrigger constructor won't be able to execute because the parameter validation will fail
"""
@pytest.mark.parametrize(
"param",
[
pytest.param(
{
"trigger": EffectiveUserTrigger,
"gate": PackageCheckGate,
"expected_params": {
"user": CommaDelimitedStringListParameter,
"allowed_type": EnumStringParameter,
},
},
id="effective-user-trigger",
),
pytest.param(
{
"trigger": PkgMatchTrigger,
"gate": PackageCheckGate,
"expected_params": {
"name": TriggerParameter,
"version": TriggerParameter,
},
},
id="npm-pkg-match-trigger",
),
],
)
def test_parameters(self, param):
parameters = param["trigger"](param["gate"]).parameters()
for key, value in param["expected_params"].items():
assert parameters.get(key).__class__ == value
def test_reset(self):
trigger = BaseTrigger(PackageCheckGate)
trigger._fired_instances = [1, 2, 3]
trigger.reset()
assert trigger._fired_instances == []
| import pytest
from anchore_engine.services.policy_engine.engine.policy.gates import PackageCheckGate
from anchore_engine.services.policy_engine.engine.policy.gates.dockerfile import (
EffectiveUserTrigger,
)
from anchore_engine.services.policy_engine.engine.policy.gates.npms import (
PkgMatchTrigger,
)
from anchore_engine.services.policy_engine.engine.policy.params import (
CommaDelimitedStringListParameter,
EnumStringParameter,
TriggerParameter,
)
class TestBaseTrigger:
"""
For the purposes of this test, we are using a few random trigger and gate (instead of testing every gate/trigger class combo)
To verify the parameters method works well.
This is specific to the random trigger that was selected, essentially verifying that
the parameters method does what it's supposed to, which is retrieving a dict of
data attributes and their values.
Note: for the gate parameter it is crucial to use a gate that has __lifecycle_state__ == LifecycleStates.eol for this test.
Otherwise, the BaseTrigger constructor won't be able to execute because the parameter validation will fail
"""
@pytest.mark.parametrize(
"param",
[
pytest.param(
{
"trigger": EffectiveUserTrigger,
"gate": PackageCheckGate,
"expected_params": {
"user": CommaDelimitedStringListParameter,
"allowed_type": EnumStringParameter,
},
},
id="effective-user-trigger",
),
pytest.param(
{
"trigger": PkgMatchTrigger,
"gate": PackageCheckGate,
"expected_params": {
"name": TriggerParameter,
"version": TriggerParameter,
},
},
id="npm-pkg-match-trigger",
),
],
)
def test_parameters(self, param):
parameters = param["trigger"](param["gate"]).parameters()
for key, value in param["expected_params"].items():
assert parameters.get(key).__class__ == value
| apache-2.0 | Python |
216bbaf8cdcf37acba56b9045a4c9967869d195f | make test_tracker safe to run in parallel | dsanders11/easypost-python,EasyPost/easypost-python | tests/test_tracker.py | tests/test_tracker.py | # Unit tests related to 'Trackers' (https://www.easypost.com/docs/api#tracking).
import datetime
import random
import easypost
def test_tracker_values():
for code, status in (
('EZ1000000001', 'pre_transit'),
('EZ2000000002', 'in_transit'),
('EZ3000000003', 'out_for_delivery'),
('EZ4000000004', 'delivered'),
('EZ5000000005', 'return_to_sender'),
('EZ6000000006', 'failure'),
('EZ7000000007', 'unknown'),
):
tracker = easypost.Tracker.create(tracking_code=code)
assert tracker.status == status
assert tracker.tracking_details != []
if status == 'delivered':
assert tracker.signed_by == 'John Tester'
def test_tracker_interactions():
# create a pseudo-random tracking code so that we can run multiple instances of this test in parallel
tracking_code = '{0}{1}'.format(
datetime.datetime.now().strftime('%Y%m%d%H%M%S'),
random.randrange(1, 100)
)
# Create a tracker and then retrieve it. We assert on created and retrieved tracker's values.
tracker = easypost.Tracker.create(
tracking_code=tracking_code,
carrier="usps",
options=dict(
full_test_tracker=True,
)
)
assert tracker.id # This is random
# retrieve tracker by id
tracker2 = easypost.Tracker.retrieve(tracker.id)
assert tracker2.id == tracker.id # Should be the same as above
# retrieve all trackers by tracking_code
trackers = easypost.Tracker.all(tracking_code=tracking_code)
assert len(trackers["trackers"])
assert trackers["trackers"][0].id == tracker.id == tracker2.id # Should be the same as the ids above
# create another test tracker
tracker3 = easypost.Tracker.create(
tracking_code=tracking_code,
carrier="USPS",
options=dict(
full_test_tracker=True,
)
)
assert tracker3.id
# retrieve all created since 'tracker'
trackers2 = easypost.Tracker.all(after_id=tracker.id, tracking_code=tracking_code)
assert len(trackers2["trackers"]) == 1 # Should be 1
assert trackers2["has_more"] is False # Should be false
assert trackers2["trackers"][0].id == tracker3.id # Should be the same as the id for tracker3
| # Unit tests related to 'Trackers' (https://www.easypost.com/docs/api#tracking).
import easypost
def test_tracker():
# Create a tracker and then retrieve it. We assert on created and retrieved tracker's values.
tracker = easypost.Tracker.create(
tracking_code="EZ2000000002",
carrier="USPS"
)
assert tracker.id # This is random
# retrieve tracker by id
tracker2 = easypost.Tracker.retrieve(tracker.id)
assert tracker2.id == tracker.id # Should be the same as above
# retrieve all trackers by tracking_code
trackers = easypost.Tracker.all(tracking_code="EZ2000000002")
assert len(trackers["trackers"])
assert trackers["trackers"][0].id == tracker.id == tracker2.id # Should be the same as the ids above
# create another test tracker
tracker3 = easypost.Tracker.create(
tracking_code="EZ2000000002",
carrier="USPS"
)
assert tracker3.id
# retrieve all created since 'tracker'
trackers2 = easypost.Tracker.all(after_id=tracker.id)
assert len(trackers2["trackers"]) == 1 # Should be 1
assert trackers2["has_more"] is False # Should be false
assert trackers2["trackers"][0].id == tracker3.id # Should be the same as the id for tracker3
| mit | Python |
ad1ce131d37deb179314f00e23eca9bfcbad0be4 | add pre_dispatcher command | dials/dials,dials/dials,dials/dials,dials/dials,dials/dials | command_line/spot_counts_per_image.py | command_line/spot_counts_per_image.py | from __future__ import division
# LIBTBX_PRE_DISPATCHER_INCLUDE_SH export BOOST_ADAPTBX_FPE_DEFAULT=1
from dials.util.options import OptionParser
from dials.util.options import flatten_reflections, flatten_datablocks
from dials.algorithms.peak_finding import per_image_analysis
import iotbx.phil
phil_scope = iotbx.phil.parse("""\
plot=None
.type = path
individual_plots=False
.type = bool
id = None
.type = int(value_min=0)
""")
def run(args):
parser = OptionParser(
read_reflections=True,
read_datablocks=True,
phil=phil_scope,
check_format=False)
from libtbx.utils import Abort
params, options = parser.parse_args(show_diff_phil=False)
reflections = flatten_reflections(params.input.reflections)
datablocks = flatten_datablocks(params.input.datablock)
if len(reflections) != 1:
raise Abort('exactly 1 reflection table must be specified')
if len(datablocks) != 1:
raise Abort('exactly 1 datablock must be specified')
reflections = reflections[0]
imageset = datablocks[0].extract_imagesets()[0]
if params.id is not None:
reflections = reflections.select(reflections['id'] == params.id)
stats = per_image_analysis.stats_imageset(
imageset, reflections, plot=params.individual_plots)
per_image_analysis.print_table(stats)
if params.plot is not None:
per_image_analysis.plot_stats(stats, filename=params.plot)
if __name__ == '__main__':
import sys
run(sys.argv[1:])
| from dials.util.options import OptionParser
from dials.util.options import flatten_reflections, flatten_datablocks
from dials.algorithms.peak_finding import per_image_analysis
import iotbx.phil
phil_scope = iotbx.phil.parse("""\
plot=None
.type = path
individual_plots=False
.type = bool
id = None
.type = int(value_min=0)
""")
def run(args):
parser = OptionParser(
read_reflections=True,
read_datablocks=True,
phil=phil_scope,
check_format=False)
from libtbx.utils import Abort
params, options = parser.parse_args(show_diff_phil=False)
reflections = flatten_reflections(params.input.reflections)
datablocks = flatten_datablocks(params.input.datablock)
if len(reflections) != 1:
raise Abort('exactly 1 reflection table must be specified')
if len(datablocks) != 1:
raise Abort('exactly 1 datablock must be specified')
reflections = reflections[0]
imageset = datablocks[0].extract_imagesets()[0]
if params.id is not None:
reflections = reflections.select(reflections['id'] == params.id)
stats = per_image_analysis.stats_imageset(
imageset, reflections, plot=params.individual_plots)
per_image_analysis.print_table(stats)
if params.plot is not None:
per_image_analysis.plot_stats(stats, filename=params.plot)
if __name__ == '__main__':
import sys
run(sys.argv[1:])
| bsd-3-clause | Python |
937fcb43d43c81dcdb6f65344022e68bedcdb324 | Remove zip archive after extraction | jakelever/kindred,jakelever/kindred | kindred/utils.py | kindred/utils.py |
import os
import zipfile
import hashlib
import requests
import logging
import traceback
import time
def _calcSHA256(filename):
return hashlib.sha256(open(filename, 'rb').read()).hexdigest()
def _findDir(name, path):
if os.path.isdir(path):
for root, dirs, files in os.walk(path):
if name in dirs:
return os.path.abspath(os.path.join(root, name))
return None
# From: https://stackoverflow.com/questions/32763720/timeout-a-file-download-with-python-urllib
def _downloadFile(url, filename, timeout=180):
    """Stream *url* into *filename* in 1 MiB chunks, printing progress.

    timeout is the requests connect/read timeout in seconds.  The
    previous version accepted this parameter but silently ignored it,
    always using a hard-coded 10 seconds; it is now honored.
    """
    # stream=True keeps memory flat for large files.
    request = requests.get(url, timeout=timeout, stream=True)
    # Open the output file and make sure we write in binary mode
    with open(filename, 'wb') as fh:
        downloadedSize = 0.0
        # Walk through the response in chunks of 1024 * 1024 bytes (1 MiB).
        for chunk in request.iter_content(1024 * 1024):
            fh.write(chunk)
            # Progress reporting; could also be used to abort slow downloads.
            downloadedSize += (len(chunk) / float(1024 * 1024))
            currentDateTime = time.strftime("%Y/%m/%d %H:%M:%S")
            print("%s : Downloaded %.1f MB" % (currentDateTime, downloadedSize))
def _downloadFiles(files,downloadDirectory):
    """Download (url, shortName, expectedSHA256) triples into downloadDirectory.

    A cached file with a mismatched SHA-256 is deleted and re-fetched; a
    checksum mismatch after download aborts via assert.  Zip archives
    are extracted in place and the archive itself removed.

    NOTE(review): because the archive is deleted after extraction, a
    later run will re-download any .zip entry -- confirm this is intended.
    """
    #oldTimeout = socket.getdefaulttimeout()
    #ssl.SSLSocket.settimeout(180)
    if not os.path.isdir(downloadDirectory):
        os.mkdir(downloadDirectory)
    for url,shortName,expectedSHA256 in files:
        downloadedPath = os.path.join(downloadDirectory,shortName)
        if os.path.isfile(downloadedPath):
            # Stale or corrupt cached copy: remove so it is re-downloaded.
            downloadedSHA256 = _calcSHA256(downloadedPath)
            if not downloadedSHA256 == expectedSHA256:
                os.remove(downloadedPath)
        if not os.path.isfile(downloadedPath):
            #wget.download(url,out=downloadedPath,bar=None)
            try:
                _downloadFile(url,downloadedPath)
            except Exception as e:
                # Log the full traceback, then re-raise for the caller.
                logging.error(traceback.format_exc())
                print(type(e))
                raise
            downloadedSHA256 = _calcSHA256(downloadedPath)
            assert downloadedSHA256 == expectedSHA256, "SHA256 mismatch with downloaded file: %s" % shortName
        if shortName.endswith('.zip'):
            zip_ref = zipfile.ZipFile(downloadedPath, 'r')
            zip_ref.extractall(path=downloadDirectory)
            zip_ref.close()
            os.remove(downloadedPath)
    #socket.setdefaulttimeout(oldTimeout)
|
import os
import zipfile
import hashlib
import requests
import logging
import traceback
import time
def _calcSHA256(filename):
return hashlib.sha256(open(filename, 'rb').read()).hexdigest()
def _findDir(name, path):
if os.path.isdir(path):
for root, dirs, files in os.walk(path):
if name in dirs:
return os.path.abspath(os.path.join(root, name))
return None
# From: https://stackoverflow.com/questions/32763720/timeout-a-file-download-with-python-urllib
def _downloadFile(url,filename,timeout=180):
# Make the actual request, set the timeout for no data to 10 seconds and enable streaming responses so we don't have to keep the large files in memory
request = requests.get(url, timeout=10, stream=True)
# Open the output file and make sure we write in binary mode
with open(filename, 'wb') as fh:
downloadedSize = 0.0
# Walk through the request response in chunks of 1024 * 1024 bytes, so 1MiB
for chunk in request.iter_content(1024 * 1024):
# Write the chunk to the file
fh.write(chunk)
# Optionally we can check here if the download is taking too long
downloadedSize += (len(chunk) / float(1024*1024))
currentDateTime = time.strftime("%Y/%m/%d %H:%M:%S")
print("%s : Downloaded %.1f MB" % (currentDateTime,downloadedSize))
def _downloadFiles(files,downloadDirectory):
#oldTimeout = socket.getdefaulttimeout()
#ssl.SSLSocket.settimeout(180)
if not os.path.isdir(downloadDirectory):
os.mkdir(downloadDirectory)
for url,shortName,expectedSHA256 in files:
downloadedPath = os.path.join(downloadDirectory,shortName)
if os.path.isfile(downloadedPath):
downloadedSHA256 = _calcSHA256(downloadedPath)
if not downloadedSHA256 == expectedSHA256:
os.remove(downloadedPath)
if not os.path.isfile(downloadedPath):
#wget.download(url,out=downloadedPath,bar=None)
try:
_downloadFile(url,downloadedPath)
except Exception as e:
logging.error(traceback.format_exc())
print(type(e))
raise
downloadedSHA256 = _calcSHA256(downloadedPath)
assert downloadedSHA256 == expectedSHA256, "SHA256 mismatch with downloaded file: %s" % shortName
if shortName.endswith('.zip'):
zip_ref = zipfile.ZipFile(downloadedPath, 'r')
zip_ref.extractall(path=downloadDirectory)
zip_ref.close()
#socket.setdefaulttimeout(oldTimeout)
| mit | Python |
37b00c5fb5afead3e793ee2677b4a910e90a4510 | split up into functions and handle token expiration | benediktg/wallabag-migration | post-articles.py | post-articles.py | #!/usr/bin/env python3
import requests
import configparser
import csv
import sys
counter = 0
def main(args):
    """POST every article from the CSV at args[1] to the wallabag API.

    Reads credentials from credentials.ini, obtains an OAuth token, and
    retries each failed POST once with a refreshed token (handles token
    expiration).  Two consecutive failures abort the whole run.
    """
    config = configparser.ConfigParser()
    config.read('credentials.ini')
    hostname, payload = extractCreds(config)
    token = getToken(hostname, payload)
    # NOTE(review): fp is never closed; acceptable for a short-lived script.
    fp = open(args[1], newline='')
    reader = csv.DictReader(fp)
    global counter
    for row in reader:
        failCount = 0
        while failCount < 2:
            article = extractArticle(row, token)
            printf('.')
            r = requests.post('{}/api/entries.json'.format(hostname), article)
            if not connectionFailed(r):
                counter += 1
                break
            else:
                # Likely an expired token: refresh and retry this row once.
                failCount += 1
                token = getToken(hostname, payload)
                # NOTE(review): article is rebuilt at the top of the loop,
                # so this assignment is redundant -- confirm before removing.
                article['access_token'] = token
        if failCount == 2:
            print('\nConnection failed.\nAborting.')
            break
def extractCreds(config):
    """Pull wallabag credentials from a ConfigParser's DEFAULT section.

    Returns (hostname, payload) where payload is the OAuth
    password-grant request body for the /oauth/v2/token endpoint.
    """
    creds = config.defaults()
    payload = {
        'username': creds['username'],
        'password': creds['password'],
        'client_id': creds['client_id'],
        'client_secret': creds['c_secret'],
        'grant_type': 'password',
    }
    return (creds['host'], payload)
def getToken(hostname, payload):
    """Request an OAuth access token from the wallabag host.

    Side effect: rewrites *payload* in place to use the refresh-token
    grant, so subsequent calls with the same payload refresh the token
    instead of re-authenticating with the password.
    """
    r = requests.get('{}/oauth/v2/token'.format(hostname), payload)
    token = r.json().get('access_token')
    refresh = r.json().get('refresh_token')
    payload['grant_type'] = 'refresh_token'
    payload['refresh_token'] = refresh
    return token
def extractArticle(row, token):
    """Convert one CSV row into the POST payload for /api/entries.json.

    Expects keys 'url', 'is_read' and 'is_fav' (the flags as numeric
    strings); raises ValueError if a flag is not an integer.
    """
    return {
        'url': row['url'],
        'archive': int(row['is_read']),
        'starred': int(row['is_fav']),
        'access_token': token,
    }
def connectionFailed(response):
    """Return True when the API response body carries an 'error' key
    (e.g. an expired access token)."""
    body = response.json()
    return 'error' in body
def printf(text):
    """Print *text* without a newline and flush immediately (progress dots)."""
    print(text, end='', flush=True)
if __name__ == "__main__":
try:
main(sys.argv)
print('\nposted {} articles\nfinished successfully.'.format(counter))
except(KeyboardInterrupt):
print('\nposted {} articles\naborted.'.format(counter))
| #!/usr/bin/env python3
import requests
import configparser
import csv
import sys
counter = 0
def printf(text):
print(text, end='', flush=True)
def main(args):
c = configparser.ConfigParser()
c.read('credentials.ini')
c = c.defaults()
hostname = c['host']
username = c['username']
password = c['password']
clientid = c['client_id']
secret = c['c_secret']
payload = {'username': username, 'password': password,
'client_id': clientid, 'client_secret': secret,
'grant_type': 'password'}
r = requests.get('{}/oauth/v2/token'.format(hostname), payload)
token = r.json().get('access_token')
fp = open(args[1], newline='')
reader = csv.DictReader(fp)
global counter
for row in reader:
url = row['url']
isRead = int(row['is_read'])
isFaved = int(row['is_fav'])
article = {'url': url, 'archive': isRead,
'starred': isFaved, 'access_token': token}
printf('.')
r = requests.post('{}/api/entries.json'.format(hostname), article)
counter += 1
if __name__ == "__main__":
try:
main(sys.argv)
print('\nposted {} articles'.format(counter))
print('finished')
except(KeyboardInterrupt):
print('\nposted {} articles'.format(counter))
print('aborted')
| mit | Python |
e97b62425a8024ffcf447d1706aabebd5644a5ca | Update test_unicode.py | mwouts/jupytext,mwouts/jupytext,mwouts/jupytext,mwouts/jupytext,mwouts/jupytext,mwouts/jupytext,mwouts/jupytext,mwouts/jupytext,mwouts/jupytext,mwouts/jupytext | tests/test_unicode.py | tests/test_unicode.py | # coding: utf-8
import sys
import pytest
import jupytext
from .utils import list_all_notebooks
try:
unicode # Python 2
except NameError:
unicode = str # Python 3
@pytest.mark.parametrize('nb_file', list_all_notebooks('.ipynb') +
                         list_all_notebooks('.Rmd'))
def test_notebook_contents_is_unicode(nb_file):
    """Every cell source must be text (unicode on py2, str on py3), never bytes."""
    nb = jupytext.readf(nb_file)
    for cell in nb.cells:
        # '' is allowed: an empty source may be a plain (byte) str on Python 2.
        assert cell.source == '' or isinstance(cell.source, unicode)
def test_write_non_ascii(tmpdir):
    """Writing non-ASCII content in both .Rmd and .ipynb formats must not raise."""
    nb = jupytext.reads(u'Non-ascii contênt', ext='.Rmd')
    jupytext.writef(nb, str(tmpdir.join('notebook.Rmd')))
    jupytext.writef(nb, str(tmpdir.join('notebook.ipynb')))
| # coding: utf-8
import sys
import pytest
import jupytext
from .utils import list_all_notebooks
try:
unicode # Python 2
except NameError:
unicode = str # Python 3
@pytest.mark.parametrize('nb_file', list_all_notebooks('.ipynb') +
list_all_notebooks('.Rmd'))
def test_notebook_contents_is_unicode(nb_file):
nb = jupytext.readf(nb_file)
for cell in nb.cells:
if sys.version_info < (3, 0):
assert cell.source == '' or isinstance(cell.source, unicode)
else:
assert isinstance(cell.source, str)
def test_write_non_ascii(tmpdir):
nb = jupytext.reads(u'Non-ascii contênt', ext='.Rmd')
jupytext.writef(nb, str(tmpdir.join('notebook.Rmd')))
jupytext.writef(nb, str(tmpdir.join('notebook.ipynb')))
| mit | Python |
141acf950ee9ffc211028bf96cab31a68fffcbe3 | Add a way to show local scorecards #9 | antoinecarme/sklearn_explain | tests/gen_makefile.py | tests/gen_makefile.py | import os
import glob
subdirs = ["tests/protoyping"] + glob.glob("tests/skl_datasets/*") + glob.glob("tests/skl_datasets_reg/*") + glob.glob("tests/issues/*") + glob.glob("tests/score_card/*")
build_tests = ['sample', 'tests_issues', 'iris', 'boston']
all_target = "";
build_target = "";
for subdir1 in subdirs:
test_target = "";
for filename in glob.glob(subdir1 + "/*.py"):
bn1 = os.path.basename(filename);
lKeep = (not bn1.startswith('gen_all.py') and not bn1.startswith('gen_makefile.py'))
bn = subdir1 + "/" + bn1;
bn2 = bn.replace("/" , "_")
logfile = bn.replace("/" , "_");
logfile = "logs/" + logfile.replace(".py" , ".log");
if(lKeep):
print(bn1 + ": " , "\n\t", "-python3" , filename , " > " , logfile , " 2>&1\n");
test_target = bn1 + " " + test_target;
subdir1_label = subdir1.replace("/" , "_")
print(subdir1_label + ":" , test_target)
all_target = all_target + " " + subdir1_label;
for bt in build_tests:
if(bt in subdir1_label):
build_target = build_target + " " + subdir1_label;
#print("\n# ********************************************** \n");
print("\n\nall: " , all_target, "\n\t\n");
print("\n\nbuild-test: " , build_target, "\n\t\n");
| import os
import glob
subdirs = ["tests/protoyping"] + glob.glob("tests/skl_datasets/*") + glob.glob("tests/skl_datasets_reg/*") + glob.glob("tests/issues/*")
build_tests = ['sample', 'tests_issues', 'iris', 'boston']
all_target = "";
build_target = "";
for subdir1 in subdirs:
test_target = "";
for filename in glob.glob(subdir1 + "/*.py"):
bn1 = os.path.basename(filename);
lKeep = (not bn1.startswith('gen_all.py') and not bn1.startswith('gen_makefile.py'))
bn = subdir1 + "/" + bn1;
bn2 = bn.replace("/" , "_")
logfile = bn.replace("/" , "_");
logfile = "logs/" + logfile.replace(".py" , ".log");
if(lKeep):
print(bn1 + ": " , "\n\t", "-python3" , filename , " > " , logfile , " 2>&1\n");
test_target = bn1 + " " + test_target;
subdir1_label = subdir1.replace("/" , "_")
print(subdir1_label + ":" , test_target)
all_target = all_target + " " + subdir1_label;
for bt in build_tests:
if(bt in subdir1_label):
build_target = build_target + " " + subdir1_label;
#print("\n# ********************************************** \n");
print("\n\nall: " , all_target, "\n\t\n");
print("\n\nbuild-test: " , build_target, "\n\t\n");
| bsd-3-clause | Python |
4ad76ef7e5193d0b88df24ca4b6f6f9cb58c3e3d | Add __init__.py | dkalashnik/tempest,afaheem88/tempest,pandeyop/tempest,neerja28/Tempest,flyingfish007/tempest,zsoltdudas/lis-tempest,Vaidyanath/tempest,openstack/tempest,jamielennox/tempest,ntymtsiv/tempest,dkalashnik/tempest,vedujoshi/os_tempest,nunogt/tempest,jamielennox/tempest,pandeyop/tempest,cloudbase/lis-tempest,itskewpie/tempest,hayderimran7/tempest,citrix-openstack/build-tempest,manasi24/jiocloud-tempest-qatempest,varunarya10/tempest,jaspreetw/tempest,hayderimran7/tempest,Lilywei123/tempest,akash1808/tempest,tonyli71/tempest,tonyli71/tempest,vmahuli/tempest,vedujoshi/tempest,BeenzSyed/tempest,Juraci/tempest,xbezdick/tempest,vmahuli/tempest,xbezdick/tempest,queria/my-tempest,JioCloud/tempest,ebagdasa/tempest,redhat-cip/tempest,pczerkas/tempest,jaspreetw/tempest,citrix-openstack-build/tempest,redhat-cip/tempest,sebrandon1/tempest,BeenzSyed/tempest,CiscoSystems/tempest,Lilywei123/tempest,manasi24/jiocloud-tempest-qatempest,cisco-openstack/tempest,citrix-openstack/build-tempest,rakeshmi/tempest,roopali8/tempest,eggmaster/tempest,CiscoSystems/tempest,eggmaster/tempest,armando-migliaccio/tempest,vedujoshi/tempest,adkerr/tempest,FujitsuEnablingSoftwareTechnologyGmbH/tempest,flyingfish007/tempest,izadorozhna/tempest,ntymtsiv/tempest,armando-migliaccio/tempest,masayukig/tempest,NexusIS/tempest,bigswitch/tempest,Juniper/tempest,neerja28/Tempest,rzarzynski/tempest,Tesora/tesora-tempest,Juniper/tempest,tudorvio/tempest,LIS/lis-tempest,afaheem88/tempest_neutron,tudorvio/tempest,Vaidyanath/tempest,itskewpie/tempest,manasi24/tempest,LIS/lis-tempest,danielmellado/tempest,Juraci/tempest,roopali8/tempest,Mirantis/tempest,Mirantis/tempest,pczerkas/tempest,varunarya10/tempest,Tesora/tesora-tempest,izadorozhna/tempest,danielmellado/tempest,queria/my-tempest,akash1808/tempest,alinbalutoiu/tempest,nunogt/tempest,masayukig/tempest,openstack/tempest,rakeshmi/tempest,yamt/tempest,cisco-openstack/tempest,rzarzynski/tempest,bigswitch/tempest
,FujitsuEnablingSoftwareTechnologyGmbH/tempest,NexusIS/tempest,JioCloud/tempest,adkerr/tempest,yamt/tempest,zsoltdudas/lis-tempest,citrix-openstack-build/tempest,hpcloud-mon/tempest,afaheem88/tempest,manasi24/tempest,hpcloud-mon/tempest,vedujoshi/os_tempest,ebagdasa/tempest,sebrandon1/tempest,cloudbase/lis-tempest,alinbalutoiu/tempest,afaheem88/tempest_neutron | kong/__init__.py | kong/__init__.py | # Copyright 2011 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
| apache-2.0 | Python | |
3412b0db76f77a4772cf76c7794fbe61b58f5a25 | Select correct for vote detection | muffins-on-dope/bakery,muffins-on-dope/bakery,muffins-on-dope/bakery | bakery/views.py | bakery/views.py | # -*- coding: utf-8 -*-
from django.core.urlresolvers import reverse
from django.views.generic import ListView, TemplateView, RedirectView
from django.contrib import auth
from bakery.cookies.models import Cookie
from bakery.socialize.models import Vote
class HomeView(ListView):
    """Landing page: lists all cookies and marks those the current user
    has already voted for."""
    model = Cookie
    template_name = 'home.html'

    def get_context_data(self, **kwargs):
        context = super(HomeView, self).get_context_data(**kwargs)
        user_votes = Vote.objects.get_for_user(self.request.user.id)
        # Flat list of cookie primary keys the user voted on, so the
        # template can render per-cookie vote state.
        voted_cookie_ids = user_votes.values_list('cookie_id', flat=True).all()
        context['voted_cookie_ids'] = voted_cookie_ids
        return context

home = HomeView.as_view()
class StylesView(TemplateView):
    """Static style-guide page."""
    template_name = 'styles.html'

styles = StylesView.as_view()

class LoginErrorView(TemplateView):
    """Error page shown when login fails."""
    template_name = 'error.html'

login_error = LoginErrorView.as_view()
class LogoutView(RedirectView):
    """Log the user out, then redirect to the home page."""
    # Temporary (302) redirect so browsers don't cache the logout URL.
    permanent = False

    def get_redirect_url(self, **kwargs):
        auth.logout(self.request)
        return reverse('home')

logout = LogoutView.as_view()
| # -*- coding: utf-8 -*-
from django.core.urlresolvers import reverse
from django.views.generic import ListView, TemplateView, RedirectView
from django.contrib import auth
from bakery.cookies.models import Cookie
from bakery.socialize.models import Vote
class HomeView(ListView):
model = Cookie
template_name = 'home.html'
def get_context_data(self, **kwargs):
context = super(HomeView, self).get_context_data(**kwargs)
user_votes = Vote.objects.get_for_user(self.request.user.id)
voted_cookie_ids = user_votes.values_list('pk', flat=True).all()
context['voted_cookie_ids'] = voted_cookie_ids
return context
home = HomeView.as_view()
class StylesView(TemplateView):
template_name = 'styles.html'
styles = StylesView.as_view()
class LoginErrorView(TemplateView):
template_name = 'error.html'
login_error = LoginErrorView.as_view()
class LogoutView(RedirectView):
permanent = False
def get_redirect_url(self, **kwargs):
auth.logout(self.request)
return reverse('home')
logout = LogoutView.as_view()
| bsd-3-clause | Python |
d635fc9129bc4ccfd5384be6958ae1c14e9916ec | Add persona merging to translation merge script | okfde/eucopyright,okfde/eucopyright,okfde/eucopyright | scripts/merge_translations.py | scripts/merge_translations.py | import sys
import yaml
def persona(old, new, overwrite_language):
    """Copy *overwrite_language* translations from *new* into *old* in place.

    Only keys already present in old['translations'] are updated; extra
    keys in *new* are ignored.
    """
    old_translations = old['translations']
    new_translations = new['translations']
    for key, languages in old_translations.items():
        candidate = new_translations.get(key)
        if candidate and overwrite_language in candidate:
            languages[overwrite_language] = candidate[overwrite_language]
def questions(old, new, overwrite_language):
    """Copy *overwrite_language* text fields from *new* questions into
    *old* questions in place, pairing the two lists positionally.

    For multiple_choice questions, option 'details' are merged too, but
    only for options that already have a 'details' dict in *old*.
    """
    text_fields = ('text', 'explanation', 'explanationmore')
    for current, updated in zip(old, new):
        for field in text_fields:
            if overwrite_language in updated[field]:
                current[field][overwrite_language] = updated[field][overwrite_language]
        if current['type'] == 'multiple_choice':
            for cur_opt, upd_opt in zip(current['options'], updated['options']):
                if 'details' in cur_opt and overwrite_language in upd_opt['details']:
                    cur_opt['details'][overwrite_language] = upd_opt['details'][overwrite_language]
def main(mode, base_file, new_file, overwrite_language):
    """Merge one language's translations from new_file into base_file
    and dump the merged YAML to stdout.

    mode selects the document shape: 'persona' (dict with a
    'translations' mapping) or 'questions' (list of question dicts).
    Uses the Python 2 file() builtin, so this script is Python-2 only.
    """
    old = yaml.load(file(base_file).read())
    new = yaml.load(file(new_file).read())
    # Expect a two-letter (ISO 639-1 style) language code.
    assert len(overwrite_language) == 2
    if mode == 'persona':
        persona(old, new, overwrite_language)
    elif mode == 'questions':
        questions(old, new, overwrite_language)
    sys.stdout.write(yaml.safe_dump(old, allow_unicode=True, default_flow_style=False, encoding='utf-8', width=10000))
if __name__ == '__main__':
    # sys.argv[0] is the script name; forward (mode, base_file, new_file,
    # overwrite_language) to main().  The previous persona(*sys.argv) call
    # invoked the wrong function with the wrong number of arguments.
    main(*sys.argv[1:])
| import sys
import yaml
def main(base_file, new_file, overwrite_language):
old = yaml.load(file(base_file).read())
new = yaml.load(file(new_file).read())
assert len(overwrite_language) == 2
for o, n in zip(old, new):
if overwrite_language in n['text']:
o['text'][overwrite_language] = n['text'][overwrite_language]
if o['type'] == 'multiple_choice':
for oo, on in zip(o['options'], n['options']):
if 'details' in oo and overwrite_language in on['details']:
oo['details'][overwrite_language] = on['details'][overwrite_language]
sys.stdout.write(yaml.safe_dump(old, allow_unicode=True, default_flow_style=False, encoding='utf-8', width=10000))
if __name__ == '__main__':
main(sys.argv[1], sys.argv[2], sys.argv[3])
| mit | Python |
59891c76620bcc0630f0d56bfac94040c8651233 | remove create key hook | openstates/openstates.org,openstates/openstates.org,openstates/openstates.org,openstates/openstates.org | profiles/apps.py | profiles/apps.py | from django.apps import AppConfig
def create_profile(sender, instance, **kwargs):
    """post_save receiver: ensure every saved User has a Profile row."""
    # Imported lazily so the models module is only loaded once apps are ready.
    from profiles.models import Profile

    Profile.objects.get_or_create(user=instance)
class ProfilesConfig(AppConfig):
    """App config that wires the User post_save signal to profile creation."""
    name = "profiles"

    def ready(self):
        # Imported here because models and signals cannot be loaded at
        # module import time during Django's app-registry setup.
        from django.db.models.signals import post_save
        from django.contrib.auth.models import User

        post_save.connect(create_profile, sender=User)
| from django.apps import AppConfig
def create_key_for_verified_user(sender, **kwargs):
from simplekeys.models import Key, Tier
email = kwargs["email_address"]
try:
Key.objects.get(email=email.email)
except Key.DoesNotExist:
Key.objects.create(
tier=Tier.objects.get(slug="default"),
status="a",
email=email.email,
name=email.email,
)
def create_profile(sender, instance, **kwargs):
from profiles.models import Profile
Profile.objects.get_or_create(user=instance)
class ProfilesConfig(AppConfig):
name = "profiles"
def ready(self):
from allauth.account.signals import email_confirmed
from django.db.models.signals import post_save
from django.contrib.auth.models import User
email_confirmed.connect(create_key_for_verified_user)
post_save.connect(create_profile, sender=User)
| mit | Python |
a3525ca5e11b37a1bc76b2bebd84cf0d3b93a0c2 | fix lightsource hotkeys | campagnola/acq4,campagnola/acq4,meganbkratz/acq4,campagnola/acq4,meganbkratz/acq4,acq4/acq4,acq4/acq4,acq4/acq4,meganbkratz/acq4,campagnola/acq4,pbmanis/acq4,pbmanis/acq4,meganbkratz/acq4,pbmanis/acq4,pbmanis/acq4,acq4/acq4 | acq4/devices/LightSource/LightSource.py | acq4/devices/LightSource/LightSource.py | # -*- coding: utf-8 -*-
from __future__ import print_function
from acq4.devices.Device import *
from acq4.util import Qt
import acq4.util.Mutex as Mutex
from collections import OrderedDict
class LightSource(Device):
    """Device tracking the state and properties of multiple illumination sources.

    Keeps a per-source state dict and optional XKeys hotkey/backlight
    bindings; subclasses implement setSourceActive().
    """
    # emitted when the on/off status of a light changes
    sigLightChanged = Qt.Signal(object, object)  # self, light_name

    def __init__(self, dm, config, name):
        Device.__init__(self, dm, config, name)
        self._sources = OrderedDict()  # [name: {'active': bool, 'wavelength': float, 'power': float, ...}, ...]
        self._lock = Mutex.Mutex()

    def addSource(self, name, conf):
        """Register the light source *name* described by dict *conf*.

        If conf has an 'xkey' entry (devname, row, col), the matching
        XKeys button toggles the source.  The extra callback arguments
        must be passed as a tuple, hence (name,).
        """
        self._sources[name] = conf
        if 'xkey' in conf:
            devname, row, col = self._sources[name]['xkey']
            dev = self.dm.getDevice(devname)
            dev.addKeyCallback((row, col), self._hotkeyPressed, (name,))

    def describe(self, onlyActive=True):
        """Return a description of the current state of all active light sources.

        If onlyActive is False, then information for all sources will be
        returned, whether or not they are active.
        """
        if onlyActive:
            return OrderedDict([(n, s) for n, s in self._sources.items() if s['active']])
        else:
            return self._sources.copy()

    def activeSources(self):
        """Return the names of all active light sources.
        """
        # Fixed: iterating self._sources directly yields key strings, so the
        # previous s['name'] / s['active'] indexing raised TypeError.
        return [name for name, s in self._sources.items() if s['active']]

    def sourceActive(self, name):
        """Return True if the named light source is currently active.
        """
        return self._sources[name]['active']

    def setSourceActive(self, name, active):
        """Activate / deactivate a light source.  Subclass responsibility.
        """
        raise NotImplementedError()

    def _updateXkeyLight(self, name):
        # Mirror the source's active state onto its XKeys backlight LED,
        # if an 'xkey' binding was configured for this source.
        if 'xkey' in self._sources[name]:
            devname, row, col = self._sources[name]['xkey']
            dev = self.dm.getDevice(devname)
            bl = dev.getBacklights()
            bl[row, col] = int(self._sources[name]['active'])
            dev.setBacklights(bl)

    def _hotkeyPressed(self, dev, changes, name):
        # Toggle the bound source each time its hotkey is pressed.
        self.setSourceActive(name, not self.sourceActive(name))
| # -*- coding: utf-8 -*-
from __future__ import print_function
from acq4.devices.Device import *
from acq4.util import Qt
import acq4.util.Mutex as Mutex
from collections import OrderedDict
class LightSource(Device):
"""Device tracking the state and properties of multiple illumination sources.
"""
# emitted when the on/off status of a light changes
sigLightChanged = Qt.Signal(object, object) # self, light_name
def __init__(self, dm, config, name):
Device.__init__(self, dm, config, name)
self._sources = OrderedDict() # [name: {'active': bool, 'wavelength': float, 'power': float, ...}, ...]
self._lock = Mutex.Mutex()
def addSource(self, name, conf):
self._sources[name] = conf
if 'xkey' in conf:
devname, row, col = self._sources[name]['xkey']
dev = self.dm.getDevice(devname)
dev.addKeyCallback((row, col), self._hotkeyPressed, name)
def describe(self, onlyActive=True):
"""Return a description of the current state of all active light sources.
If onlyActive is False, then information for all sources will be returned, whether or not they are active.
"""
if onlyActive:
return OrderedDict([(n,s) for n,s in self._sources.items() if s['active']])
else:
return self._sources.copy()
def activeSources(self):
"""Return the names of all active light sources.
"""
return [s['name'] for s in self._sources if s['active']]
def sourceActive(self, name):
"""Return True if the named light source is currently active.
"""
return self._sources[name]['active']
def setSourceActive(self, name, active):
"""Activate / deactivate a light source.
"""
raise NotImplementedError()
def _updateXkeyLight(self, name):
if 'xkey' in self._sources[name]:
devname, row, col = self._sources[name]['xkey']
dev = self.dm.getDevice(devname)
bl = dev.getBacklights()
bl[row,col] = int(self._sources[name]['active'])
dev.setBacklights(bl)
def _hotkeyPressed(self, dev, changes, name):
self.setSourceActive(name, not self.sourceActive(name))
| mit | Python |
3a783f91a3cbb7e126af3f84969f92eb00a9a13d | Update dependency bazelbuild/bazel to latest version | google/copybara,google/copybara,google/copybara | third_party/bazel.bzl | third_party/bazel.bzl | # Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is autogenerated by copybara, please do not edit.
bazel_version = "52fb54d6f62e3f5bf22d16e8e396878246a64584"
bazel_sha256 = "94285222af3fa6a068f8f325207781886fe35090bd561a2603cb3279d1fdef07"
| # Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is autogenerated by copybara, please do not edit.
bazel_version = "c809cfce9212f6286353b47068add670f6a401dc"
bazel_sha256 = "95be88e25cacb58c05814c815eb4220b28588043f4a5017f378f3d8a26adbf14"
| apache-2.0 | Python |
50e8cddf423e48c2b8a7c6d38e10ff9a634e198a | add some better checkingto do_put | longaccess/longaccess-client,longaccess/longaccess-client,longaccess/longaccess-client | lacli/command.py | lacli/command.py | import os
import cmd
import glob
from lacli.upload import pool_upload
class LaCommand(cmd.Cmd):
    """ Our LA command line interface"""
    prompt='lacli> '

    def __init__(self, tokens, *args, **kwargs):
        # cmd.Cmd is an old-style class on Python 2: call __init__ directly.
        cmd.Cmd.__init__(self, *args, **kwargs)
        self.tokens=tokens

    def do_tvmconf(self, line):
        """tvmconf
        reset TVM access configuration (key, token, etc)"""
        # Returning True tells cmd.Cmd to stop the command loop.
        return True

    def do_EOF(self, line):
        # Exit the shell on Ctrl-D.
        return True

    def do_put(self, f):
        """Upload a file to LA [filename]
        """
        fname=f.strip()
        if not fname:
            # No argument given: prompt interactively for the filename.
            fname=raw_input('Filename: ').strip()
        if not fname:
            print "Argument required."
        elif not os.path.isfile(fname):
            print 'File {} not found or is not a regular file.'.format(fname)
        else:
            pool_upload(fname, self.tokens)

    def complete_put(self, text, line, begidx, endidx):
        # Tab completion: glob on everything typed after "put " (line[4:]).
        return [os.path.basename(x) for x in glob.glob('{}*'.format(line[4:]))]
| import os
import cmd
import glob
from lacli.upload import pool_upload
class LaCommand(cmd.Cmd):
""" Our LA command line interface"""
prompt='lacli> '
def __init__(self, tokens, *args, **kwargs):
cmd.Cmd.__init__(self, *args, **kwargs)
self.tokens=tokens
def do_tvmconf(self, line):
"""tvmconf
reset TVM access configuration (key, token, etc)"""
return True
def do_EOF(self, line):
return True
def do_put(self, f):
fname=f.strip()
if not fname:
fname=raw_input('Filename: ').strip()
if not os.path.isfile(fname):
print 'File {} not found or is not a directory.'.format(fname)
else:
pool_upload(fname, self.tokens)
def complete_put(self, text, line, begidx, endidx):
return [os.path.basename(x) for x in glob.glob('{}*'.format(line[4:]))]
| apache-2.0 | Python |
03a78a509c0213f8f95223a7926a1bce547f05fe | Initialize parent_pid in the Proc class init. | wglass/rotterdam | rotterdam/proc.py | rotterdam/proc.py | import logging
import os
import signal
from setproctitle import setproctitle
class Proc(object):
    """Base class for rotterdam processes: records pids, installs signal
    handlers from signal_map, and sets the process title."""

    # Subclass hook: {"int": "handle_int", ...} binds SIGINT to self.handle_int.
    signal_map = {}

    def __init__(self):
        self.logger = logging.getLogger(self.__module__)
        self.pid = None
        # Initialized here so the attribute always exists before setup() runs.
        self.parent_pid = None

    @property
    def name(self):
        # e.g. class "Worker" -> "worker"; used for titles and log messages.
        return self.__class__.__name__.lower()

    def setup(self):
        """Record pids, install signal handlers, and set the proc title."""
        self.pid = os.getpid()
        self.parent_pid = os.getppid()
        self.setup_signals()
        setproctitle("rotterdam: %s" % self.name)

    def run(self):
        """Default run: perform setup and log the start; subclasses extend."""
        self.setup()
        self.logger.info("Starting %s (%d)", self.name, int(self.pid))

    def setup_signals(self):
        # Map "int" -> SIGINT etc., binding each to the named handler method.
        # .iteritems() makes this Python-2 only.
        for signal_name, handler_name in self.signal_map.iteritems():
            signal.signal(
                getattr(signal, "SIG%s" % signal_name.upper()),
                getattr(self, handler_name)
            )
| import logging
import os
import signal
from setproctitle import setproctitle
class Proc(object):
signal_map = {}
def __init__(self):
self.logger = logging.getLogger(self.__module__)
self.pid = None
@property
def name(self):
return self.__class__.__name__.lower()
def setup(self):
self.pid = os.getpid()
self.parent_pid = os.getppid()
self.setup_signals()
setproctitle("rotterdam: %s" % self.name)
def run(self):
self.setup()
self.logger.info("Starting %s (%d)", self.name, int(self.pid))
def setup_signals(self):
for signal_name, handler_name in self.signal_map.iteritems():
signal.signal(
getattr(signal, "SIG%s" % signal_name.upper()),
getattr(self, handler_name)
)
| mit | Python |
27768a69784f267af822b6b763a5c526285c7ba2 | print out evaluation data. | yashsavani/rechorder | predictor.py | predictor.py | #!/usr/bin/python
import util
import chordKMeans
import sys
import random
import numpy as np
np.set_printoptions(formatter={'float': lambda x: '%.2f\t'%x})
# part 1
if len(sys.argv) == 1:
midiFileName = 'default.mid'
else:
midiFileName = sys.argv[1]
barLists = util.getNGramBarList(midiFileName)
for x in barLists:
for y in x:
print y.getKMeansFeatures()
featureCentroids = chordKMeans.getFeatureCentroids(midiFileName, 12)
print featureCentroids[0]
print featureCentroids[1]
for i in range(20) :
featureCentroids = chordKMeans.getFeatureCentroids(midiFileName, 12)
print "for k = %s"%i
print chordKMeans.evaluateKmeansClusters(midiFileName, featureCentroids[0], featureCentroids[1])
# want to, given new Midi
def buildMarkovModel(labelSeries, k):
'''
Assumes that label series is a sequence of integers in 0, ..., k-1.
also assumes that labelSeries is nonempty
'''
model = [[1 for i in range(k)] for j in range(k)]
for i in range(len(labelSeries) - 1):
before = labelSeries[i]
after = labelSeries[i+1]
model[before][after] += 1
for i in range(k):
n = sum(model[i])
for j in range(k):
model[i][j] *= 1.0 / n
return model
def makeRandomPrediction(model, before):
'''
model: a k by k list of lists of floats.
model[i] should sum up to 1 for all i.
before: an integer between 0 and k-1 inclusive.
There are ways to make this happen in log(k) rather than k time but we won't do this now.
'''
probability_distribution = model[before]
# this should sum up to 1 and be nonnegative.
continuous_choice = random.random()
for i, probability in enumerate(probability_distribution):
if probability >= continuous_choice:
return i
else:
continuous_choice -= probability
#If you're here there's a problem
return "There's an error in prediction"
class prettyfloat(float):
def __repr__(self):
return "%0.2f" % self
# testing out Markov model
print "testing out Markov model."
k = 5
labelSeries = [0,1,2,3,4] * 10 + [0, 1, 2] * 10
model = buildMarkovModel(labelSeries, k)
print "----labelSeries----"
print labelSeries
print "------model:-------"
for prior, distribution in enumerate(model):
print "given", prior, "distribution is", map(prettyfloat, distribution)
print "----predictions----"
for prior in range(k):
print "given prior", prior, "model randomly predicts", makeRandomPrediction(model, prior)
# part 2... hopefully we'll get here
| #!/usr/bin/python
import util
import chordKMeans
import sys
import random
import numpy as np
np.set_printoptions(formatter={'float': lambda x: '%.2f\t'%x})
# part 1
if len(sys.argv) == 1:
midiFileName = 'default.mid'
else:
midiFileName = sys.argv[1]
barLists = util.getNGramBarList(midiFileName)
for x in barLists:
for y in x:
print y.getKMeansFeatures()
featureCentroids = chordKMeans.getFeatureCentroids(midiFileName, 12)
print featureCentroids[0]
print featureCentroids[1]
# featureCentroids = chordKMeans.getFeatureCentroids(midiFileName)
# want to, given new Midi
def buildMarkovModel(labelSeries, k):
'''
Assumes that label series is a sequence of integers in 0, ..., k-1.
also assumes that labelSeries is nonempty
'''
model = [[1 for i in range(k)] for j in range(k)]
for i in range(len(labelSeries) - 1):
before = labelSeries[i]
after = labelSeries[i+1]
model[before][after] += 1
for i in range(k):
n = sum(model[i])
for j in range(k):
model[i][j] *= 1.0 / n
return model
def makeRandomPrediction(model, before):
'''
model: a k by k list of lists of floats.
model[i] should sum up to 1 for all i.
before: an integer between 0 and k-1 inclusive.
There are ways to make this happen in log(k) rather than k time but we won't do this now.
'''
probability_distribution = model[before]
# this should sum up to 1 and be nonnegative.
continuous_choice = random.random()
for i, probability in enumerate(probability_distribution):
if probability >= continuous_choice:
return i
else:
continuous_choice -= probability
#If you're here there's a problem
return "There's an error in prediction"
class prettyfloat(float):
def __repr__(self):
return "%0.2f" % self
# testing out Markov model
print "testing out Markov model."
k = 5
labelSeries = [0,1,2,3,4] * 10 + [0, 1, 2] * 10
model = buildMarkovModel(labelSeries, k)
print "----labelSeries----"
print labelSeries
print "------model:-------"
for prior, distribution in enumerate(model):
print "given", prior, "distribution is", map(prettyfloat, distribution)
print "----predictions----"
for prior in range(k):
print "given prior", prior, "model randomly predicts", makeRandomPrediction(model, prior)
# part 2... hopefully we'll get here
| mit | Python |
3f6b85b912c8819b362db5b37533c0810d7ce45d | Add tests for Varsh rule 8.7.2 (4) | tschijnmo/drudge,tschijnmo/drudge,tschijnmo/drudge | tests/nuclear_test.py | tests/nuclear_test.py | """Tests for special utilities related to nuclear problems."""
import pytest
from sympy import Symbol, simplify, latex, symbols, KroneckerDelta
from drudge import NuclearBogoliubovDrudge, Range
from drudge.nuclear import JOf, TildeOf, MOf, NOf, LOf, PiOf, TOf, CG
@pytest.fixture(scope='module')
def nuclear(spark_ctx):
"""Set up the drudge to test."""
return NuclearBogoliubovDrudge(spark_ctx)
def test_qn_accessors():
"""Test the symbolic functions for quantum number access."""
k = Symbol('k')
for acc in [JOf, TildeOf, MOf, NOf, LOf, PiOf, TOf]:
# Test that they are considered integers.
e = acc(k)
assert simplify((-1) ** (e * 2)) == 1
latex_form = latex(e)
assert latex_form[-3:] == '{k}'
def test_jm_dummies_are_integers(nuclear: NuclearBogoliubovDrudge):
"""Test that the angular momentum dummies has the right assumptions."""
p = nuclear.names
for i in [p.m1, p.m2, p.M1, p.M2, p.J1, p.J2]:
assert simplify((-1) ** (i * 2)) == 1
def test_varsh_872_4(nuclear: NuclearBogoliubovDrudge):
"""Test simplification based on Varshalovich 8.7.2 Eq (4)."""
dr = nuclear
c, gamma, c_prm, gamma_prm = symbols('c gamma cprm gammaprm')
a, alpha, b, beta = symbols('a alpha b beta')
m_range = Range('m')
sums = [
(alpha, m_range[-a, a + 1]), (beta, m_range[-b, b + 1])
]
amp = CG(a, alpha, b, beta, c, gamma) * CG(
a, alpha, b, beta, c_prm, gamma_prm
)
# Make sure that the pattern matching works in any way the summations are
# written.
for sums_i in [sums, reversed(sums)]:
tensor = dr.sum(*sums_i, amp)
res = tensor.simplify_cg()
assert res.n_terms == 1
term = res.local_terms[0]
assert len(term.sums) == 0
assert term.amp == KroneckerDelta(
c, c_prm
) * KroneckerDelta(gamma, gamma_prm)
| """Tests for special utilities related to nuclear problems."""
import pytest
from sympy import Symbol, simplify, latex
from drudge import NuclearBogoliubovDrudge
from drudge.nuclear import JOf, TildeOf, MOf, NOf, LOf, PiOf, TOf
@pytest.fixture(scope='module')
def nuclear(spark_ctx):
"""Set up the drudge to test."""
return NuclearBogoliubovDrudge(spark_ctx)
def test_qn_accessors():
"""Test the symbolic functions for quantum number access."""
k = Symbol('k')
for acc in [JOf, TildeOf, MOf, NOf, LOf, PiOf, TOf]:
# Test that they are considered integers.
e = acc(k)
assert simplify((-1) ** (e * 2)) == 1
latex_form = latex(e)
assert latex_form[-3:] == '{k}'
def test_jm_dummies_are_integers(nuclear: NuclearBogoliubovDrudge):
"""Test that the angular momentum dummies has the right assumptions."""
p = nuclear.names
for i in [p.m1, p.m2, p.M1, p.M2, p.J1, p.J2]:
assert simplify((-1) ** (i * 2)) == 1
| mit | Python |
ee6224bce1336bd0cf812033737a1a8c61c6323d | Update pydocs feature clean up (owner of the feature/application) (Lead: Kunal) | css-iter/org.csstudio.iter,ControlSystemStudio/org.csstudio.iter,ControlSystemStudio/org.csstudio.iter,ControlSystemStudio/org.csstudio.iter,ControlSystemStudio/org.csstudio.iter,ControlSystemStudio/org.csstudio.iter,ControlSystemStudio/org.csstudio.iter,css-iter/org.csstudio.iter,css-iter/org.csstudio.iter,css-iter/org.csstudio.iter,ControlSystemStudio/org.csstudio.iter,css-iter/org.csstudio.iter,css-iter/org.csstudio.iter,css-iter/org.csstudio.iter,css-iter/org.csstudio.iter,ControlSystemStudio/org.csstudio.iter | build/org.csstudio.validation.scripts/check_feature.py | build/org.csstudio.validation.scripts/check_feature.py | '''
Created on Sep 17, 2014
A simple script which scans all feature.xml files under the directory it was invoked from to ensure
1. that none of them explicitly define JRE container
2. All features have a provider
3. All features have a description or description URL
4. No features have required plugins, tycho should take care of dependency resolution.
@author: Kunal Shroff
'''
import os.path
import sys
import xml.etree.ElementTree as ET
defaultDesc = "[Enter Feature Description here.]"
incorrectFiles = []
for dirpath, dirnames, filenames in os.walk("."):
for completefilename in [ os.path.join(dirpath, f) for f in filenames if f == "feature.xml" ]:
tree = ET.parse(completefilename)
if tree.findall("requires"):
incorrectFiles.append(completefilename + " REASON: The feature file should not have the requires node, you can ensure this by removing all the dependencies defined in the feature")
root = tree.getroot()
if "provider-name" not in root.attrib:
incorrectFiles.append(completefilename + " REASON: The feature file should have a provider")
descElem = root.find("description")
if descElem is not None:
if defaultDesc in descElem.text and "url" not in descElem.attrib:
incorrectFiles.append(completefilename + " REASON: No feature description")
else:
incorrectFiles.append(completefilename + " REASON: No feature description")
if len(incorrectFiles) != 0:
print 'Following incorrectly configured feature.xml files are committed to repository.'
for f in incorrectFiles:
print f
sys.exit(-1)
| '''
Created on Sep 17, 2014
A simple script which scans all feature.xml files under the directory it was invoked from to ensure that none of them explicitly define JRE container
@author: Kunal Shroff
'''
import os.path
import sys
import xml.etree.ElementTree as ET
defaultDesc = "[Enter Feature Description here.]"
incorrectFiles = []
for dirpath, dirnames, filenames in os.walk("."):
for completefilename in [ os.path.join(dirpath, f) for f in filenames if f == "feature.xml" ]:
tree = ET.parse(completefilename)
if tree.findall("requires"):
incorrectFiles.append(completefilename + " REASON: The feature file should not have the requires node, you can ensure this by removing all the dependencies defined in the feature")
root = tree.getroot()
if "provider-name" not in root.attrib:
incorrectFiles.append(completefilename + " REASON: The feature file should have a provider")
descElem = root.find("description")
if descElem is not None:
if defaultDesc in descElem.text and "url" not in descElem.attrib:
incorrectFiles.append(completefilename + " REASON: No feature description")
else:
incorrectFiles.append(completefilename + " REASON: No feature description")
if len(incorrectFiles) != 0:
print 'Following incorrectly configured feature.xml files are committed to repository.'
for f in incorrectFiles:
print f
sys.exit(-1)
| epl-1.0 | Python |
1f418a7b6aa2f7879e32dc6857a3cb376485f1fc | Disable schema version update until FCS | csm-aut/csm,csm-aut/csm,csm-aut/csm,csm-aut/csm | csmserver/schema/base.py | csmserver/schema/base.py | # =============================================================================
# Copyright (c) 2016, Cisco Systems, Inc
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
# =============================================================================
from models import SystemVersion
from database import DBSession
class BaseMigrate(object):
def __init__(self, version):
self.version = version
def update_schema_version(self):
db_session = DBSession()
system_version = SystemVersion.get(db_session)
system_version.schema_version = self.version
# db_session.commit()
def execute(self):
self.start()
self.update_schema_version()
def start(self):
raise NotImplementedError("Children must override start")
| # =============================================================================
# Copyright (c) 2016, Cisco Systems, Inc
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
# =============================================================================
from models import SystemVersion
from database import DBSession
class BaseMigrate(object):
def __init__(self, version):
self.version = version
def update_schema_version(self):
db_session = DBSession()
system_version = SystemVersion.get(db_session)
system_version.schema_version = self.version
db_session.commit()
def execute(self):
self.start()
self.update_schema_version()
def start(self):
raise NotImplementedError("Children must override start")
| apache-2.0 | Python |
b3b7161bc15bdc9f37a3bdd0cebce139ba1ba70a | use current directory | peterorum/functal,peterorum/functal,peterorum/functal,peterorum/functal,peterorum/functal | titles/fill-titles.py | titles/fill-titles.py | #!/usr/bin/python3
# add a title to any functal without one
import os
#import re
#import sys
# import json
#import random
#import collections
import time
import subprocess
from pprint import pprint
# pp = pprint.PrettyPrinter(indent=4)
import pymongo
client = pymongo.MongoClient(os.getenv('mongo_functal'))
db_topics = client['topics']
db_functal = client['functal']
#--- find all functals
def get_functals():
return list(db_functal.images.find())
#--- find untitled
def get_functals_without_title():
return list(db_functal.images.find({'title': {'$exists': False}}))
#--- run
def run():
#functals = get_functals_without_title()
functals = get_functals()
for functal in functals:
# pprint(functal)
topic = functal.get('topic', None)
if topic is not None:
title = db_topics.titles.find_one({'topic': topic})
if title is None:
# give enough time for new titles to be generated by 5 min cron & try again
print('out of titles for ' + topic + ' - shelling')
subprocess.call(['./get-tweets.py', '-t', topic])
subprocess.call(['./get-titles.py'])
title = db_topics.titles.find_one({'topic': topic})
if title is not None:
print(topic + ': ' + title['title'])
functal['title'] = title['title']
db_functal.images.update({'_id': functal['_id']}, {'$set': functal}, upsert=False)
# remove used title
db_topics.titles.remove(title)
else:
print('still no title for ' + topic)
else:
print(functal['name'] + ': no topic')
client.close()
#---
run()
| #!/usr/bin/python3
# add a title to any functal without one
import os
#import re
#import sys
# import json
#import random
#import collections
import time
import subprocess
from pprint import pprint
# pp = pprint.PrettyPrinter(indent=4)
import pymongo
client = pymongo.MongoClient(os.getenv('mongo_functal'))
db_topics = client['topics']
db_functal = client['functal']
#--- find all functals
def get_functals():
return list(db_functal.images.find())
#--- find untitled
def get_functals_without_title():
return list(db_functal.images.find({'title': {'$exists': False}}))
#--- run
def run():
#functals = get_functals_without_title()
functals = get_functals()
for functal in functals:
# pprint(functal)
topic = functal.get('topic', None)
if topic is not None:
title = db_topics.titles.find_one({'topic': topic})
if title is None:
# give enough time for new titles to be generated by 5 min cron & try again
print('out of titles for ' + topic + ' - shelling')
subprocess.call(['get-tweets.py', '-t', topic])
subprocess.call(['get-titles.py'])
title = db_topics.titles.find_one({'topic': topic})
if title is not None:
print(topic + ': ' + title['title'])
functal['title'] = title['title']
db_functal.images.update({'_id': functal['_id']}, {'$set': functal}, upsert=False)
# remove used title
db_topics.titles.remove(title)
else:
print('still no title for ' + topic)
else:
print(functal['name'] + ': no topic')
client.close()
#---
run()
| mit | Python |
235de58b16995b610ec06c28e6340e81fbc46b7b | Remove unused import | dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq | custom/ewsghana/forms.py | custom/ewsghana/forms.py | from corehq.apps.reminders.forms import BroadcastForm
from corehq.apps.reminders.models import (RECIPIENT_USER_GROUP,
RECIPIENT_LOCATION)
from crispy_forms import layout as crispy
from django import forms
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext as _, ugettext_lazy
ROLE_ALL = '(any role)'
ROLE_IN_CHARGE = 'In Charge'
ROLE_NURSE = 'Nurse'
ROLE_PHARMACIST = 'Pharmacist'
ROLE_LABORATORY_STAFF = 'Laboratory Staff'
ROLE_OTHER = 'Other'
ROLE_FACILITY_MANAGER = 'Facility Manager'
EWS_USER_ROLES = (
ROLE_ALL,
ROLE_IN_CHARGE,
ROLE_NURSE,
ROLE_PHARMACIST,
ROLE_LABORATORY_STAFF,
ROLE_OTHER,
ROLE_FACILITY_MANAGER,
)
class InputStockForm(forms.Form):
product_id = forms.CharField(widget=forms.HiddenInput())
product = forms.CharField(widget=forms.HiddenInput(), required=False)
stock_on_hand = forms.IntegerField(min_value=0, required=False)
receipts = forms.IntegerField(min_value=0, initial=0, required=False)
units = forms.CharField(required=False)
monthly_consumption = forms.IntegerField(required=False, widget=forms.HiddenInput())
class EWSBroadcastForm(BroadcastForm):
role = forms.ChoiceField(
required=False,
label=ugettext_lazy('Send to users with role'),
choices=((role, ugettext_lazy(role)) for role in EWS_USER_ROLES),
)
@property
def crispy_recipient_fields(self):
fields = super(EWSBroadcastForm, self).crispy_recipient_fields
fields.append(
crispy.Div(
crispy.Field(
'role',
data_bind='value: role',
),
data_bind='visible: showUserGroupSelect() || showLocationSelect()',
)
)
return fields
def clean_role(self):
if self.cleaned_data.get('recipient_type') not in (RECIPIENT_USER_GROUP,
RECIPIENT_LOCATION):
return None
value = self.cleaned_data.get('role')
if value not in EWS_USER_ROLES:
raise ValidationError(_('Invalid choice selected.'))
return value
def get_user_data_filter(self):
role = self.cleaned_data.get('role')
if role is None or role == ROLE_ALL:
return {}
else:
return {'role': [role]}
| from corehq import toggles
from corehq.apps.reminders.forms import BroadcastForm
from corehq.apps.reminders.models import (RECIPIENT_USER_GROUP,
RECIPIENT_LOCATION)
from crispy_forms import layout as crispy
from django import forms
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext as _, ugettext_lazy
ROLE_ALL = '(any role)'
ROLE_IN_CHARGE = 'In Charge'
ROLE_NURSE = 'Nurse'
ROLE_PHARMACIST = 'Pharmacist'
ROLE_LABORATORY_STAFF = 'Laboratory Staff'
ROLE_OTHER = 'Other'
ROLE_FACILITY_MANAGER = 'Facility Manager'
EWS_USER_ROLES = (
ROLE_ALL,
ROLE_IN_CHARGE,
ROLE_NURSE,
ROLE_PHARMACIST,
ROLE_LABORATORY_STAFF,
ROLE_OTHER,
ROLE_FACILITY_MANAGER,
)
class InputStockForm(forms.Form):
product_id = forms.CharField(widget=forms.HiddenInput())
product = forms.CharField(widget=forms.HiddenInput(), required=False)
stock_on_hand = forms.IntegerField(min_value=0, required=False)
receipts = forms.IntegerField(min_value=0, initial=0, required=False)
units = forms.CharField(required=False)
monthly_consumption = forms.IntegerField(required=False, widget=forms.HiddenInput())
class EWSBroadcastForm(BroadcastForm):
role = forms.ChoiceField(
required=False,
label=ugettext_lazy('Send to users with role'),
choices=((role, ugettext_lazy(role)) for role in EWS_USER_ROLES),
)
@property
def crispy_recipient_fields(self):
fields = super(EWSBroadcastForm, self).crispy_recipient_fields
fields.append(
crispy.Div(
crispy.Field(
'role',
data_bind='value: role',
),
data_bind='visible: showUserGroupSelect() || showLocationSelect()',
)
)
return fields
def clean_role(self):
if self.cleaned_data.get('recipient_type') not in (RECIPIENT_USER_GROUP,
RECIPIENT_LOCATION):
return None
value = self.cleaned_data.get('role')
if value not in EWS_USER_ROLES:
raise ValidationError(_('Invalid choice selected.'))
return value
def get_user_data_filter(self):
role = self.cleaned_data.get('role')
if role is None or role == ROLE_ALL:
return {}
else:
return {'role': [role]}
| bsd-3-clause | Python |
86fea752c68ff5786556f5555ca580f2d0a9de85 | fix bug with debug handling | longaccess/longaccess-client,longaccess/longaccess-client,longaccess/longaccess-client | latvm/session.py | latvm/session.py | from latvm.tvm import MyTvm
from boto import config as boto_config
from boto.exception import NoAuthHandlerFound
class NoCredentialsException(Exception): pass
class UploadSession(object):
def __init__(self, uid=None, secs=3600, bucket='lastage', retries=0, debug=0):
self.uid=uid
self.secs=3600
if not boto_config.has_section('Boto'):
boto_config.add_section('Boto')
boto_config.set('Boto','num_retries', str(retries))
if debug != 0:
import multiprocessing as mp
mp.util.log_to_stderr(mp.util.SUBDEBUG)
boto_config.set('Boto','debug',str(debug))
try:
self.tvm = MyTvm(bucket=bucket)
except NoAuthHandlerFound as e:
raise NoCredentialsException
def tokens(self):
while True:
yield self.tvm.get_upload_token(uid=self.uid,
secs=self.secs)
| from latvm.tvm import MyTvm
from boto import config as boto_config
from boto.exception import NoAuthHandlerFound
class NoCredentialsException(Exception): pass
class UploadSession(object):
def __init__(self, uid=None, secs=3600, bucket='lastage', retries=0, debug=0):
self.uid=uid
self.secs=3600
if not boto_config.has_section('Boto'):
boto_config.add_section('Boto')
boto_config.set('Boto','num_retries', str(retries))
if debug != 0:
import multiprocessing
multiprocessing.util.log_to_stderr(mp.util.SUBDEBUG)
boto_config.set('Boto','debug',str(debug))
try:
self.tvm = MyTvm(bucket=bucket)
except NoAuthHandlerFound as e:
raise NoCredentialsException
def tokens(self):
while True:
yield self.tvm.get_upload_token(uid=self.uid,
secs=self.secs)
| apache-2.0 | Python |
ce6a3a3833d498fa32a5317fd95e206cad9d5a83 | Add importing from __future__ & revise main() | bowen0701/algorithms_data_structures | alg_gcd.py | alg_gcd.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def gcd(m, n):
"""Greatest Common Divisor (GCD) by Euclid's Algorithm.
Time complexity: O(m%n).
"""
while n != 0:
m, n = n, m % n
return m
def main():
print('gcd(4, 2): {}'.format(gcd(4, 2)))
print('gcd(2, 4): {}'.format(gcd(2, 4)))
print('gcd(10, 4): {}'.format(gcd(10, 4)))
print('gcd(4, 10): {}'.format(gcd(4, 10)))
print('gcd(3, 4): {}'.format(gcd(3, 4)))
print('gcd(4, 3): {}'.format(gcd(4, 3)))
if __name__ == '__main__':
main()
| def gcd(m, n):
"""Greatest Common Divisor (GCD) by Euclid's Algorithm.
Time complexity: O(m%n).
"""
while n != 0:
m, n = n, m % n
return m
def main():
print(gcd(4, 2))
print(gcd(2, 4))
print(gcd(10, 4))
print(gcd(4, 10))
print(gcd(10, 1))
print(gcd(1, 10))
if __name__ == '__main__':
main()
| bsd-2-clause | Python |
6357d5e9e255b14ceb5f2905d4782837e4761738 | Revise gcd algorithm | bowen0701/algorithms_data_structures | alg_gcd.py | alg_gcd.py | def gcd(m, n):
"""Greatest Common Divisor (GCD) by Euclid's Algorithm.
Time complexity: O(m%n).
"""
while n != 0:
m, n = n, m % n
return m
def main():
print(gcd(4, 2))
print(gcd(2, 4))
print(gcd(10, 4))
print(gcd(4, 10))
print(gcd(10, 1))
print(gcd(1, 10))
if __name__ == '__main__':
main()
| def compute_gcd(m, n):
"""Compute the greatest common divisor (GCD) by Euclid's Algorithm.
Time complexity: O(m%n).
"""
while m % n != 0:
old_m = m
old_n = n
m = old_n
n = old_m % old_n
return n
| bsd-2-clause | Python |
c52b92c4f977bbb7eede968fad98113a5fe6c441 | Bump gtk-sharp to a newer build | BansheeMediaPlayer/bockbuild,BansheeMediaPlayer/bockbuild,BansheeMediaPlayer/bockbuild,mono/bockbuild,mono/bockbuild | packages/gtk-sharp-2.12-release.py | packages/gtk-sharp-2.12-release.py | class GtkSharp212ReleasePackage (Package):
def __init__ (self):
Package.__init__ (self, 'gtk-sharp', '2.12.21', sources = ['http://files.xamarin.com/~duncan/gtk-sharp-2.12.21.tar.gz'])
# self.configure = './bootstrap-2.12 --prefix="%{prefix}"'
self.make = 'make CSC=gmcs'
GtkSharp212ReleasePackage ()
| class GtkSharp212ReleasePackage (Package):
def __init__ (self):
Package.__init__ (self, 'gtk-sharp', '2.12.12', sources = ['http://files.xamarin.com/~alan/gtk-sharp-2.12.12.tar.gz'])
# self.configure = './bootstrap-2.12 --prefix="%{prefix}"'
self.make = 'make CSC=gmcs'
GtkSharp212ReleasePackage ()
| mit | Python |
0b45ec48955f73a0e88422660af82ff6fd89333b | Use Tuesday branch on Tuesdays | DrDos0016/z2,DrDos0016/z2,DrDos0016/z2 | tools/crons/newwoz.py | tools/crons/newwoz.py | import os
import django
from datetime import datetime
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "museum.settings")
django.setup()
from museum_site.wozzt_queue import WoZZT_Queue # noqa: E402
def main():
now = datetime.now()
if now.weekday() == 1: # Tuesday
entry = WoZZT_Queue.objects.filter(category="tuesday")
else:
entry = WoZZT_Queue.objects.all(category="wozzt")
entry = entry.order_by("-priority", "id")[0]
entry.send_tweet()
entry.delete_image()
entry.delete()
print("Done.")
if __name__ == "__main__":
main()
| import os
import django
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "museum.settings")
django.setup()
print("NEW WOZZT TEST")
from museum_site.wozzt_queue import WoZZT_Queue # noqa: E402
def main():
entry = WoZZT_Queue.objects.all().order_by("-priority", "id")[0]
entry.send_tweet()
entry.delete_image()
entry.delete()
print("Well that was easy.")
if __name__ == "__main__":
main()
| mit | Python |
ed89c92ac56e89648bf965ea3aa8963e840e3a5c | Fix the unit test :) | StarbotDiscord/Starbot,dhinakg/BitSTAR,StarbotDiscord/Starbot,dhinakg/BitSTAR | tests/test_excuses.py | tests/test_excuses.py | # Copyright 2017 Starbot Discord Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from api import message, plugin
from plugins import excuses
class TestExcuseSuite(unittest.TestCase):
def testExcuseMsg(self):
msg = message.message(body="")
msg.command = "excuse"
excuseList = ["I have an appointment with a robot.",
"I was abducted by robots.",
"I didn’t know what day it was because I was looking at the Robotic Calendar.",
"My robot threw up on my source code.",
"I need to take my robot for a walk.",
"I had to get a cybernetic head and couldn't get anything done.",
"My Robot Assistant blue-screened.",
"A kernel panic erased my work.",
"Somebody used up the data limit watching YouTube."]
sorryList = ["Please excuse me,", "I'm sorry, but", "I hope you forgive me, because"]
fullExcuseList = []
for sorry in sorryList:
for excuse in excuseList:
fullExcuseList.append('*{} {}*'.format(sorry, excuse))
result=excuses.onCommand(msg)
print(result.body)
self.assertEqual(type(result), type(msg))
self.assertEqual(result.body in fullExcuseList, True)
| # Copyright 2017 Starbot Discord Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from api import message, plugin
from plugins import excuses
class TestExcuseSuite(unittest.TestCase):
def testExcuseMsg(self):
msg = message.message(body="")
msg.command = "excuse"
fullExcuseList = []
for sorry in excuses.sorryList:
for excuse in excuses.excuseList:
fullExcuseList.append('*{} {}*'.format(sorryList[sorry], excuseList[excuse])
result=excuses.onCommand(msg)
self.assertEqual(type(result), type(msg))
self.assertEqual(result.body in fullExcuseList, True)
| apache-2.0 | Python |
38749a0033c2acc6c9fd8971749c68f93cb9c0db | Add virtualenv.create function to enable easy virtualenv creation | ionelmc/virtualenv,ionelmc/virtualenv,ionelmc/virtualenv | virtualenv/__init__.py | virtualenv/__init__.py | from __future__ import absolute_import, division, print_function
from virtualenv.__about__ import (
__author__, __copyright__, __email__, __license__, __summary__, __title__,
__uri__, __version__
)
from virtualenv.core import create
__all__ = [
"__title__", "__summary__", "__uri__", "__version__", "__author__",
"__email__", "__license__", "__copyright__",
"create",
]
| from __future__ import absolute_import, division, print_function
from virtualenv.__about__ import (
__author__, __copyright__, __email__, __license__, __summary__, __title__,
__uri__, __version__
)
__all__ = [
"__title__", "__summary__", "__uri__", "__version__", "__author__",
"__email__", "__license__", "__copyright__",
]
| mit | Python |
65a3c3c28a673e1924441fbf42f172aba410921b | use print and catch keyboard interrupt | gamechanger/dusty,gamechanger/dusty,gamechanger/dusty,gamechanger/dusty,gamechanger/dusty | dusty/commands/client.py | dusty/commands/client.py | import subprocess
import logging
def call_command_from_client(shell_args, env=None):
print "Running command: {}".format(' '.join(shell_args))
try:
subprocess.call(shell_args, env=env)
except KeyboardInterrupt:
print "KeyboardInterrupt; terminating"
pass
| import subprocess
import logging
def call_command_from_client(shell_args, env=None):
logging.info("Running command: {}".format(' '.join(shell_args)))
subprocess.call(shell_args, env=env)
| mit | Python |
67ee63136e69d0c326be81e1ace72168af3dc2b3 | update try to nocaptcha packages | ninjaotoko/dynaform,ninjaotoko/dynaform,ninjaotoko/dynaform | dynaform/forms/fields.py | dynaform/forms/fields.py | # *-* coding:utf-8 *-*
import json
from django import forms
from dynaform.forms import widgets as custom_widgets
# Try importing ReCaptchaField
try:
import ReCaptchaField
except ImportError:
try:
from captcha.fields import ReCaptchaField
except ImportError:
try:
from nocaptcha_recaptcha.fields \
import NoReCaptchaField as ReCaptchaField
except ImportError:
print 'ReCaptchaField was not loaded'
class CheckOtherField(forms.fields.MultiValueField):
widget = custom_widgets.CheckOtherWidget
def __init__(self, *args, **kwargs):
list_fields = [forms.fields.BooleanField(required=False),
forms.fields.CharField()]
super(CheckOtherField, self).__init__(list_fields, *args, **kwargs)
def compress(self, values):
return json.dumps(values)
| # *-* coding:utf-8 *-*
import json
from django import forms
from dynaform.forms import widgets as custom_widgets
# Try importing ReCaptchaField
try:
import ReCaptchaField
except ImportError:
try:
from captcha.fields import ReCaptchaField
except ImportError:
print 'ReCaptchaField was not loaded'
class CheckOtherField(forms.fields.MultiValueField):
widget = custom_widgets.CheckOtherWidget
def __init__(self, *args, **kwargs):
list_fields = [forms.fields.BooleanField(required=False),
forms.fields.CharField()]
super(CheckOtherField, self).__init__(list_fields, *args, **kwargs)
def compress(self, values):
return json.dumps(values)
| bsd-3-clause | Python |
69573b07009f09eebba551db7d151f56660029b0 | Update time_selfies.py | aspuru-guzik-group/selfies | tests/time_selfies.py | tests/time_selfies.py | import os
import random
import time
from rdkit.Chem import Kekulize, MolFromSmiles, MolToSmiles
import selfies as sf
from selfies.encoder import _parse_smiles
from selfies.kekulize import kekulize_parser
def time_roundtrip(file_path: str, sample_size: int = -1):
"""Tests the amount of time it takes to encode and then decode an
entire .txt file of SMILES strings. If <sample_size> is positive,
then a random sample is taken from the file instead.
"""
curr_dir = os.path.dirname(__file__)
file_path = os.path.join(curr_dir, file_path)
# load data
with open(file_path, 'r') as file:
smiles = [line.rstrip() for line in file.readlines()]
smiles.pop(0)
if sample_size > 0:
smiles = random.sample(smiles, sample_size)
selfies = list(map(sf.encoder, smiles))
print(f"Timing {len(smiles)} SMILES from {file_path}")
# time sf.encoder
start = time.time()
for s in smiles:
sf.encoder(s)
enc_time = time.time() - start
print(f"--> selfies.encoder: {enc_time:0.7f}s")
# time sf.decoder
start = time.time()
for s in selfies:
sf.decoder(s)
dec_time = time.time() - start
print(f"--> selfies.decoder: {dec_time:0.7f}s")
def time_kekulize(file_path: str, sample_size: int = -1):
curr_dir = os.path.dirname(__file__)
file_path = os.path.join(curr_dir, file_path)
# load data
with open(file_path, 'r') as file:
smiles = [line.rstrip() for line in file.readlines()]
smiles.pop(0)
if sample_size > 0:
smiles = random.sample(smiles, sample_size)
print(f"Timing Kekulization of {len(smiles)} SMILES from {file_path}")
# time selfies kekulization
start = time.time()
for s in smiles:
list(kekulize_parser(_parse_smiles(s)))
selfies_time = time.time() - start
print(f"--> selfies kekulize: {selfies_time:0.7f}s")
# time RDKit kekulization
start = time.time()
for s in smiles:
m = MolFromSmiles(s)
Kekulize(m)
MolToSmiles(m, kekuleSmiles=True)
rdkit_time = time.time() - start
print(f"--> RDKit kekulize: {rdkit_time:0.7f}s")
if __name__ == '__main__':
# temporary example
time_roundtrip('test_sets/250K_ZINC.txt')
# time_kekulize('test_sets/250K_ZINC.txt')
| import os
import random
import time
from rdkit.Chem import Kekulize, MolFromSmiles, MolToSmiles
import selfies as sf
from selfies.encoder import _parse_smiles
from selfies.kekulize import kekulize_parser
def time_roundtrip(file_path: str, sample_size: int = -1):
"""Tests the amount of time it takes to encode and then decode an
entire .txt file of SMILES strings <n> times. If <sample_size> is positive,
then a random sample is taken from the file instead.
"""
curr_dir = os.path.dirname(__file__)
file_path = os.path.join(curr_dir, file_path)
# load data
with open(file_path, 'r') as file:
smiles = [line.rstrip() for line in file.readlines()]
smiles.pop(0)
if sample_size > 0:
smiles = random.sample(smiles, sample_size)
selfies = list(map(sf.encoder, smiles))
print(f"Timing {len(smiles)} SMILES from {file_path}")
# time sf.encoder
start = time.time()
for s in smiles:
sf.encoder(s)
enc_time = time.time() - start
print(f"--> selfies.encoder: {enc_time:0.7f}s")
# time sf.decoder
start = time.time()
for s in selfies:
sf.decoder(s)
dec_time = time.time() - start
print(f"--> selfies.decoder: {dec_time:0.7f}s")
def time_kekulize(file_path: str, sample_size: int = -1):
curr_dir = os.path.dirname(__file__)
file_path = os.path.join(curr_dir, file_path)
# load data
with open(file_path, 'r') as file:
smiles = [line.rstrip() for line in file.readlines()]
smiles.pop(0)
if sample_size > 0:
smiles = random.sample(smiles, sample_size)
print(f"Timing Kekulization of {len(smiles)} SMILES from {file_path}")
# time selfies kekulization
start = time.time()
for s in smiles:
list(kekulize_parser(_parse_smiles(s)))
selfies_time = time.time() - start
print(f"--> selfies kekulize: {selfies_time:0.7f}s")
# time RDKit kekulization
start = time.time()
for s in smiles:
m = MolFromSmiles(s)
Kekulize(m)
MolToSmiles(m, kekuleSmiles=True)
rdkit_time = time.time() - start
print(f"--> RDKit kekulize: {rdkit_time:0.7f}s")
if __name__ == '__main__':
# temporary example
time_roundtrip('test_sets/250K_ZINC.txt')
# time_kekulize('test_sets/250K_ZINC.txt')
| apache-2.0 | Python |
7508d48b12c88b37247ceeac9d3e15eb6c1fbfaa | Refactor and simplify parser | CivicKnowledge/metatab,CivicKnowledge/metatab,CivicKnowledge/metatab-py,Metatab/metatab,Metatab/metatab,CivicKnowledge/metatab-py | metatab/_meta.py | metatab/_meta.py | __version__ = '0.0.9'
__author__ = 'eric@civicknowledge.com' | __version__ = '0.0.8'
__author__ = 'eric@civicknowledge.com' | bsd-3-clause | Python |
20f9bbab583dea200a38da2578fbb9bc7bafaf92 | add 'dask_client_create()' to pyxrf.api package | NSLS-II/PyXRF,NSLS-II-HXN/PyXRF,NSLS-II-HXN/PyXRF | pyxrf/api.py | pyxrf/api.py | from .model.fileio import (stitch_fitted_results, spec_to_hdf, create_movie, # noqa: F401
combine_data_to_recon, h5file_for_recon, export_to_view, # noqa: F401
make_hdf_stitched) # noqa: F401
from .model.load_data_from_db import make_hdf, export1d # noqa: F401
from .model.command_tools import fit_pixel_data_and_save, pyxrf_batch # noqa: F401
from .xanes_maps.xanes_maps_api import build_xanes_map # noqa: F401
from .simulation.sim_xrf_scan_data import gen_hdf5_qa_dataset, gen_hdf5_qa_dataset_preset_1 # noqa: F401
from .core.map_processing import dask_client_create # noqa: F401
# Note: the statement '# noqa: F401' is telling flake8 to ignore violation F401 at the given line
# Violation F401 - the package is imported but unused
import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
formatter = logging.Formatter(fmt='%(asctime)s : %(levelname)s : %(message)s')
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(formatter)
stream_handler.setLevel(logging.INFO)
logger.addHandler(stream_handler)
try:
from .model.load_data_from_db import db
except ImportError:
db = None
logger.error("Databroker is not available.")
try:
from .model.load_data_from_db import db_analysis
except ImportError:
db_analysis = None
# We don't use 'analysis' databroker, so disable the message for now
# logger.error("'Analysis' databroker is not available.")
def pyxrf_api():
r"""
=======================================================================================
Module ``pyxrf.api`` supports the following functions:
Loading data:
make_hdf - load XRF mapping data from databroker
Data processing:
pyxrf_batch - batch processing of XRF maps
build_xanes_map - generation and processing of XANES maps
Dask client:
dask_client_create - returns Dask client for use in batch scripts
Simulation of datasets:
gen_hdf5_qa_dataset - generate quantitative analysis dataset
gen_hdf5_qa_dataset_preset_1 - generate the dataset based on preset parameters
VIEW THIS MESSAGE AT ANY TIME: pyxrf_api()
For more detailed descriptions of the supported functions, type ``help(<function-name>)``
in IPython command prompt.
=========================================================================================
"""
print(pyxrf_api.__doc__)
pyxrf_api()
| from .model.fileio import (stitch_fitted_results, spec_to_hdf, create_movie, # noqa: F401
combine_data_to_recon, h5file_for_recon, export_to_view, # noqa: F401
make_hdf_stitched) # noqa: F401
from .model.load_data_from_db import make_hdf, export1d # noqa: F401
from .model.command_tools import fit_pixel_data_and_save, pyxrf_batch # noqa: F401
from .xanes_maps.xanes_maps_api import build_xanes_map # noqa: F401
from .simulation.sim_xrf_scan_data import gen_hdf5_qa_dataset, gen_hdf5_qa_dataset_preset_1 # noqa: F401
# from .model.command_tools import pyxrf_batch # noqa: F401
# Note: the statement '# noqa: F401' is telling flake8 to ignore violation F401 at the given line
# Violation F401 - the package is imported but unused
import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
formatter = logging.Formatter(fmt='%(asctime)s : %(levelname)s : %(message)s')
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(formatter)
stream_handler.setLevel(logging.INFO)
logger.addHandler(stream_handler)
try:
from .model.load_data_from_db import db
except ImportError:
db = None
logger.error("Databroker is not available.")
try:
from .model.load_data_from_db import db_analysis
except ImportError:
db_analysis = None
# We don't use 'analysis' databroker, so disable the message for now
# logger.error("'Analysis' databroker is not available.")
def pyxrf_api():
r"""
=======================================================================================
Module ``pyxrf.api`` supports the following functions:
Loading data:
make_hdf - load XRF mapping data from databroker
Data processing:
pyxrf_batch - batch processing of XRF maps
build_xanes_map - generation and processing of XANES maps
Simulation of datasets:
gen_hdf5_qa_dataset - generate quantitative analysis dataset
gen_hdf5_qa_dataset_preset_1 - generate the dataset based on preset parameters
VIEW THIS MESSAGE AT ANY TIME: pyxrf_api()
For more detailed descriptions of the supported functions, type ``help(<function-name>)``
in IPython command prompt.
=========================================================================================
"""
print(pyxrf_api.__doc__)
pyxrf_api()
| bsd-3-clause | Python |
2c17eac88106749344281686e7f83929d5883120 | add test for Pathhelper | ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study | server/Mars/Lib/PathHelper.py | server/Mars/Lib/PathHelper.py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# Copyright (c) 2016 ASMlover. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list ofconditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materialsprovided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
def addPath(newPath):
import os
import sys
if not os.path.exists(newPath):
return -1
newPath = os.path.abspath(newPath)
if sys.platform == 'win32':
newPath = newPath.lower()
for path in sys.path:
path = os.path.abspath(path)
path = path.lower()
if newPath in (path, path + os.sep):
return 0
sys.path.append(newPath)
return 1
def addPathes(*pathes):
"""Add pathes into sys.path for Python
* 1: success
* 0: already in sys.path
* -1: new path not exists
"""
for path in pathes:
addPath(path)
return 1
if __name__ == '__main__':
import sys
print('current sys.path >>> %s' % sys.path)
print()
addPathes('./', '../')
print('current sys.path >>> %s' % sys.path)
| #!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# Copyright (c) 2016 ASMlover. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list ofconditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materialsprovided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
def addPath(newPath):
import os
import sys
if not os.path.exists(newPath):
return -1
newPath = os.path.abspath(newPath)
if sys.platform == 'win32':
newPath = newPath.lower()
for path in sys.path:
path = os.path.abspath(path)
path = path.lower()
if newPath in (path, path + os.sep):
return 0
sys.path.append(newPath)
return 1
def addPathes(*pathes):
"""Add pathes into sys.path for Python
* 1: success
* 0: already in sys.path
* -1: new path not exists
"""
for path in pathes:
addPath(path)
return 1
| bsd-2-clause | Python |
2c50903030c8a00ce253acfd5ce3353f89ea039a | Update embed function | explosion/thinc,spacy-io/thinc,explosion/thinc,explosion/thinc,spacy-io/thinc,explosion/thinc,spacy-io/thinc | thinc/layers/embed.py | thinc/layers/embed.py | from typing import Callable, Tuple, Optional
from ..model import Model
from ..config import registry
from ..types import Array2d
from ..initializers import uniform_init
from ..util import get_width
InT = Array2d
OutT = Array2d
@registry.layers("Embed.v0")
def Embed(
nO: Optional[int] = None,
nV: int = 1,
*,
column: int = 0,
initializer: Callable = uniform_init,
) -> Model[InT, OutT]:
"""Map integers to vectors, using a fixed-size lookup table."""
model: Model[InT, OutT] = Model(
"embed",
forward,
init=create_init(initializer),
dims={"nO": nO, "nV": nV},
attrs={"column": column},
params={"E": None},
)
if nO is not None:
model.initialize()
return model
def forward(model: Model[InT, OutT], ids: InT, is_train: bool) -> Tuple[OutT, Callable]:
nV = model.get_dim("nV")
vectors = model.get_param("E")
column = model.get_attr("column")
input_shape = tuple(ids.shape)
if ids.ndim == 2:
ids = ids[:, column]
ids[ids >= nV] = 0
output = vectors[ids]
def backprop(d_output: OutT) -> InT:
d_vectors = model.ops.alloc_f2d(*vectors.shape)
model.ops.scatter_add(d_vectors, ids, d_output)
model.inc_grad("E", d_vectors)
dX = model.ops.alloc(input_shape, dtype=ids.dtype)
return dX
return output, backprop
def create_init(initializer: Callable) -> Callable:
def init(
model: Model[InT, OutT], X: Optional[InT] = None, Y: Optional[OutT] = None
) -> None:
if Y is not None:
model.set_dim("nO", get_width(Y))
shape = (model.get_dim("nV"), model.get_dim("nO"))
vectors = initializer(model.ops, shape)
model.set_param("E", vectors)
return init
| from typing import Callable, Tuple, Optional
from ..model import Model
from ..config import registry
from ..types import Array2d
from ..initializers import uniform_init
from ..util import get_width
InT = Array2d
OutT = Array2d
@registry.layers("Embed.v0")
def Embed(
nO: Optional[int] = None,
nV: int = 1,
*,
column: int = 0,
initializer: Callable = uniform_init,
) -> Model[InT, OutT]:
"""Map integers to vectors, using a fixed-size lookup table."""
model: Model[InT, OutT] = Model(
"embed",
forward,
init=create_init(initializer),
dims={"nO": nO, "nV": nV},
attrs={"column": column},
params={"E": None},
)
if nO is not None:
model.initialize()
return model
def forward(model: Model[InT, OutT], ids: InT, is_train: bool) -> Tuple[OutT, Callable]:
nV = model.get_dim("nV")
vectors = model.get_param("E")
column = model.get_attr("column")
input_shape = tuple(ids.shape)
if ids.ndim == 2:
ids = ids[:, column]
ids[ids >= nV] = 0
output = vectors[ids]
def backprop(d_output: OutT) -> InT:
d_vectors = model.ops.alloc_f2d(*vectors.shape)
model.ops.scatter_add(d_vectors, ids, d_output)
model.inc_grad("E", d_vectors)
dX = model.ops.alloc(input_shape, dtype=ids.dtype)
return dX
return output, backprop
def create_init(initializer: Callable) -> Callable:
def init(
model: Model[InT, OutT], X: Optional[InT] = None, Y: Optional[OutT] = None
) -> None:
if Y is not None:
model.set_dim("nO", get_width(Y))
shape = (model.get_dim("nV"), model.get_dim("nO"))
vectors = initializer(model.ops.alloc_f2d(*shape))
model.set_param("E", vectors)
return init
| mit | Python |
1d356aa98af97894cb4c16e32d83bc3c774c840a | Bump up the version to 2.1 | mpi-sws-rse/thingflow-python,mpi-sws-rse/thingflow-python | thingflow/__init__.py | thingflow/__init__.py | # Copyright 2016 by MPI-SWS and Data-Ken Research.
# Licensed under the Apache 2.0 License.
"""
This is the main package for antevents. Directly within this package you fill
find the following module:
* `base` - the core abstractions and classes of the system.
The rest of the functionality is in sub-packages:
* `adapters` - components to read/write events outside the system
* `internal` - some internal definitions
* `filters` - filters that allow linq-style query pipelines over event streams
* `sensors` - interfaces to sensors go here
"""
__version__ = "2.1.0"
| # Copyright 2016 by MPI-SWS and Data-Ken Research.
# Licensed under the Apache 2.0 License.
"""
This is the main package for antevents. Directly within this package you fill
find the following module:
* `base` - the core abstractions and classes of the system.
The rest of the functionality is in sub-packages:
* `adapters` - components to read/write events outside the system
* `internal` - some internal definitions
* `filters` - filters that allow linq-style query pipelines over event streams
* `sensors` - interfaces to sensors go here
"""
__version__ = "2.0.0"
| apache-2.0 | Python |
0f47282af3845eafabb36f3cfa41557aef2445ed | Bump version to 1.2.7 | open-io/oio-swift,open-io/oio-swift | oioswift/__init__.py | oioswift/__init__.py | __version__ = '1.2.7'
| __version__ = '1.2.4'
| apache-2.0 | Python |
6d9dbc7e99ff8dabd26bc8ee323bce4bee3c5fb4 | bump version | matthiask/feincms-elephantblog,joshuajonah/feincms-elephantblog,sbaechler/feincms-elephantblog,michaelkuty/feincms-elephantblog,joshuajonah/feincms-elephantblog,sbaechler/feincms-elephantblog,matthiask/feincms-elephantblog,michaelkuty/feincms-elephantblog,joshuajonah/feincms-elephantblog,feincms/feincms-elephantblog,michaelkuty/feincms-elephantblog,feincms/feincms-elephantblog,matthiask/feincms-elephantblog,sbaechler/feincms-elephantblog | elephantblog/__init__.py | elephantblog/__init__.py | from __future__ import absolute_import, unicode_literals
VERSION = (0, 3, 3)
__version__ = '.'.join(map(str, VERSION))
| from __future__ import absolute_import, unicode_literals
VERSION = (0, 3, 2)
__version__ = '.'.join(map(str, VERSION))
| bsd-3-clause | Python |
a7a68a5f7e4825888ae1ec166afc00df6fc72b0e | Fix docstring | darkfeline/mir.anidb | mir/anidb/api.py | mir/anidb/api.py | # Copyright (C) 2016, 2017 Allen Li
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Low level API for AniDB.
https://wiki.anidb.net/w/API
"""
import io
from typing import NamedTuple
import xml.etree.ElementTree as ET
import requests
_TITLES = 'http://anidb.net/api/anime-titles.xml.gz'
_HTTPAPI = 'http://api.anidb.net:9001/httpapi'
def titles_request() -> 'Response':
"""Request titles.
https://wiki.anidb.net/w/API#Anime_Titles
"""
return requests.get(_TITLES)
def async_titles_request(session) -> 'Response':
"""Request titles asynchronously.
https://wiki.anidb.net/w/API#Anime_Titles
"""
return session.get(_TITLES)
class Client(NamedTuple):
name: str
version: int
def httpapi_request(client, **params) -> 'Response':
"""Send a request to AniDB HTTP API.
https://wiki.anidb.net/w/HTTP_API_Definition
"""
return requests.get(
_HTTPAPI,
params={
'client': client.name,
'clientver': client.version,
'protover': 1,
**params
})
def async_httpapi_request(session: 'ClientSession', client, **params) -> 'ClientResponse':
"""Send an asynchronous request to AniDB HTTP API.
https://wiki.anidb.net/w/HTTP_API_Definition
"""
return session.get(
_HTTPAPI,
params={
'client': client.name,
'clientver': client.version,
'protover': 1,
**params
})
def unpack_xml(text) -> ET.ElementTree:
"""Unpack an XML string from AniDB API."""
etree: ET.ElementTree = ET.parse(io.StringIO(text))
_check_for_errors(etree)
return etree
def _check_for_errors(etree: ET.ElementTree):
"""Check AniDB response XML tree for errors."""
if etree.getroot().tag == 'error':
raise APIError(etree.getroot().text)
class APIError(Exception):
"""AniDB API error."""
| # Copyright (C) 2016, 2017 Allen Li
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Low level API for AniDB.
https://wiki.anidb.net/w/API
"""
import io
from typing import NamedTuple
import xml.etree.ElementTree as ET
import requests
_TITLES = 'http://anidb.net/api/anime-titles.xml.gz'
_HTTPAPI = 'http://api.anidb.net:9001/httpapi'
def titles_request() -> 'Response':
"""Request titles.
https://wiki.anidb.net/w/API#Anime_Titles
"""
return requests.get(_TITLES)
def async_titles_request(session) -> 'Response':
"""Request titles.
https://wiki.anidb.net/w/API#Anime_Titles
"""
return session.get(_TITLES)
class Client(NamedTuple):
name: str
version: int
def httpapi_request(client, **params) -> 'Response':
"""Send a request to AniDB HTTP API.
https://wiki.anidb.net/w/HTTP_API_Definition
"""
return requests.get(
_HTTPAPI,
params={
'client': client.name,
'clientver': client.version,
'protover': 1,
**params
})
def async_httpapi_request(session: 'ClientSession', client, **params) -> 'ClientResponse':
"""Send an asynchronous request to AniDB HTTP API.
https://wiki.anidb.net/w/HTTP_API_Definition
"""
return session.get(
_HTTPAPI,
params={
'client': client.name,
'clientver': client.version,
'protover': 1,
**params
})
def unpack_xml(text) -> ET.ElementTree:
"""Unpack an XML string from AniDB API."""
etree: ET.ElementTree = ET.parse(io.StringIO(text))
_check_for_errors(etree)
return etree
def _check_for_errors(etree: ET.ElementTree):
"""Check AniDB response XML tree for errors."""
if etree.getroot().tag == 'error':
raise APIError(etree.getroot().text)
class APIError(Exception):
"""AniDB API error."""
| apache-2.0 | Python |
df35c2bd6a5a78469870f5bc5ca822b197451b9d | Update test.py | HuimingCheng/AutoGrading,Hubert51/AutoGrading,Hubert51/AutoGrading,HuimingCheng/AutoGrading,Hubert51/AutoGrading,Hubert51/AutoGrading,HuimingCheng/AutoGrading,HuimingCheng/AutoGrading,Hubert51/AutoGrading,HuimingCheng/AutoGrading | learning/test.py | learning/test.py | import cv2
| print(120)
| mit | Python |
c58ddb58e6c35142eba077f86654d2a69a04504a | Fix astrobin_apps_notifications tests | astrobin/astrobin,astrobin/astrobin,astrobin/astrobin,astrobin/astrobin | astrobin_apps_notifications/tests.py | astrobin_apps_notifications/tests.py | # Django
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import TestCase
# This app
from astrobin_apps_notifications.utils import *
from astrobin_apps_notifications.templatetags.astrobin_apps_notifications_tags import *
class NotificationsTest(TestCase):
def setUp(self):
self.user1 = User.objects.create_user(
username = "user1",
email = "user1@test.com",
password = "password")
self.user2 = User.objects.create_user(
username = "user2",
email = "user1@test.com",
password = "password")
def tearDown(self):
self.user1.delete()
self.user2.delete()
def test_test_notification_view(self):
self.client.login(username = 'user1', password = 'password')
response = self.client.post(
reverse('astrobin_apps_notifications.test_notification',
args = ('user2',)), {})
self.assertEquals(response.status_code, 200)
self.assertEquals("test_notification sent" in response.content, True)
self.assertEquals(get_recent_notifications(self.user2).count(), 1)
self.assertEquals(get_seen_notifications(self.user2).count(), 0)
self.assertEquals(get_unseen_notifications(self.user2).count(), 1)
self.client.logout()
def test_notification_list_tag(self):
self.client.login(username = 'user1', password = 'password')
response = self.client.post(
reverse('astrobin_apps_notifications.test_notification',
args = ('user2',)), {})
self.client.logout()
self.client.login(username = 'user2', password = 'password')
response = notification_list(self.user2, -1, -1)
self.assertEquals(len(response['unseen']), 1)
self.assertEquals(response['unseen'][0], get_unseen_notifications(self.user2)[0])
self.assertEquals(len(response['seen']), 0)
self.client.logout()
| # Django
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import TestCase
# This app
from astrobin_apps_notifications.utils import *
from astrobin_apps_notifications.templatetags.astrobin_apps_notifications_tags import *
class NotificationsTest(TestCase):
def setUp(self):
self.user1 = User.objects.create_user(
username = "user1",
email = "user1@test.com",
password = "password")
self.user2 = User.objects.create_user(
username = "user2",
email = "user1@test.com",
password = "password")
def tearDown(self):
self.user1.delete()
self.user2.delete()
def test_test_notification_view(self):
self.client.login(username = 'user1', password = 'password')
response = self.client.post(
reverse('astrobin_apps_notifications.test_notification',
args = ('user2',)), {})
self.assertEquals(response.status_code, 200)
self.assertEquals("test_notification sent" in response.content, True)
self.assertEquals(get_recent_notifications(self.user2).count(), 1)
self.assertEquals(get_seen_notifications(self.user2).count(), 0)
self.assertEquals(get_unseen_notifications(self.user2).count(), 1)
self.client.logout()
def test_notification_list_tag(self):
self.client.login(username = 'user1', password = 'password')
response = self.client.post(
reverse('astrobin_apps_notifications.test_notification',
args = ('user2',)), {})
self.client.logout()
self.client.login(username = 'user2', password = 'password')
response = notification_list(self.user2)
self.assertEquals(len(response['unseen']), 1)
self.assertEquals(response['unseen'][0], get_unseen_notifications(self.user2)[0])
self.assertEquals(len(response['seen']), 0)
self.client.logout()
| agpl-3.0 | Python |
1eb4d00e005f22ae452ce9d36b9fce69fa9b96f7 | Drop extra trailing slash from notices_url | astrobin/astrobin,astrobin/astrobin,astrobin/astrobin,astrobin/astrobin | astrobin_apps_notifications/utils.py | astrobin_apps_notifications/utils.py | # Python
import simplejson
import urllib2
# Django
from django.conf import settings
from django.template.loader import render_to_string
# Third party
from gadjo.requestprovider.signals import get_request
from notification import models as notification
from persistent_messages.models import Message
import persistent_messages
def push_notification(recipients, notice_type, data):
data.update({'notices_url': settings.ASTROBIN_BASE_URL})
# Send as email
notification.send(recipients, notice_type, data)
# Send as persistent message
try:
request = get_request()
except IndexError:
# This may happen during unit testing
return
def get_formatted_messages(formats, label, context):
"""
Returns a dictionary with the format identifier as the key. The values are
are fully rendered templates with the given context.
"""
format_templates = {}
for fmt in formats:
# conditionally turn off autoescaping for .txt extensions in format
if fmt.endswith(".txt"):
context.autoescape = False
format_templates[fmt] = render_to_string((
"notification/%s/%s" % (label, fmt),
"notification/%s" % fmt), context=context)
return format_templates
messages = get_formatted_messages(['notice.html'], notice_type, data)
for recipient in recipients:
persistent_messages.add_message(
request,
persistent_messages.INFO,
messages['notice.html'],
user = recipient)
def get_recent_notifications(user, n = 10):
if not user.is_authenticated():
return None
notifications = Message.objects.filter(user = user).order_by('-created')
if n >= 0:
notifications = notifications[:n]
return notifications
def get_unseen_notifications(user, n = 10):
if not user.is_authenticated():
return None
notifications =\
Message.objects.filter(user = user, read = False).order_by('-created')
if n >= 0:
notifications = notifications[:n]
return notifications
def get_seen_notifications(user, n = 10):
if not user.is_authenticated():
return None
notifications =\
Message.objects.filter(user = user, read = True).order_by('-created')
if n >= 0:
notifications = notifications[:n]
return notifications
| # Python
import simplejson
import urllib2
# Django
from django.conf import settings
from django.template.loader import render_to_string
# Third party
from gadjo.requestprovider.signals import get_request
from notification import models as notification
from persistent_messages.models import Message
import persistent_messages
def push_notification(recipients, notice_type, data):
data.update({'notices_url': settings.ASTROBIN_BASE_URL + '/'})
# Send as email
notification.send(recipients, notice_type, data)
# Send as persistent message
try:
request = get_request()
except IndexError:
# This may happen during unit testing
return
def get_formatted_messages(formats, label, context):
"""
Returns a dictionary with the format identifier as the key. The values are
are fully rendered templates with the given context.
"""
format_templates = {}
for fmt in formats:
# conditionally turn off autoescaping for .txt extensions in format
if fmt.endswith(".txt"):
context.autoescape = False
format_templates[fmt] = render_to_string((
"notification/%s/%s" % (label, fmt),
"notification/%s" % fmt), context=context)
return format_templates
messages = get_formatted_messages(['notice.html'], notice_type, data)
for recipient in recipients:
persistent_messages.add_message(
request,
persistent_messages.INFO,
messages['notice.html'],
user = recipient)
def get_recent_notifications(user, n = 10):
if not user.is_authenticated():
return None
notifications = Message.objects.filter(user = user).order_by('-created')
if n >= 0:
notifications = notifications[:n]
return notifications
def get_unseen_notifications(user, n = 10):
if not user.is_authenticated():
return None
notifications =\
Message.objects.filter(user = user, read = False).order_by('-created')
if n >= 0:
notifications = notifications[:n]
return notifications
def get_seen_notifications(user, n = 10):
if not user.is_authenticated():
return None
notifications =\
Message.objects.filter(user = user, read = True).order_by('-created')
if n >= 0:
notifications = notifications[:n]
return notifications
| agpl-3.0 | Python |
3d813e9960a44bfacd94551b95caf489b85ea3bb | Fix typo in upgrade script | exocad/exotrac,dafrito/trac-mirror,dokipen/trac,dokipen/trac,dokipen/trac,exocad/exotrac,exocad/exotrac,dafrito/trac-mirror,moreati/trac-gitsvn,moreati/trac-gitsvn,dafrito/trac-mirror,exocad/exotrac,moreati/trac-gitsvn,dafrito/trac-mirror,moreati/trac-gitsvn | trac/upgrades/db11.py | trac/upgrades/db11.py | import os.path
import shutil
sql = """
-- Remove empty values from the milestone list
DELETE FROM milestone WHERE COALESCE(name,'')='';
-- Add a description column to the version table, and remove unnamed versions
CREATE TEMP TABLE version_old AS SELECT * FROM version;
DROP TABLE version;
CREATE TABLE version (
name text PRIMARY KEY,
time integer,
description text
);
INSERT INTO version(name,time,description)
SELECT name,time,'' FROM version_old WHERE COALESCE(name,'')<>'';
-- Add a description column to the component table, and remove unnamed components
CREATE TEMP TABLE component_old AS SELECT * FROM component;
DROP TABLE component;
CREATE TABLE component (
name text PRIMARY KEY,
owner text,
description text
);
INSERT INTO component(name,owner,description)
SELECT name,owner,'' FROM component_old WHERE COALESCE(name,'')<>'';
"""
def do_upgrade(env, ver, cursor):
cursor.execute(sql)
# Copy the new default wiki macros over to the environment
from trac.siteconfig import __default_macros_dir__ as macros_dir
for f in os.listdir(macros_dir):
if not f.endswith('.py'):
continue
src = os.path.join(macros_dir, f)
dst = os.path.join(env.path, 'wiki-macros', f)
if not os.path.isfile(dst):
shutil.copy2(src, dst)
| import os.path
import shutil
sql = """
-- Remove empty values from the milestone list
DELETE FROM milestone WHERE COALESCE(name,'')='';
-- Add a description column to the version table, and remove unnamed versions
CREATE TEMP TABLE version_old AS SELECT * FROM version;
DROP TABLE version;
CREATE TABLE version (
name text PRIMARY KEY,
time integer,
description text
);
INSERT INTO version(name,time,description)
SELECT name,time,'' FROM version_old WHERE COALESCE(name,'')<>'';
-- Add a description column to the component table, and remove unnamed components
CREATE TEMP TABLE component_old AS SELECT * FROM component;
DROP TABLE component;
CREATE TABLE component (
name text PRIMARY KEY,
owner text,
description text
);
INSERT INTO component(name,owner,description)
SELECT name,owner,'' FROM component_old WHERE COALESCE(name,'')<>'';
"""
def do_upgrade(env, ver, cursor):
cursor.execute(sql)
# Copy the new default wiki macros over to the environment
from trac.siteconfig import __default_macro_dir__ as macro_dir
for f in os.listdir(macro_dir):
if not f.endswith('.py'):
continue
src = os.path.join(macro_dir, f)
dst = os.path.join(env.path, 'wiki-macros', f)
if not os.path.isfile(dst):
shutil.copy2(src, dst)
| bsd-3-clause | Python |
36f5789686f16e463f76843664c654465c52d406 | add comment disabling tasks | praekelt/ummeli,praekelt/ummeli,praekelt/ummeli | ummeli/vlive/tasks.py | ummeli/vlive/tasks.py | from celery.task import task
from celery.task.sets import TaskSet
from vumiclient.client import Client
from django.conf import settings
from django.core.mail import send_mail, EmailMessage
from jmboarticles.models import Article
@task(ignore_result=True)
def send_password_reset(msisdn, new_password):
message = 'Ummeli on YAL :) Your new password is: %s' % new_password
client = Client(settings.VUMI_USERNAME, settings.VUMI_PASSWORD)
resp = client.send_sms(to_msisdn = msisdn,
from_msisdn = '1',
message = message)
@task(ignore_result=True)
def send_email(username, message):
email = EmailMessage('Blocked User: %s' % username, message,
settings.SEND_FROM_EMAIL_ADDRESS,
[settings.UMMELI_SUPPORT])
email.send(fail_silently=False)
def send_sms(msisdn, message):
client = Client(settings.VUMI_USERNAME, settings.VUMI_PASSWORD)
client.send_sms(to_msisdn = msisdn, from_msisdn = '1', message = message)
def disable_commenting():
Article.objects.filter(comments_enabled=True).update(temp_can_comment=True)
Article.objects.filter(temp_can_comment=True).update(comments_enabled=False)
def enable_commenting():
Article.objects.filter(temp_can_comment=True).update(comments_enabled=True)
| from celery.task import task
from celery.task.sets import TaskSet
from vumiclient.client import Client
from django.conf import settings
from django.core.mail import send_mail, EmailMessage
@task(ignore_result=True)
def send_password_reset(msisdn, new_password):
message = 'Ummeli on YAL :) Your new password is: %s' % new_password
client = Client(settings.VUMI_USERNAME, settings.VUMI_PASSWORD)
resp = client.send_sms(to_msisdn = msisdn,
from_msisdn = '1',
message = message)
@task(ignore_result=True)
def send_email(username, message):
email = EmailMessage('Blocked User: %s' % username, message,
settings.SEND_FROM_EMAIL_ADDRESS,
[settings.UMMELI_SUPPORT])
email.send(fail_silently=False)
def send_sms(msisdn, message):
client = Client(settings.VUMI_USERNAME, settings.VUMI_PASSWORD)
client.send_sms(to_msisdn = msisdn, from_msisdn = '1', message = message) | bsd-3-clause | Python |
e521b16844efc2853c0db9014098cb3e37f6eb04 | Add missing returns to the array API sorting functions | jakirkham/numpy,numpy/numpy,mhvk/numpy,seberg/numpy,pdebuyl/numpy,numpy/numpy,numpy/numpy,jakirkham/numpy,anntzer/numpy,charris/numpy,simongibbons/numpy,endolith/numpy,simongibbons/numpy,mhvk/numpy,seberg/numpy,mattip/numpy,mattip/numpy,jakirkham/numpy,mhvk/numpy,charris/numpy,seberg/numpy,simongibbons/numpy,pdebuyl/numpy,simongibbons/numpy,rgommers/numpy,mattip/numpy,endolith/numpy,numpy/numpy,anntzer/numpy,rgommers/numpy,pdebuyl/numpy,anntzer/numpy,charris/numpy,endolith/numpy,seberg/numpy,rgommers/numpy,mhvk/numpy,endolith/numpy,mhvk/numpy,mattip/numpy,charris/numpy,pdebuyl/numpy,jakirkham/numpy,jakirkham/numpy,rgommers/numpy,anntzer/numpy,simongibbons/numpy | numpy/_array_api/_sorting_functions.py | numpy/_array_api/_sorting_functions.py | def argsort(x, /, *, axis=-1, descending=False, stable=True):
from .. import argsort
from .. import flip
# Note: this keyword argument is different, and the default is different.
kind = 'stable' if stable else 'quicksort'
res = argsort(x, axis=axis, kind=kind)
if descending:
res = flip(res, axis=axis)
return res
def sort(x, /, *, axis=-1, descending=False, stable=True):
from .. import sort
from .. import flip
# Note: this keyword argument is different, and the default is different.
kind = 'stable' if stable else 'quicksort'
res = sort(x, axis=axis, kind=kind)
if descending:
res = flip(res, axis=axis)
return res
| def argsort(x, /, *, axis=-1, descending=False, stable=True):
from .. import argsort
from .. import flip
# Note: this keyword argument is different, and the default is different.
kind = 'stable' if stable else 'quicksort'
res = argsort(x, axis=axis, kind=kind)
if descending:
res = flip(res, axis=axis)
def sort(x, /, *, axis=-1, descending=False, stable=True):
from .. import sort
from .. import flip
# Note: this keyword argument is different, and the default is different.
kind = 'stable' if stable else 'quicksort'
res = sort(x, axis=axis, kind=kind)
if descending:
res = flip(res, axis=axis)
| bsd-3-clause | Python |
f6715a47447a3fd22d62d6f9309d1d9a156b5a85 | Bump version. | albfan/pudb,amigrave/pudb,albfan/pudb,amigrave/pudb | pudb/__init__.py | pudb/__init__.py | VERSION = "0.92.6"
CURRENT_DEBUGGER = [None]
def set_trace():
if CURRENT_DEBUGGER[0] is None:
from pudb.debugger import Debugger
dbg = Debugger()
CURRENT_DEBUGGER[0] = dbg
import sys
dbg.set_trace(sys._getframe().f_back)
def post_mortem(t):
p = Debugger()
p.reset()
while t.tb_next is not None:
t = t.tb_next
p.interaction(t.tb_frame, t)
def pm():
import sys
post_mortem(sys.last_traceback)
if __name__ == "__main__":
print "You now need to type 'python -m pudb.run'. Sorry."
| VERSION = "0.92.5"
CURRENT_DEBUGGER = [None]
def set_trace():
if CURRENT_DEBUGGER[0] is None:
from pudb.debugger import Debugger
dbg = Debugger()
CURRENT_DEBUGGER[0] = dbg
import sys
dbg.set_trace(sys._getframe().f_back)
def post_mortem(t):
p = Debugger()
p.reset()
while t.tb_next is not None:
t = t.tb_next
p.interaction(t.tb_frame, t)
def pm():
import sys
post_mortem(sys.last_traceback)
if __name__ == "__main__":
print "You now need to type 'python -m pudb.run'. Sorry."
| mit | Python |
12d71e50d335f8a095e3049af333112204d45775 | Remove unused import from singleton | dqnykamp/sympy,dqnykamp/sympy,sahmed95/sympy,VaibhavAgarwalVA/sympy,jerli/sympy,Vishluck/sympy,cccfran/sympy,aktech/sympy,sahmed95/sympy,sampadsaha5/sympy,wanglongqi/sympy,chaffra/sympy,chaffra/sympy,ga7g08/sympy,debugger22/sympy,beni55/sympy,abhiii5459/sympy,mcdaniel67/sympy,Davidjohnwilson/sympy,wyom/sympy,aktech/sympy,Curious72/sympy,pandeyadarsh/sympy,jaimahajan1997/sympy,wyom/sympy,Shaswat27/sympy,yukoba/sympy,cccfran/sympy,atreyv/sympy,lindsayad/sympy,Titan-C/sympy,hargup/sympy,farhaanbukhsh/sympy,skirpichev/omg,pbrady/sympy,shipci/sympy,amitjamadagni/sympy,drufat/sympy,madan96/sympy,Sumith1896/sympy,sahilshekhawat/sympy,cswiercz/sympy,kmacinnis/sympy,lidavidm/sympy,sahilshekhawat/sympy,Designist/sympy,srjoglekar246/sympy,Titan-C/sympy,saurabhjn76/sympy,aktech/sympy,hrashk/sympy,MechCoder/sympy,maniteja123/sympy,garvitr/sympy,bukzor/sympy,AkademieOlympia/sympy,ChristinaZografou/sympy,toolforger/sympy,flacjacket/sympy,ga7g08/sympy,shikil/sympy,AunShiLord/sympy,Gadal/sympy,oliverlee/sympy,debugger22/sympy,Vishluck/sympy,abhiii5459/sympy,souravsingh/sympy,Designist/sympy,pandeyadarsh/sympy,farhaanbukhsh/sympy,AkademieOlympia/sympy,jerli/sympy,MridulS/sympy,kevalds51/sympy,postvakje/sympy,meghana1995/sympy,lindsayad/sympy,jamesblunt/sympy,vipulroxx/sympy,moble/sympy,kaushik94/sympy,emon10005/sympy,Mitchkoens/sympy,jbbskinny/sympy,MridulS/sympy,minrk/sympy,abloomston/sympy,wanglongqi/sympy,oliverlee/sympy,Vishluck/sympy,garvitr/sympy,beni55/sympy,skidzo/sympy,souravsingh/sympy,pbrady/sympy,abloomston/sympy,hargup/sympy,kumarkrishna/sympy,amitjamadagni/sympy,saurabhjn76/sympy,rahuldan/sympy,ahhda/sympy,AkademieOlympia/sympy,atreyv/sympy,souravsingh/sympy,drufat/sympy,shikil/sympy,cswiercz/sympy,atsao72/sympy,atsao72/sympy,chaffra/sympy,atreyv/sympy,meghana1995/sympy,Shaswat27/sympy,liangjiaxing/sympy,postvakje/sympy,liangjiaxing/sympy,yukoba/sympy,Gadal/sympy,moble/sympy,dqnykamp/sympy,
kaichogami/sympy,diofant/diofant,sampadsaha5/sympy,ChristinaZografou/sympy,mafiya69/sympy,kaushik94/sympy,cswiercz/sympy,garvitr/sympy,Mitchkoens/sympy,grevutiu-gabriel/sympy,liangjiaxing/sympy,pandeyadarsh/sympy,yukoba/sympy,saurabhjn76/sympy,lidavidm/sympy,madan96/sympy,AunShiLord/sympy,minrk/sympy,Titan-C/sympy,grevutiu-gabriel/sympy,bukzor/sympy,AunShiLord/sympy,mcdaniel67/sympy,lindsayad/sympy,oliverlee/sympy,kumarkrishna/sympy,jaimahajan1997/sympy,rahuldan/sympy,asm666/sympy,sunny94/temp,yashsharan/sympy,jbbskinny/sympy,kumarkrishna/sympy,maniteja123/sympy,hrashk/sympy,ahhda/sympy,VaibhavAgarwalVA/sympy,farhaanbukhsh/sympy,VaibhavAgarwalVA/sympy,postvakje/sympy,kaichogami/sympy,abhiii5459/sympy,hrashk/sympy,beni55/sympy,cccfran/sympy,jaimahajan1997/sympy,MechCoder/sympy,emon10005/sympy,sunny94/temp,toolforger/sympy,skidzo/sympy,mcdaniel67/sympy,kevalds51/sympy,shikil/sympy,Shaswat27/sympy,Davidjohnwilson/sympy,ga7g08/sympy,bukzor/sympy,drufat/sympy,jerli/sympy,iamutkarshtiwari/sympy,hargup/sympy,kaushik94/sympy,rahuldan/sympy,ahhda/sympy,kmacinnis/sympy,emon10005/sympy,Arafatk/sympy,Curious72/sympy,toolforger/sympy,pbrady/sympy,debugger22/sympy,Curious72/sympy,shipci/sympy,mafiya69/sympy,madan96/sympy,iamutkarshtiwari/sympy,sahmed95/sympy,MechCoder/sympy,yashsharan/sympy,sahilshekhawat/sympy,shipci/sympy,kaichogami/sympy,Arafatk/sympy,ChristinaZografou/sympy,iamutkarshtiwari/sympy,vipulroxx/sympy,kmacinnis/sympy,Mitchkoens/sympy,jamesblunt/sympy,maniteja123/sympy,Gadal/sympy,Davidjohnwilson/sympy,sampadsaha5/sympy,abloomston/sympy,sunny94/temp,mafiya69/sympy,kevalds51/sympy,jamesblunt/sympy,moble/sympy,meghana1995/sympy,asm666/sympy,grevutiu-gabriel/sympy,skidzo/sympy,yashsharan/sympy,wyom/sympy,lidavidm/sympy,vipulroxx/sympy,Designist/sympy,asm666/sympy,atsao72/sympy,Arafatk/sympy,Sumith1896/sympy,Sumith1896/sympy,MridulS/sympy,jbbskinny/sympy,wanglongqi/sympy | sympy/core/singleton.py | sympy/core/singleton.py | """Singleton mechanism"""
from core import BasicMeta, Registry
from sympify import sympify
class SingletonRegistry(Registry):
"""
A map between singleton classes and the corresponding instances.
E.g. S.Exp == C.Exp()
"""
__slots__ = []
__call__ = staticmethod(sympify)
def __repr__(self):
return "S"
S = SingletonRegistry()
class Singleton(BasicMeta):
"""
Metaclass for singleton classes.
A singleton class has only one instance which is returned every time the
class is instantiated. Additionally, this instance can be accessed through
the global registry object S as S.<class_name>.
Example::
>>> from sympy import S, Basic
>>> from sympy.core.singleton import Singleton
>>> class MySingleton(Basic):
... __metaclass__ = Singleton
>>> Basic() is Basic()
False
>>> MySingleton() is MySingleton()
True
>>> S.MySingleton is MySingleton()
True
** Developer notes **
The class is instanciated immediately at the point where it is defined
by calling cls.__new__(cls). This instance is cached and cls.__new__ is
rebound to return it directly.
The original constructor is also cached to allow subclasses to access it
and have their own instance.
"""
def __init__(cls, name, bases, dict_):
super(Singleton, cls).__init__(cls, name, bases, dict_)
for ancestor in cls.mro():
if '__new__' in ancestor.__dict__:
break
if isinstance(ancestor, Singleton) and ancestor is not cls:
ctor = ancestor._new_instance
else:
ctor = cls.__new__
cls._new_instance = staticmethod(ctor)
the_instance = ctor(cls)
def __new__(cls):
return the_instance
cls.__new__ = staticmethod(__new__)
setattr(S, name, the_instance)
# Inject pickling support.
def __getnewargs__(self):
return ()
cls.__getnewargs__ = __getnewargs__
| """Singleton mechanism"""
from core import BasicMeta, Registry
from sympify import sympify
from basic import Basic
class SingletonRegistry(Registry):
"""
A map between singleton classes and the corresponding instances.
E.g. S.Exp == C.Exp()
"""
__slots__ = []
__call__ = staticmethod(sympify)
def __repr__(self):
return "S"
S = SingletonRegistry()
class Singleton(BasicMeta):
"""
Metaclass for singleton classes.
A singleton class has only one instance which is returned every time the
class is instantiated. Additionally, this instance can be accessed through
the global registry object S as S.<class_name>.
Example::
>>> from sympy import S, Basic
>>> from sympy.core.singleton import Singleton
>>> class MySingleton(Basic):
... __metaclass__ = Singleton
>>> Basic() is Basic()
False
>>> MySingleton() is MySingleton()
True
>>> S.MySingleton is MySingleton()
True
** Developer notes **
The class is instanciated immediately at the point where it is defined
by calling cls.__new__(cls). This instance is cached and cls.__new__ is
rebound to return it directly.
The original constructor is also cached to allow subclasses to access it
and have their own instance.
"""
def __init__(cls, name, bases, dict_):
super(Singleton, cls).__init__(cls, name, bases, dict_)
for ancestor in cls.mro():
if '__new__' in ancestor.__dict__:
break
if isinstance(ancestor, Singleton) and ancestor is not cls:
ctor = ancestor._new_instance
else:
ctor = cls.__new__
cls._new_instance = staticmethod(ctor)
the_instance = ctor(cls)
def __new__(cls):
return the_instance
cls.__new__ = staticmethod(__new__)
setattr(S, name, the_instance)
# Inject pickling support.
def __getnewargs__(self):
return ()
cls.__getnewargs__ = __getnewargs__
| bsd-3-clause | Python |
8846f410ffdc707e648d6786dd31247f0412eccf | Update version.py | scieloorg/packtools,scieloorg/packtools,scieloorg/packtools | packtools/version.py | packtools/version.py | """Single source to the version across setup.py and the whole project.
"""
from __future__ import unicode_literals
__version__ = '2.10.2'
| """Single source to the version across setup.py and the whole project.
"""
from __future__ import unicode_literals
__version__ = '2.10.0'
| bsd-2-clause | Python |
f57511df7de901f3694fa4225af458e192975299 | Fix PdfDict: __getattr__, not __getattribute__ | ajmarks/gymnast,ajmarks/gymnast | pdf_parser/pdf_types/compound_types.py | pdf_parser/pdf_types/compound_types.py | from .common import PdfType
class PdfArray(PdfType, list):
def __init__(self, *args, **kwargs):
PdfType.__init__(self)
list.__init__(self, *args, **kwargs)
class PdfDict(PdfType, dict):
def __init__(self, *args, **kwargs):
PdfType.__init__(self)
dict.__init__(self, *args, **kwargs)
def __getattr__(self, name):
try:
return self[name].parsed_object
except AttributeError:
return self[name]
except KeyError:
raise AttributeError('Object has no attribute "%s"'%name) | from .common import PdfType
class PdfArray(PdfType, list):
def __init__(self, *args, **kwargs):
PdfType.__init__(self)
list.__init__(self, *args, **kwargs)
class PdfDict(PdfType, dict):
def __init__(self, *args, **kwargs):
PdfType.__init__(self)
dict.__init__(self, *args, **kwargs)
def __getattribute__(self, name):
try:
return self[name].parsed_object
except AttributeError:
return self[name]
except KeyError:
raise AttributeError('Object has no attribute "%s"'%name) | mit | Python |
e8fdc11c227c47ed2d26ce5516bad305a63c64d9 | update migration for django 2 compatibility | okfn/foundation,okfn/website,okfn/foundation,okfn/website,okfn/website,okfn/foundation,okfn/website,okfn/foundation | aldryn_video/migrations/0001_initial.py | aldryn_video/migrations/0001_initial.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import jsonfield.fields
class Migration(migrations.Migration):
dependencies = [
('cms', '0015_auto_20160421_0000'),
('contenttypes', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='OEmbedVideoPlugin',
fields=[
('cmsplugin_ptr', models.OneToOneField(
parent_link=True,
auto_created=True,
primary_key=True,
serialize=False,
to='cms.CMSPlugin',
on_delete=models.CASCADE,
)),
('url', models.URLField(help_text='vimeo and youtube supported.', max_length=100, verbose_name='URL')),
('width', models.IntegerField(null=True, verbose_name='Width', blank=True)),
('height', models.IntegerField(null=True, verbose_name='Height', blank=True)),
('iframe_width', models.CharField(max_length=15, verbose_name='iframe width', blank=True)),
('iframe_height', models.CharField(max_length=15, verbose_name='iframe height', blank=True)),
('auto_play', models.BooleanField(default=False, verbose_name='auto play')),
('loop_video', models.BooleanField(default=False, help_text='when true, the video repeats itself when over.', verbose_name='loop')),
('oembed_data', jsonfield.fields.JSONField(null=True)),
('custom_params', models.CharField(help_text='define custom params (e.g. "start=10&end=50")', max_length=200, verbose_name='custom params', blank=True)),
],
options={
'abstract': False,
},
bases=('cms.cmsplugin',),
),
]
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import jsonfield.fields
class Migration(migrations.Migration):
dependencies = [
('cms', '0015_auto_20160421_0000'),
('contenttypes', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='OEmbedVideoPlugin',
fields=[
('cmsplugin_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='cms.CMSPlugin')),
('url', models.URLField(help_text='vimeo and youtube supported.', max_length=100, verbose_name='URL')),
('width', models.IntegerField(null=True, verbose_name='Width', blank=True)),
('height', models.IntegerField(null=True, verbose_name='Height', blank=True)),
('iframe_width', models.CharField(max_length=15, verbose_name='iframe width', blank=True)),
('iframe_height', models.CharField(max_length=15, verbose_name='iframe height', blank=True)),
('auto_play', models.BooleanField(default=False, verbose_name='auto play')),
('loop_video', models.BooleanField(default=False, help_text='when true, the video repeats itself when over.', verbose_name='loop')),
('oembed_data', jsonfield.fields.JSONField(null=True)),
('custom_params', models.CharField(help_text='define custom params (e.g. "start=10&end=50")', max_length=200, verbose_name='custom params', blank=True)),
],
options={
'abstract': False,
},
bases=('cms.cmsplugin',),
),
]
| mit | Python |
3cb6a475008dfa4eacb1f25175737dbf0754eee2 | Fix error when logging removal of soft-deleted project | opennode/waldur-mastermind,opennode/nodeconductor-assembly-waldur,opennode/waldur-mastermind,opennode/waldur-mastermind,opennode/nodeconductor-assembly-waldur,opennode/nodeconductor-assembly-waldur,opennode/waldur-mastermind | src/waldur_mastermind/support/log.py | src/waldur_mastermind/support/log.py | from waldur_core.logging.loggers import EventLogger, event_logger
from waldur_core.structure.models import Project
from waldur_core.structure.permissions import _get_project
from . import models
def get_issue_scopes(issue):
result = set()
if issue.resource:
project = _get_project(issue.resource)
result.add(issue.resource)
result.add(project)
result.add(project.customer)
if issue.project_id:
project = Project.all_objects.get(
id=issue.project_id
) # handle soft-deleted projects
result.add(project)
result.add(issue.customer)
if issue.customer:
result.add(issue.customer)
return result
class IssueEventLogger(EventLogger):
issue = models.Issue
class Meta:
event_types = (
'issue_deletion_succeeded',
'issue_update_succeeded',
'issue_creation_succeeded',
)
event_groups = {
'support': event_types,
}
@staticmethod
def get_scopes(event_context):
issue = event_context['issue']
return get_issue_scopes(issue)
class AttachmentEventLogger(EventLogger):
attachment = models.Attachment
class Meta:
event_types = (
'attachment_created',
'attachment_updated',
'attachment_deleted',
)
event_groups = {
'support': event_types,
}
@staticmethod
def get_scopes(event_context):
attachment = event_context['attachment']
return get_issue_scopes(attachment.issue)
class OfferingEventLogger(EventLogger):
offering = models.Offering
class Meta:
event_types = (
'offering_created',
'offering_deleted',
'offering_state_changed',
)
event_groups = {
'support': event_types,
}
@staticmethod
def get_scopes(event_context):
offering = event_context['offering']
project = Project.all_objects.get(
id=offering.project_id
) # handle case when project is already deleted
return {offering, project, project.customer}
event_logger.register('waldur_issue', IssueEventLogger)
event_logger.register('waldur_attachment', AttachmentEventLogger)
event_logger.register('waldur_offering', OfferingEventLogger)
| from waldur_core.logging.loggers import EventLogger, event_logger
from waldur_core.structure.models import Project
from waldur_core.structure.permissions import _get_project
from . import models
def get_issue_scopes(issue):
result = set()
if issue.resource:
project = _get_project(issue.resource)
result.add(issue.resource)
result.add(project)
result.add(project.customer)
if issue.project:
result.add(issue.project)
result.add(issue.customer)
if issue.customer:
result.add(issue.customer)
return result
class IssueEventLogger(EventLogger):
issue = models.Issue
class Meta:
event_types = (
'issue_deletion_succeeded',
'issue_update_succeeded',
'issue_creation_succeeded',
)
event_groups = {
'support': event_types,
}
@staticmethod
def get_scopes(event_context):
issue = event_context['issue']
return get_issue_scopes(issue)
class AttachmentEventLogger(EventLogger):
attachment = models.Attachment
class Meta:
event_types = (
'attachment_created',
'attachment_updated',
'attachment_deleted',
)
event_groups = {
'support': event_types,
}
@staticmethod
def get_scopes(event_context):
attachment = event_context['attachment']
return get_issue_scopes(attachment.issue)
class OfferingEventLogger(EventLogger):
offering = models.Offering
class Meta:
event_types = (
'offering_created',
'offering_deleted',
'offering_state_changed',
)
event_groups = {
'support': event_types,
}
@staticmethod
def get_scopes(event_context):
offering = event_context['offering']
project = Project.all_objects.get(
id=offering.project_id
) # handle case when project is already deleted
return {offering, project, project.customer}
event_logger.register('waldur_issue', IssueEventLogger)
event_logger.register('waldur_attachment', AttachmentEventLogger)
event_logger.register('waldur_offering', OfferingEventLogger)
| mit | Python |
070b532b041cec627b9823e8030c3e8d974e18c4 | prepare 1.6.0 | naphatkrit/easyci | easyci/__init__.py | easyci/__init__.py | __version__ = '1.6.0'
| __version__ = '1.5.0'
| mit | Python |
ae5591de01e00a018c302d4eb5afd1831fbd0812 | add phone number | dropbox-dashbpard/error-detect-of-log,dropbox-dashbpard/error-detect-of-log | ed/kernel_panic.py | ed/kernel_panic.py | from utils import detect_string, gen_hashcode, jsonify_headers
KERNEL_PANIC = [
r"(kernel BUG at[^\n]+)",
r"(spinlock bad magic)",
r"(Unable to handle kernel[^\n]+)",
r"(modem subsystem failure reason:.*Could not turn on the UNIV_STMR ustmr_qtimer_off_counter)",
r"(\w+ subsystem failure reason:[^\n]+)",
r"(Kernel panic - not syncing: [^\n]*: Timed out waiting for error ready: modem)",
r"(Kernel panic - [^\n]*)"
]
IGNORES = [
r"Crash injected via Diag",
r"SysRq : Trigger a crash"
]
def kernel_panic(logcat, headers):
parts = logcat.split('\n\n')
for content in parts[1:]:
process = "kernel"
for ignore in IGNORES:
if detect_string(content, ignore):
return None, None, None
for pattern in KERNEL_PANIC:
reason = detect_string(content, pattern)
if reason:
result = {'issue_owner': process, 'detail': reason}
if "should check the ramdump" in reason:
try:
UA = jsonify_headers(headers.get('X-Dropbox-UA', '='))
for key in ['imei', 'mac_address', 'sn', 'phone_number']:
if UA.get(key):
result[key] = UA.get(key)
except:
pass
md5 = gen_hashcode(result)
return md5, result, None
return None, None, None
| from utils import detect_string, gen_hashcode, jsonify_headers
import time
KERNEL_PANIC = [
r"(kernel BUG at[^\n]+)",
r"(spinlock bad magic)",
r"(Unable to handle kernel[^\n]+)",
r"(modem subsystem failure reason:.*Could not turn on the UNIV_STMR ustmr_qtimer_off_counter)",
r"(\w+ subsystem failure reason:[^\n]+)",
r"(Kernel panic - not syncing: [^\n]*: Timed out waiting for error ready: modem)",
r"(Kernel panic - [^\n]*)"
]
IGNORES = [
r"Crash injected via Diag",
r"SysRq : Trigger a crash"
]
def kernel_panic(logcat, headers):
parts = logcat.split('\n\n')
for content in parts[1:]:
process = "kernel"
for ignore in IGNORES:
if detect_string(content, ignore):
return None, None, None
for pattern in KERNEL_PANIC:
reason = detect_string(content, pattern)
if reason:
result = {'issue_owner': process, 'detail': reason}
if "should check the ramdump" in reason:
result["random"] = str(time.time())
try:
UA = jsonify_headers(headers.get('X-Dropbox-UA', '='))
for key in ['imei', 'mac_address', 'sn']:
if UA.get(key):
result[key] = UA.get(key)
except:
pass
md5 = gen_hashcode(result)
return md5, result, None
return None, None, None
| mit | Python |
63f996ae03cf3991d12070c20fe57e4cf4e8213d | bump version | timxx/gitc,timxx/gitc | qgitc/version.py | qgitc/version.py | # -*- coding: utf-8 -*-
VERSION_MAJOR = 3
VERSION_MINOR = 0
VERSION_PATCH = 1
VERSION = "{}.{}.{}".format(VERSION_MAJOR,
VERSION_MINOR,
VERSION_PATCH)
| # -*- coding: utf-8 -*-
VERSION_MAJOR = 3
VERSION_MINOR = 0
VERSION_PATCH = 0
VERSION = "{}.{}.{}".format(VERSION_MAJOR,
VERSION_MINOR,
VERSION_PATCH)
| apache-2.0 | Python |
b532e7e1c8e5fe1fa705e9fd438134ac1001d771 | Refactor estimator module | raviqqe/tensorflow-qnd,raviqqe/tensorflow-qnd | qnd/estimator.py | qnd/estimator.py | import functools
import inspect
import typing
import tensorflow as tf
import tensorflow.contrib.learn as learn
from . import util
from .config import def_config
def def_estimator(distributed=False):
config = def_config(distributed)
@util.func_scope
def estimator(model_fn, model_dir):
return tf.contrib.learn.Estimator(_wrap_model_fn(model_fn),
config=config(),
model_dir=model_dir)
return estimator
def _wrap_model_fn(original_model_fn):
@util.func_scope
def model(features, targets, mode):
are_args = functools.partial(util.are_instances, [features, targets])
def_model_fn = functools.partial(functools.partial, original_model_fn)
if are_args(tf.Tensor):
model_fn = def_model_fn(features, targets)
elif are_args(dict):
model_fn = def_model_fn(**features, **targets)
elif isinstance(features, tf.Tensor) and targets is None:
model_fn = def_model_fn(features)
elif isinstance(features, dict) and targets is None:
model_fn = def_model_fn(**features)
else:
raise ValueError(
"features and targets should be both tf.Tensor or dict.")
results = (
model_fn(mode=mode)
if "mode" in inspect.signature(model_fn).parameters.keys() else
model_fn())
return (
results
if isinstance(results, learn.estimators.model_fn.ModelFnOps) else
learn.estimators.model_fn.ModelFnOps(
mode,
*(results
if isinstance(results, typing.Sequence) else
(results,))))
return model
| import functools
import inspect
import tensorflow as tf
import tensorflow.contrib.learn as learn
from . import util
from .config import def_config
def def_estimator(distributed=False):
config = def_config(distributed)
@util.func_scope
def estimator(model_fn, model_dir):
return tf.contrib.learn.Estimator(_wrap_model_fn(model_fn),
config=config(),
model_dir=model_dir)
return estimator
def _wrap_model_fn(original_model_fn):
@util.func_scope
def model(features, targets, mode):
are_args = functools.partial(util.are_instances, [features, targets])
def_model_fn = functools.partial(functools.partial, original_model_fn)
if are_args(tf.Tensor):
model_fn = def_model_fn(features, targets)
elif are_args(dict):
model_fn = def_model_fn(**features, **targets)
elif isinstance(features, tf.Tensor) and targets is None:
model_fn = def_model_fn(features)
elif isinstance(features, dict) and targets is None:
model_fn = def_model_fn(**features)
else:
raise ValueError(
"features and targets should be both tf.Tensor or dict.")
results = (
model_fn(mode=mode)
if "mode" in inspect.signature(model_fn).parameters.keys() else
model_fn())
if (not isinstance(results, learn.estimators.model_fn.ModelFnOps)
and not isinstance(results, tuple)):
results = (results,)
return (
results
if isinstance(results, learn.estimators.model_fn.ModelFnOps) else
learn.estimators.model_fn.ModelFnOps(mode, *results)
)
return model
| unlicense | Python |
9c5bd21ae44f81255815ce0d184c4e1319e8f415 | Add GUI imports to qtui/__init__.py | metamarcdw/metamarket,IIIIIHIIIII/metamarket | qtui/__init__.py | qtui/__init__.py | from PyQt4 import QtCore, QtGui
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from metamarket_qt import *
from view_ident_dialog import *
from view_offer_dialog import *
from view_market_dialog import *
from view_chanmsg_dialog import *
from import_market_dialog import *
from send_chanmsg_dialog import *
from about_dialog import *
| mit | Python | |
e1e6624b97455fe73cce951a7a934464a16d8197 | Update to utils to cope with either python 3 or 2 type strings. | TyMaszWeb/django-template-finder | templatefinder/utils.py | templatefinder/utils.py | import fnmatch
import logging
import os
from django.conf import settings
from django.utils.importlib import import_module
try:
from django.utils.six import string_types
except ImportError:
string_types = (basestring,)
__all__ = ('find_all_templates', 'flatten_template_loaders')
LOGGER = logging.getLogger('templatefinder')
def flatten_template_loaders(templates):
"""
Given a collection of template loaders, unwrap them into one flat iterable.
:param templates: template loaders to unwrap
:return: template loaders as an iterable of strings.
:rtype: generator expression
"""
for loader in templates:
if not isinstance(loader, string_types):
for subloader in flatten_template_loaders(loader):
yield subloader
else:
yield loader
def find_all_templates(pattern='*.html'):
"""
Finds all Django templates matching given glob in all TEMPLATE_LOADERS
:param str pattern: `glob <http://docs.python.org/2/library/glob.html>`_
to match
.. important:: At the moment egg loader is not supported.
"""
templates = []
template_loaders = flatten_template_loaders(settings.TEMPLATE_LOADERS)
for loader_name in template_loaders:
module, klass = loader_name.rsplit('.', 1)
if loader_name in (
'django.template.loaders.app_directories.Loader',
'django.template.loaders.filesystem.Loader',
):
loader = getattr(import_module(module), klass)()
for dir in loader.get_template_sources(''):
for root, dirnames, filenames in os.walk(dir):
for basename in filenames:
filename = os.path.join(root, basename)
rel_filename = filename[len(dir)+1:]
if fnmatch.fnmatch(filename, pattern) or \
fnmatch.fnmatch(basename, pattern) or \
fnmatch.fnmatch(rel_filename, pattern):
templates.append(rel_filename)
else:
LOGGER.debug('%s is not supported' % loader_name)
return sorted(set(templates))
| import fnmatch
import logging
import os
from django.conf import settings
from django.utils.importlib import import_module
__all__ = ('find_all_templates', 'flatten_template_loaders')
LOGGER = logging.getLogger('templatefinder')
def flatten_template_loaders(templates):
"""
Given a collection of template loaders, unwrap them into one flat iterable.
:param templates: template loaders to unwrap
:return: template loaders as an iterable of strings.
:rtype: generator expression
"""
for loader in templates:
if not isinstance(loader, basestring):
for subloader in flatten_template_loaders(loader):
yield subloader
else:
yield loader
def find_all_templates(pattern='*.html'):
"""
Finds all Django templates matching given glob in all TEMPLATE_LOADERS
:param str pattern: `glob <http://docs.python.org/2/library/glob.html>`_
to match
.. important:: At the moment egg loader is not supported.
"""
templates = []
template_loaders = flatten_template_loaders(settings.TEMPLATE_LOADERS)
for loader_name in template_loaders:
module, klass = loader_name.rsplit('.', 1)
if loader_name in (
'django.template.loaders.app_directories.Loader',
'django.template.loaders.filesystem.Loader',
):
loader = getattr(import_module(module), klass)()
for dir in loader.get_template_sources(''):
for root, dirnames, filenames in os.walk(dir):
for basename in filenames:
filename = os.path.join(root, basename)
rel_filename = filename[len(dir)+1:]
if fnmatch.fnmatch(filename, pattern) or \
fnmatch.fnmatch(basename, pattern) or \
fnmatch.fnmatch(rel_filename, pattern):
templates.append(rel_filename)
else:
LOGGER.debug('%s is not supported' % loader_name)
return sorted(set(templates))
| bsd-2-clause | Python |
8f5f162938d449c58cd8b6d6d379c38c7da7ae23 | Fix upgrade to schema version 21 for PostgreSQL databases. Closes #5593. | pkdevbox/trac,pkdevbox/trac,pkdevbox/trac,pkdevbox/trac | trac/upgrades/db21.py | trac/upgrades/db21.py |
def do_upgrade(env, ver, cursor):
"""Upgrade the reports to better handle the new workflow capabilities"""
db = env.get_db_cnx()
owner = db.concat('owner', "' *'")
cursor.execute('SELECT id, query, description FROM report')
reports = cursor.fetchall()
for report, query, description in reports:
# All states other than 'closed' are "active".
query = query.replace("IN ('new', 'assigned', 'reopened')",
"<> 'closed'")
# Add a status column instead of adding an '*' to the owner's name for
# the 'assigned' state.
query = query.replace("(CASE status WHEN 'assigned' THEN %s "
"ELSE owner END) AS owner" % owner, "owner, status")
description = description.replace(" * If a ticket has been accepted, "
"a '*' is appended after the "
"owner's name\n",
'')
cursor.execute("UPDATE report SET query=%s, description=%s "
"WHERE id=%s", (query, description, report))
|
def do_upgrade(env, ver, cursor):
"""Upgrade the reports to better handle the new workflow capabilities"""
db = env.get_db_cnx()
owner = db.concat('owner', "' *'")
reports = list(cursor.execute('SELECT id, query, description FROM report'))
for report, query, description in reports:
# All states other than 'closed' are "active".
query = query.replace("IN ('new', 'assigned', 'reopened')",
"<> 'closed'")
# Add a status column instead of adding an '*' to the owner's name for
# the 'assigned' state.
query = query.replace("(CASE status WHEN 'assigned' THEN %s "
"ELSE owner END) AS owner" % owner, "owner, status")
description = description.replace(" * If a ticket has been accepted, "
"a '*' is appended after the "
"owner's name\n",
'')
cursor.execute("UPDATE report SET query=%s, description=%s "
"WHERE id=%s", (query, description, report))
| bsd-3-clause | Python |
0db5c9b2cc9bed0f43931030269db19862c03ae5 | Remove support for writing config options. This also solves Issue 60 | liuyi1112/rst2pdf,pombreda/rst2pdf,pombreda/rst2pdf,rst2pdf/rst2pdf,liuyi1112/rst2pdf,rst2pdf/rst2pdf | rst2pdf/config.py | rst2pdf/config.py | # -*- coding: utf-8 -*-
# Singleton config object
import ConfigParser
import os
from simplejson import dumps, loads
cfdir=os.path.join(os.path.expanduser('~'),'.rst2pdf')
cfname=os.path.join(cfdir,'config')
def getValue(section,key,default=None):
section=section.lower()
key=key.lower()
try:
return loads(conf.get (section,key))
except:
return default
class ConfigError(Exception):
def __init__(self,modulename,msg):
self.modulename=modulename
self.msg=msg
conf=ConfigParser.SafeConfigParser()
if not os.path.isdir(cfdir):
os.mkdir(cfdir)
conf.read(cfname)
| # -*- coding: utf-8 -*-
# Singleton config object
import ConfigParser
import os
from simplejson import dumps, loads
cfdir=os.path.join(os.path.expanduser('~'),'.rst2pdf')
cfname=os.path.join(cfdir,'config')
def getValue(section,key,default=None):
section=section.lower()
key=key.lower()
try:
return loads(conf.get (section,key))
except:
return default
def setValue(section,key,value):
section=str(section)
key=str(key)
section=section.lower()
key=key.lower()
value=dumps(value)
try:
r=conf.set(section,key,value)
except ConfigParser.NoSectionError:
conf.add_section(section)
r=conf.set(section,key,value)
f=open(cfname,'w')
conf.write(f)
return r
class ConfigError(Exception):
def __init__(self,modulename,msg):
self.modulename=modulename
self.msg=msg
conf=ConfigParser.SafeConfigParser()
if not os.path.isdir(cfdir):
os.mkdir(cfdir)
if not os.path.isfile(cfname):
open(cfname, 'w').close()
f=open(cfname,'r')
conf.readfp(f)
f.close()
| mit | Python |
b7de4f92e72092cd8d83e0cbcf20be67a086bbe2 | refactor out common constructor code | davegoopot/ee-balance-checker,davegoopot/ee-balance-checker | ee_balance_test.py | ee_balance_test.py | import unittest
from ee_balance import *
class EEUnitTests(unittest.TestCase):
def setUp(self):
config = ""
with open("secret.auth","r") as f:
config = f.read()
self.eecon = EEConnector.construct_from_config(config)
def test_fetch_login_page(self):
login_page_response = self.eecon._fetch_login_page()
self.assertEqual(200, login_page_response.status_code)
def test_fetch_balance_json(self):
balance_page_response = self.eecon._fetch_balance_page()
self.assertEqual(200, balance_page_response.status_code)
balance_json_response = self.eecon._fetch_balance_json()
self.assertEqual(200, balance_json_response.status_code)
def test_start_session_from_config_file(self):
test_file_contents = """[auth]
username=uname
password=pword
"""
eeconnector = EEConnector.construct_from_config(test_file_contents)
self.assertEqual("uname", eeconnector.authentication_data['LOGIN'])
self.assertEqual("pword", eeconnector.authentication_data['PASSWORD'])
| import configparser
import unittest
from ee_balance import *
class EEUnitTests(unittest.TestCase):
def setUp(self):
config = configparser.ConfigParser()
config.read('secret.auth')
self.eecon = EEConnector(username=config['auth']['username'], password=config['auth']['password'])
def test_fetch_login_page(self):
login_page_response = self.eecon._fetch_login_page()
self.assertEqual(200, login_page_response.status_code)
def test_fetch_balance_json(self):
balance_page_response = self.eecon._fetch_balance_page()
self.assertEqual(200, balance_page_response.status_code)
balance_json_response = self.eecon._fetch_balance_json()
self.assertEqual(200, balance_json_response.status_code)
def test_start_session_from_config_file(self):
test_file_contents = """[auth]
username=uname
password=pword
"""
eeconnector = EEConnector.construct_from_config(test_file_contents)
self.assertEqual("uname", eeconnector.authentication_data['LOGIN'])
self.assertEqual("pword", eeconnector.authentication_data['PASSWORD'])
#refactor setUp
| agpl-3.0 | Python |
7b23f3956a5a05f6659684499e848a8c96270b53 | bump version to 0.4.0.b1 | jepegit/cellpy,jepegit/cellpy | cellpy/_version.py | cellpy/_version.py | version_info = (0, 4, 0, "b1")
__version__ = ".".join(map(str, version_info))
| version_info = (0, 4, 0, "a5")
__version__ = ".".join(map(str, version_info))
| mit | Python |
78f5006bd5182317200d3396481cd4afd9e95417 | Fix typo in forms.py. | ulule/django-linguist | linguist/forms.py | linguist/forms.py | # -*- coding: utf-8 -*_
from django import forms
from django.forms.models import BaseInlineFormSet
from .utils.i18n import get_language
__all__ = [
'TranslationModelFormMixin',
'TranslationModelForm',
]
class TranslationModelFormMixin(object):
language = None
def __init__(self, *args, **kwargs):
current_language = kwargs.pop('_current_language', None)
super(TranslationModelFormMixin, self).__init__(*args, **kwargs)
instance = kwargs.get('instance', None)
if self.language is None:
if instance:
self.language = instance.language
return
self.language = current_language or get_language()
def _post_clean(self):
self.instance.language = self.language
super(TranslationModelFormMixin, self)._post_clean()
class TranslationModelForm(TranslationModelFormMixin, forms.ModelForm):
pass
class TranslationBaseInlineFormSet(BaseInlineFormSet):
language = None
def _construct_form(self, i, **kwargs):
form = super(TranslationBaseInlineFormSet, self)._construct_form(i, **kwargs)
form.language = self.language
return form
def save_new(self, form, commit=True):
obj = super(TranslationBaseInlineFormSet, self).save_new(form, commit)
return obj
| # -*- coding: utf-8 -*_
from django import forms
from django.forms.models import BaseInlineFormSet
from .utils.i18n import get_language
__all__ = [
'TranslationModelFormMixin',
'TranslationModelForm',
]
class TranslationModelFormMixin(object):
language = None
def __init__(self, *args, **kwargs):
current_language = kwargs.pop('_current_language', None)
super(TranslationModelFormMixin, self).__init__(*args, **kwargs)
instance = kwargs.get('instance', None)
if self.language is None:
if instance:
self.language = instance.language
return
self.language_code = current_language or get_language()
def _post_clean(self):
self.instance.language = self.language
super(TranslationModelFormMixin, self)._post_clean()
class TranslationModelForm(TranslationModelFormMixin, forms.ModelForm):
pass
class TranslationBaseInlineFormSet(BaseInlineFormSet):
language = None
def _construct_form(self, i, **kwargs):
form = super(TranslationBaseInlineFormSet, self)._construct_form(i, **kwargs)
form.language = self.language
return form
def save_new(self, form, commit=True):
obj = super(TranslationBaseInlineFormSet, self).save_new(form, commit)
return obj
| mit | Python |
d6b8156d18a358b594ecf8efe4c6f9de26e4af16 | allow more tolerance | rlowrance/mlpack,rlowrance/mlpack | random_sample.py | random_sample.py | import numpy as np
import unittest
import pdb
def rand(d):
'''Generate random np.array sample of size d drawn uniformly from [0,1]'''
while True:
result = np.random.rand(d)
yield result
def randn(d, mean, var):
'''Generate random np.array sample of size d drawn from N(mean, var) '''
while True:
result = var * np.random.randn(d) + mean
yield result
class TestRand(unittest.TestCase):
def setUp(self):
self.verbose = False
def test_generate(self):
for i in xrange(10):
x = next(rand(3))
if self.verbose:
print i, x
def test_iterate(self):
count = 0
for value in rand(5):
if self.verbose:
print value
count += 1
if count > 5:
break
class TestRandn(unittest.TestCase):
def setUp(self):
self.verbose = False
def test_generate_0_1(self):
mean = 0
var = 1
n = 100
means = np.zeros(n)
for i in xrange(n):
x = next(randn(3, mean, var))
means[i] = np.mean(x)
if self.verbose:
print i, x
actual_mean = np.mean(means)
if self.verbose:
print 'actual_means', actual_mean
self.assertLess(abs(mean - actual_mean), .5)
def test_generate_10_100(self):
mean = 10
var = 100
n = 1000
means = np.zeros(n)
for i in xrange(n):
x = next(randn(3, mean, var))
means[i] = np.mean(x)
if self.verbose:
print i, x
actual_mean = np.mean(means)
if self.verbose:
print 'actual_mean', actual_mean
self.assertLess(abs(mean - actual_mean), 5)
if __name__ == '__main__':
if False:
pdb.set_trace()
unittest.main()
| import numpy as np
import unittest
import pdb
def rand(d):
'''Generate random np.array sample of size d drawn uniformly from [0,1]'''
while True:
result = np.random.rand(d)
yield result
def randn(d, mean, var):
'''Generate random np.array sample of size d drawn from N(mean, var) '''
while True:
result = var * np.random.randn(d) + mean
yield result
class TestRand(unittest.TestCase):
def setUp(self):
self.verbose = False
def test_generate(self):
for i in xrange(10):
x = next(rand(3))
if self.verbose:
print i, x
def test_iterate(self):
count = 0
for value in rand(5):
if self.verbose:
print value
count += 1
if count > 5:
break
class TestRandn(unittest.TestCase):
def setUp(self):
self.verbose = False
def test_generate_0_1(self):
mean = 0
var = 1
n = 100
means = np.zeros(n)
for i in xrange(n):
x = next(randn(3, mean, var))
means[i] = np.mean(x)
if self.verbose:
print i, x
actual_mean = np.mean(means)
if self.verbose:
print 'actual_means', actual_mean
self.assertLess(abs(mean - actual_mean), .3)
def test_generate_10_100(self):
mean = 10
var = 100
n = 1000
means = np.zeros(n)
for i in xrange(n):
x = next(randn(3, mean, var))
means[i] = np.mean(x)
if self.verbose:
print i, x
actual_mean = np.mean(means)
if self.verbose:
print 'actual_mean', actual_mean
self.assertLess(abs(mean - actual_mean), 3)
if __name__ == '__main__':
if False:
pdb.set_trace()
unittest.main()
| mit | Python |
ec15cfe29eb4f99725315d54c97d99d81e3b3ccc | Enable Config instantiation from kwargs only | oleiade/Elevator | elevator/config.py | elevator/config.py | # -*- coding: utf-8 -*-
# Copyright (c) 2012 theo crevon
#
# See the file LICENSE for copying permission.
from ConfigParser import ConfigParser
from utils.snippets import items_to_dict
class Config(dict):
"""
Unix shells like environment class. Implements add,
get, load, flush methods. Handles lists of values too.
Basically Acts like a basic key/value store.
"""
def __init__(self, f=None, *args, **kwargs):
if f:
self.update_with_file(f) # Has to be called last!
self.update(kwargs)
dict.__init__(self, *args, **kwargs)
def update_with_file(self, f):
"""
Updates the environment using an ini file containing
key/value descriptions.
"""
config = ConfigParser()
with open(f, 'r') as f:
config.readfp(f)
for section in config.sections():
self.update(items_to_dict(config.items(section)))
def reload_from_file(self, f=''):
self.flush(f)
self.load(f)
def update_with_args(self, args):
"""Loads argparse kwargs into environment, as `section`"""
for (arg, value) in args:
if value is not None:
self[arg] = value
def flush(self):
"""
Flushes the environment from it's manually
set attributes.
"""
for attr in self.attributes:
delattr(self, attr)
| # -*- coding: utf-8 -*-
# Copyright (c) 2012 theo crevon
#
# See the file LICENSE for copying permission.
from ConfigParser import ConfigParser
from utils.snippets import items_to_dict
class Config(dict):
"""
Unix shells like environment class. Implements add,
get, load, flush methods. Handles lists of values too.
Basically Acts like a basic key/value store.
"""
def __init__(self, f, *args, **kwargs):
if f:
self.update_with_file(f) # Has to be called last!
self.update(kwargs)
dict.__init__(self, *args, **kwargs)
def update_with_file(self, f):
"""
Updates the environment using an ini file containing
key/value descriptions.
"""
config = ConfigParser()
with open(f, 'r') as f:
config.readfp(f)
for section in config.sections():
self.update(items_to_dict(config.items(section)))
def reload_from_file(self, f=''):
self.flush(f)
self.load(f)
def update_with_args(self, args):
"""Loads argparse kwargs into environment, as `section`"""
for (arg, value) in args:
if value is not None:
self[arg] = value
def flush(self):
"""
Flushes the environment from it's manually
set attributes.
"""
for attr in self.attributes:
delattr(self, attr)
| mit | Python |
eb5ea3eaa830482471ede267a57902c093b8a6d6 | Update : add some output server runner | oleiade/Elevator | elevator/server.py | elevator/server.py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import sys
import zmq
import leveldb
import threading
from database import Backend, Frontend
def run():
backend = Backend('test')
frontend = Frontend('tcp://127.0.0.1:4141')
poll = zmq.Poller()
poll.register(backend.socket, zmq.POLLIN)
poll.register(frontend.socket, zmq.POLLIN)
try:
print >> sys.stdout, "Elevator server started"
print >> sys.stdout, "The server is now ready to accept connections on port 4141"
while True:
sockets = dict(poll.poll())
if frontend.socket in sockets:
if sockets[frontend.socket] == zmq.POLLIN:
msg = frontend.socket.recv_multipart()
backend.socket.send_multipart(msg)
if backend.socket in sockets:
if sockets[backend.socket] == zmq.POLLIN:
msg = backend.socket.recv_multipart()
frontend.socket.send_multipart(msg)
except KeyboardInterrupt:
del backend
del frontend
| #!/usr/bin/env python
# -*- coding:utf-8 -*-
import zmq
import leveldb
import threading
from database import Backend, Frontend
def run():
backend = Backend('test')
frontend = Frontend('tcp://127.0.0.1:4141')
poll = zmq.Poller()
poll.register(backend.socket, zmq.POLLIN)
poll.register(frontend.socket, zmq.POLLIN)
try:
while True:
sockets = dict(poll.poll())
if frontend.socket in sockets:
if sockets[frontend.socket] == zmq.POLLIN:
msg = frontend.socket.recv_multipart()
backend.socket.send_multipart(msg)
if backend.socket in sockets:
if sockets[backend.socket] == zmq.POLLIN:
msg = backend.socket.recv_multipart()
frontend.socket.send_multipart(msg)
except KeyboardInterrupt:
del backend
del frontend
| mit | Python |
debbfdbcf66d8ba015a878a19a8ddcb1531fcbf9 | check github acc v.2 | DTailor/trolly,DTailor/trolly | trolly/faver/views.py | trolly/faver/views.py | from django.shortcuts import render_to_response
from django.http import HttpResponse
from .models import GeoLocation, StopTime, StationStop
from django.core import serializers
import json
from .utils import get_normalized_time
def index(request):
stations = serializers.serialize(
'json', GeoLocation.objects.all(), fields=('lat', 'long', 'name'))
return render_to_response('pages/index.html', {'locations': stations})
def show_schedule(request):
schedule = dict()
st_lat = request.GET.get('latitude', '46.9773091')
st_lon = request.GET.get('longitude', '28.8706002')
geo_point = GeoLocation.objects.get(lat=st_lat, long=st_lon)
station_stops = StationStop.objects.filter(location=geo_point)
now = get_normalized_time()
stop_times = StopTime.objects.filter(
station__in=station_stops, time__gte=now)
for stop_time in stop_times:
pass
return schedule
def get_station_schedule(request):
if request.method == 'GET':
geo_id = request.GET.get('station_id', False)
if geo_id:
geo_point = GeoLocation.objects.get(id=int(geo_id))
station_stops = StationStop.objects.filter(location=geo_point)
now = get_normalized_time()
stop_times = StopTime.objects.filter(
station__in=station_stops, time__gte=now).order_by('time')[:10]
stop_times_data = []
for stop_time in stop_times:
tmp_dict = {stop_time.route.nr: "{0:02d}:{1:02d}".format(
stop_time.time.hour, stop_time.time.minute)}
stop_times_data.append(tmp_dict)
data = {'schedule': stop_times_data, 'station': geo_point.name}
data = json.dumps(data)
return HttpResponse(data)
return HttpResponse(status=404)
| from django.shortcuts import render_to_response
from django.http import HttpResponse
from .models import GeoLocation, StopTime, StationStop
from django.core import serializers
from .utils import get_normalized_time
import json
def index(request):
stations = serializers.serialize(
'json', GeoLocation.objects.all(), fields=('lat', 'long', 'name'))
return render_to_response('pages/index.html', {'locations': stations})
def show_schedule(request):
schedule = dict()
st_lat = request.GET.get('latitude', '46.9773091')
st_lon = request.GET.get('longitude', '28.8706002')
geo_point = GeoLocation.objects.get(lat=st_lat, long=st_lon)
station_stops = StationStop.objects.filter(location=geo_point)
now = get_normalized_time()
stop_times = StopTime.objects.filter(
station__in=station_stops, time__gte=now)
for stop_time in stop_times:
pass
return schedule
def get_station_schedule(request):
if request.method == 'GET':
geo_id = request.GET.get('station_id', False)
if geo_id:
geo_point = GeoLocation.objects.get(id=int(geo_id))
station_stops = StationStop.objects.filter(location=geo_point)
now = get_normalized_time()
stop_times = StopTime.objects.filter(
station__in=station_stops, time__gte=now).order_by('time')[:10]
stop_times_data = []
for stop_time in stop_times:
tmp_dict = {stop_time.route.nr: "{0:02d}:{1:02d}".format(
stop_time.time.hour, stop_time.time.minute)}
stop_times_data.append(tmp_dict)
data = {'schedule': stop_times_data, 'station': geo_point.name}
data = json.dumps(data)
return HttpResponse(data)
return HttpResponse(status=404)
| mit | Python |
fa1909f13414dd8106f7624f264945957b5308df | fix building in python 2 | jaimeMF/pyucl | docs/conf.py | docs/conf.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'pyucl'
author = 'Jaime Marquínez Ferrándiz'
copyright = '2015, {}'.format(author)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0'
# The full version, including alpha/beta/rc tags.
release = '0.0'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
intersphinx_mapping = {'python': ('https://docs.python.org/3', None)}
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Output file base name for HTML help builder.
htmlhelp_basename = 'pyucldoc'
# Options for the html theme
html_theme_options = {
'github_user': 'jaimeMF',
'github_repo': 'pyucl',
'github_banner': True,
}
| # -*- coding: utf-8 -*-
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'pyucl'
author = 'Jaime Marquínez Ferrándiz'
copyright = '2015, {}'.format(author)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0'
# The full version, including alpha/beta/rc tags.
release = '0.0'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
intersphinx_mapping = {'python': ('https://docs.python.org/3', None)}
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Output file base name for HTML help builder.
htmlhelp_basename = 'pyucldoc'
# Options for the html theme
html_theme_options = {
'github_user': 'jaimeMF',
'github_repo': 'pyucl',
'github_banner': True,
}
| unlicense | Python |
2e88589c8da300cf06616b0e77978f86a665bac5 | Bump version to 1.3.0 | dmulholland/syntex,dmulholland/syntex | monk/__init__.py | monk/__init__.py | # ------------------------------------------------------------------------------
# Monk: a lightweight, markdownish markup language.
#
# Author: Darren Mulholland <darren@mulholland.xyz>
# License: Public Domain
# ------------------------------------------------------------------------------
# Package version number.
__version__ = "1.3.0"
from .interface import render
from .interface import main
from . import tags
from . import nodes
from . import parsers
from . import escapes
from . import utils
| # ------------------------------------------------------------------------------
# Monk: a lightweight, markdownish markup language.
#
# Author: Darren Mulholland <darren@mulholland.xyz>
# License: Public Domain
# ------------------------------------------------------------------------------
# Package version number.
__version__ = "1.2.2"
from .interface import render
from .interface import main
from . import tags
from . import nodes
from . import parsers
from . import escapes
from . import utils
| unlicense | Python |
0ce7e598bf4ca6bcae1ca874c5f20081182e91f9 | test stop service | iottly/iottly-device-agent-py,iottly/iottly-device-agent-py | iottly-device-agent-py/main.py | iottly-device-agent-py/main.py | """
Copyright 2015 Stefano Terna
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import time
import json
import logging
from iottly import rpi_agent
def main():
#define as many loop functions
#loop functions are being runned in an infinite loop
def loop1():
logging.info('loop1')
#msg is a dictionary (json):
msg = {"timerevent": {"loop1message":1}}
agent.send_msg(msg)
time.sleep(1)
def loop2():
logging.info('loop2')
#msg is a dictionary (json):
msg = {"timerevent": {"loop1message":2}}
agent.send_msg(msg)
time.sleep(1)
#define the callback to receive messages from broker:
def new_message(msg):
#received message is a dictionary
logging.info(msg)
agent.send_msg(msg)
if "ECHO" in msg.keys():
agent.close()
#instantiate the agent passing:
# - the message callback
# - a list with the loop functions
agent = rpi_agent.RPiIottlyAgent(new_message, [loop1, loop2])
agent.start()
if __name__ == '__main__':
main()
| """
Copyright 2015 Stefano Terna
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import time
import json
import logging
from iottly import rpi_agent
def main():
#define as many loop functions
#loop functions are being runned in an infinite loop
def loop1():
logging.info('loop1')
#msg is a dictionary (json):
msg = {"timerevent": {"loop1message":1}}
agent.send_msg(msg)
time.sleep(1)
def loop2():
logging.info('loop2')
#msg is a dictionary (json):
msg = {"timerevent": {"loop1message":2}}
agent.send_msg(msg)
time.sleep(1)
#define the callback to receive messages from broker:
def new_message(msg):
#received message is a dictionary
logging.info(msg)
agent.send_msg(msg)
#instantiate the agent passing:
# - the message callback
# - a list with the loop functions
agent = rpi_agent.RPiIottlyAgent(new_message, [loop1, loop2])
agent.start()
if __name__ == '__main__':
main()
| apache-2.0 | Python |
9e394259cdb61eb34cf9f170f161083e18c2b981 | Bump version to 0.0.8 | portfoliome/pgawedge | pgawedge/_version.py | pgawedge/_version.py | version_info = (0, 0, 8)
__version__ = '.'.join(map(str, version_info))
| version_info = (0, 0, 7)
__version__ = '.'.join(map(str, version_info))
| mit | Python |
d19da9627ca84b6627347647e7829e43e39a1576 | add function includes() | plepe/pgmapcss,plepe/pgmapcss | pgmapcss/includes.py | pgmapcss/includes.py | _includes = {}
def register_includes(inc):
global _includes
_includes = dict(list(_includes.items()) + list(inc.items()))
def include_text():
global _includes
ret = ''
for name, function in _includes.items():
ret += function
return ret
def includes():
global _includes
print(_includes)
return _includes
| includes = {}
def register_includes(inc):
global includes
includes = dict(list(includes.items()) + list(inc.items()))
def include_text():
global includes
ret = ''
for name, function in includes.items():
ret += function
return ret
| agpl-3.0 | Python |
b8ffb619a26e27f1376bf8a39e752cdf2eb3afac | Make copyright year consistent with license | astropy/astropy-helpers,dpshelio/astropy-helpers,dpshelio/astropy-helpers,astropy/astropy-helpers,Cadair/astropy-helpers,Cadair/astropy-helpers | docs/conf.py | docs/conf.py | # -*- coding: utf-8 -*-
project = 'astropy-helpers'
copyright = '2014, The Astropy Developers'
author = 'The Astropy Developers'
# We need to get the version number from the package
import sys # noqa
sys.path.insert(0, '..')
import astropy_helpers # noqa
version = astropy_helpers.__version__
release = astropy_helpers.__version__
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'numpydoc',
'sphinx_automodapi.automodapi'
]
numpydoc_show_class_members = False
intersphinx_mapping = {'https://docs.python.org/': None}
# The suffix(es) of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'bootstrap-astropy'
html_theme_options = {
'logotext1': 'astropy', # white, semi-bold
'logotext2': '-helpers', # orange, light
'logotext3': ':docs' # white, light
}
| # -*- coding: utf-8 -*-
project = 'astropy-helpers'
copyright = '2019, The Astropy Developers'
author = 'The Astropy Developers'
# We need to get the version number from the package
import sys # noqa
sys.path.insert(0, '..')
import astropy_helpers # noqa
version = astropy_helpers.__version__
release = astropy_helpers.__version__
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'numpydoc',
'sphinx_automodapi.automodapi'
]
numpydoc_show_class_members = False
intersphinx_mapping = {'https://docs.python.org/': None}
# The suffix(es) of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'bootstrap-astropy'
html_theme_options = {
'logotext1': 'astropy', # white, semi-bold
'logotext2': '-helpers', # orange, light
'logotext3': ':docs' # white, light
}
| bsd-3-clause | Python |
143233cf237a9f8e45718f4860e8b4ec5c4e1a6c | Make regtab a class attribute | mossberg/spym,mossberg/spym | registers.py | registers.py | class Registers(object):
regtab = {
'v0': 2, 'v1': 3,
'a0': 4, 'a1': 5, 'a2': 6, 'a3': 7,
't0': 8, 't1': 9, 't2': 10, 't3': 11, 't4': 12, 't5': 13, 't6': 14, 't7': 15,
's0': 16, 's1': 17, 's2': 18, 's3': 19, 's4': 20, 's5': 21, 's6': 22, 's7': 23,
't8': 24, 't9': 25,
'k0': 26, 'k1': 27,
'gp': 28,
'sp': 29,
'fp': 30,
'ra': 31,
'zero': 0
}
def __init__(self):
self.registers = [0 for _ in xrange(32)]
def read(self, reg):
ind = reg if type(reg) is int else self.regtab[reg]
return self.registers[ind]
def write(self, reg, contents):
ind = reg if type(reg) is int else self.regtab[reg]
if ind == 0:
raise Exception('can\'t write to $zero')
self.registers[ind] = contents
def dump(self):
for reg in sorted(self.regtab):
print '${}/{} : {}\t\t'.format(reg, self.regtab[reg],
self.registers[self.regtab[reg]])
| class Registers(object):
def __init__(self):
self.registers = [0 for _ in xrange(32)]
self.regtab = {
'v0': 2, 'v1': 3,
'a0': 4, 'a1': 5, 'a2': 6, 'a3': 7,
't0': 8, 't1': 9, 't2': 10, 't3': 11, 't4': 12, 't5': 13, 't6': 14, 't7': 15,
's0': 16, 's1': 17, 's2': 18, 's3': 19, 's4': 20, 's5': 21, 's6': 22, 's7': 23,
't8': 24, 't9': 25,
'k0': 26, 'k1': 27,
'gp': 28,
'sp': 29,
'fp': 30,
'ra': 31,
'zero': 0
}
def read(self, reg):
ind = reg if type(reg) is int else self.regtab[reg]
return self.registers[ind]
def write(self, reg, contents):
ind = reg if type(reg) is int else self.regtab[reg]
if ind == 0:
raise Exception('can\'t write to $zero')
self.registers[ind] = contents
def dump(self):
for reg in sorted(self.regtab):
print '${}/{} : {}\t\t'.format(reg, self.regtab[reg],
self.registers[self.regtab[reg]])
| mit | Python |
4f26f73d7206d052307ac8ceaf6fef6b472d8162 | move pikelangprogram parser to init | groteworld/pikalang,grotewold/pikalang | pikalang/__init__.py | pikalang/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""pikalang module.
A brainfuck derivative based off the vocabulary of Pikachu from Pokemon.
Copyright (c) 2019 Blake Grotewold
"""
import sys
import os
from pikalang.parser import PikalangParser
MAX_LOCATIONS = 20
class PikalangProgram:
def __init__(self, source):
self.source = source
self.parser = PikalangParser()
def run(self):
self.data = [0] * MAX_LOCATIONS
self.location = 0
commands = self.parse(self.source)
commands.run(self)
def parse(self, source):
return self.parser.parse(source)
def __str__(self):
return str(self.parse(self.source))
def load_source(file):
if os.path.isfile(file):
if os.path.splitext(file)[1] == ".pokeball":
with open(file, "r") as pikalang_file:
pikalang_data = pikalang_file.read()
return pikalang_data
else:
print("pikalang: file is not a pokeball", file=sys.stderr)
return False
else:
print("pikalang: file does not exist", file=sys.stderr)
return False
def evaluate(source):
"""Run Pikalang system."""
program = PikalangProgram(source)
program.run()
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""pikalang module.
A brainfuck derivative based off the vocabulary of Pikachu from Pokemon.
Copyright (c) 2019 Blake Grotewold
"""
import sys
import os
from pikalang.parser import PikalangParser
MAX_LOCATIONS = 20
class PikalangProgram:
def __init__(self, source):
self.source = source
def run(self):
self.data = [0] * MAX_LOCATIONS
self.location = 0
commands = self.parse(self.source)
commands.run(self)
def parse(self, source):
parser = PikalangParser()
return parser.parse(source)
def __str__(self):
return str(self.parse(self.source))
def load_source(file):
if os.path.isfile(file):
if os.path.splitext(file)[1] == ".pokeball":
with open(file, "r") as pikalang_file:
pikalang_data = pikalang_file.read()
return pikalang_data
else:
print("pikalang: file is not a pokeball", file=sys.stderr)
return False
else:
print("pikalang: file does not exist", file=sys.stderr)
return False
def evaluate(source):
"""Run Pikalang system."""
program = PikalangProgram(source)
program.run()
| mit | Python |
9dd39bf0ea32f51bac3e614f28ce69276c8955d4 | add max_content_length to default_settings | datagovuk/ckanext-archiver,ckan/ckanext-archiver,datagovuk/ckanext-archiver,DanePubliczneGovPl/ckanext-archiver,datagovuk/ckanext-archiver,ckan/ckanext-archiver,DanePubliczneGovPl/ckanext-archiver,DanePubliczneGovPl/ckanext-archiver,ckan/ckanext-archiver | ckanext/archiver/default_settings.py | ckanext/archiver/default_settings.py | # path to ckan config file
CKAN_CONFIG = '/home/okfn/pyenv/src/ckan/ckan.ini'
# directory to save downloaded files to
ARCHIVE_DIR = '/tmp/archive'
# Use this user name when requesting data from ckan
ARCHIVE_USER = u'okfn_maintenance'
# Max content-length of archived files, larger files will be ignored
MAX_CONTENT_LENGTH = 500000
| # path to ckan config file
CKAN_CONFIG = '/home/okfn/pyenv/src/ckan/ckan.ini'
# directory to save downloaded files to
ARCHIVE_DIR = '/tmp/archive'
# Use this user name when requesting data from ckan
ARCHIVE_USER = u'okfn_maintenance'
| mit | Python |
64172679bd860119f1ecb8960542160bd7b8670e | Kill eventlet after successful registration | royragsdale/picoCTF,royragsdale/picoCTF,picoCTF/picoCTF,royragsdale/picoCTF,picoCTF/picoCTF,royragsdale/picoCTF,royragsdale/picoCTF,picoCTF/picoCTF,royragsdale/picoCTF,picoCTF/picoCTF,royragsdale/picoCTF,picoCTF/picoCTF,picoCTF/picoCTF | picoCTF-web/tests/load/registration.py | picoCTF-web/tests/load/registration.py | """Register various types of users and store their login credentials."""
import random
import uuid
from locust import HttpLocust, TaskSet, task
from locust.exception import StopLocust
from demographics_generator import get_affiliation, get_country_code, get_user_type, get_username, get_password, get_email, get_demographics
MONGO_HOST = "127.0.0.1"
MONGO_PORT = 27017
MONGO_USER = None
MONGO_PASS = None
API_BASE_URL = 'api/v1'
REGISTRATION_ENDPOINT = API_BASE_URL + '/users'
def generate_user():
"""Generate a set of valid demographics for the given user type."""
user_fields = {
'username': get_username(),
'password': 'password',
'email': get_email(),
'affiliation': get_affiliation(),
'country': get_country_code(),
'usertype': get_user_type(),
'demo': get_demographics(),
}
return user_fields
class RegistrationTasks(TaskSet):
@task(weight=10)
def successfully_register(l):
user_demographics = generate_user()
l.client.post(REGISTRATION_ENDPOINT, json=user_demographics)
raise StopLocust # Terminate after successful registration
@task(weight=1)
def registration_error(l):
user_demographics = generate_user()
user_demographics['username'] = ''
with l.client.post(REGISTRATION_ENDPOINT,
json=user_demographics, catch_response=True) as res:
if res.status_code == 400:
res.success()
class RegistrationLocust(HttpLocust):
task_set = RegistrationTasks
min_wait = 1000
max_wait = 4000
| """Register various types of users and store their login credentials."""
import random
import uuid
from locust import HttpLocust, TaskSet, task
from demographics_generator import get_affiliation, get_country_code, get_user_type, get_username, get_password, get_email, get_demographics
MONGO_HOST = "127.0.0.1"
MONGO_PORT = 27017
MONGO_USER = None
MONGO_PASS = None
USERS_TO_REGISTER = 200
BASIC_AUTH_USERNAME = 'pico'
BASIC_AUTH_PASSWORD = '94b98f68b2cea8eec7dcd20f812380e2f10fb4acffc88860e81fb1915ef7a3be'
API_BASE_URL = 'api/v1'
REGISTRATION_ENDPOINT = API_BASE_URL + '/users'
def generate_user():
"""Generate a set of valid demographics for the given user type."""
user_fields = {
'username': get_username(),
'password': 'password',
'email': get_email(),
'affiliation': get_affiliation(),
'country': get_country_code(),
'usertype': get_user_type(),
'demo': get_demographics(),
}
return user_fields
class RegistrationTasks(TaskSet):
@task(weight=10)
def successfully_register(l):
user_demographics = generate_user()
l.client.post(REGISTRATION_ENDPOINT, json=user_demographics)
@task(weight=1)
def registration_error(l):
user_demographics = generate_user()
user_demographics['username'] = ''
with l.client.post(REGISTRATION_ENDPOINT,
json=user_demographics, catch_response=True) as res:
if res.status_code == 400:
res.success()
class RegistrationLocust(HttpLocust):
task_set = RegistrationTasks
min_wait = 1000
max_wait = 4000
| mit | Python |
94e054b0d8b72cdc61a5b45b89010e183c2888b7 | Fix for PEP8 | danielkoster/argosd | argosd/argosd.py | argosd/argosd.py | import sys
import logging
import signal
from queue import PriorityQueue
from peewee import *
from argosd import settings
from argosd.scheduling import TaskScheduler, TaskRunner
from argosd.models import Show, Episode
from argosd.api.app import Api
class ArgosD:
"""Main ArgosD class. Starts all runners and processes."""
queue = None
taskscheduler = None
taskrunner = None
api = None
def __init__(self):
self.queue = PriorityQueue()
self.taskscheduler = TaskScheduler(self.queue)
self.taskrunner = TaskRunner(self.queue)
self.api = Api()
def run(self):
"""Starts all runners and processes."""
logging.info('ArgosD starting')
self._create_database()
self.taskscheduler.run()
self.taskrunner.run()
self.api.run()
# Stop everything when a SIGTERM is received
signal.signal(signal.SIGTERM, self._handle_signal)
logging.info('ArgosD running')
# Wait for a signal. This causes our main thread to remain alive,
# which is needed to properly process any signals.
signal.pause()
@staticmethod
def _create_database():
database = SqliteDatabase('{}/argosd.db'.format(settings.ARGOSD_PATH))
database.connect()
database.create_tables([Show, Episode], safe=True)
database.close()
def _handle_signal(self, signum, frame):
del frame # Unused
if signum == signal.SIGTERM:
self.stop()
def stop(self):
"""Stops all runners and processes."""
logging.info('ArgosD stopping')
# Tell the scheduler to stop
logging.info('Telling taskscheduler to stop')
self.taskscheduler.stop()
# Tell the taskrunner to stop
logging.info('Telling taskrunner to stop')
self.taskrunner.stop()
# Tell the api to stop
logging.info('Telling API to stop')
self.api.stop()
logging.info('ArgosD stopped')
sys.exit(0)
| import sys
import logging
import signal
from queue import PriorityQueue
from peewee import *
from argosd import settings
from argosd.scheduling import TaskScheduler, TaskRunner
from argosd.models import Show, Episode
from argosd.api.app import Api
class ArgosD:
"""Main ArgosD class. Starts all runners and processes."""
queue = None
taskscheduler = None
taskrunner = None
api = None
def __init__(self):
self.queue = PriorityQueue()
self.taskscheduler = TaskScheduler(self.queue)
self.taskrunner = TaskRunner(self.queue)
self.api = Api()
def run(self):
"""Starts all runners and processes."""
logging.info('ArgosD starting')
self._create_database()
self.taskscheduler.run()
self.taskrunner.run()
self.api.run()
# Stop everything when a SIGTERM is received
signal.signal(signal.SIGTERM, self._handle_signal)
logging.info('ArgosD running')
# Wait for a signal. This causes our main thread to remain alive,
# which is needed to properly process any signals.
signal.pause()
@staticmethod
def _create_database():
database = SqliteDatabase('{}/argosd.db'.format(settings.ARGOSD_PATH))
database.connect()
database.create_tables([Show, Episode], safe=True)
database.close()
def _handle_signal(self, signum, frame):
del frame # Unused
if signum == signal.SIGTERM:
self.stop()
def stop(self):
"""Stops all runners and processes."""
logging.info('ArgosD stopping')
# Tell the scheduler to stop
logging.info('Telling taskscheduler to stop')
self.taskscheduler.stop()
# Tell the taskrunner to stop
logging.info('Telling taskrunner to stop')
self.taskrunner.stop()
# Tell the api to stop
logging.info('Telling API to stop')
self.api.stop()
logging.info('ArgosD stopped')
sys.exit(0)
| mit | Python |
03bef8e094032c2f8a10236166e67ed69f8fcbd2 | Fix missing closing quotes in translate help message | thomasleese/smartbot-old,tomleese/smartbot,Muzer/smartbot,Cyanogenoid/smartbot | plugins/translate.py | plugins/translate.py | import sys
import re
from textblob import TextBlob
class Plugin:
matcher = re.compile(r'translate (?:from ([^ ]+) )?(?:to ([^ ]+) )?(.*)')
def on_command(self, bot, msg, stdin, stdout, reply):
match = self.matcher.match(msg["message"])
if not match:
return
from_lang = match.group(1) or None # let autodetect decide
to_lang = match.group(2) or "en"
message = TextBlob(match.group(3))
try:
translated = message.translate(from_lang=from_lang, to=to_lang)
except:
return
print(translated, file=stdout)
def on_help(self):
return "Usage: translate [from <language>] [to <language>] <text>"
| import sys
import re
from textblob import TextBlob
class Plugin:
matcher = re.compile(r'translate (?:from ([^ ]+) )?(?:to ([^ ]+) )?(.*)')
def on_command(self, bot, msg, stdin, stdout, reply):
match = self.matcher.match(msg["message"])
if not match:
return
from_lang = match.group(1) or None # let autodetect decide
to_lang = match.group(2) or "en"
message = TextBlob(match.group(3))
try:
translated = message.translate(from_lang=from_lang, to=to_lang)
except:
return
print(translated, file=stdout)
def on_help(self):
return "Usage: translate [from <language>] [to <language>] <text>
| mit | Python |
c455c68661af2e14cdfa44bbd2fbde81dd065c8e | improve multidownload header | keatonb/PTFViewer | multidownload.py | multidownload.py | #!/usr/bin/python
"""
Script to download Palomar Transient Factory light curves for a list of targets
for visualization with PTFViewer: https://github.com/keatonb/PTFViewer
input csv file should have format:
targetname,rad,decd
where rad and decd are the RA and Dec in decimal degrees.
WARNING: Downloads data for nearest PTF source to given coordinates, not
necessarily for the target you want.
Learn more at https://github.com/keatonb/PTFViewer
@author: keatonb
"""
from __future__ import print_function
import sys
import os
import csv
from astropy.coordinates import SkyCoord
from PTFViewer import download_ptf
nargs = len(sys.argv)
if nargs < 2:
print('usage: python multidownload input_file.csv [/data/directory]')
sys.exit()
datadir = os.getcwd()+'/data/'
if len(sys.argv) > 2:
datadir = sys.argv[2]
if datadir[-1] != '/':
datadir += '/'
if not os.path.exists(datadir):
datadir = os.getcwd()+'/data/'
print(('Created data directory at '+datadir))
if not os.path.exists(datadir):
os.makedirs(datadir)
print(('Saving data to '+datadir))
csvname = sys.argv[1]
with open(csvname) as csvfile:
myCSVReader = csv.DictReader(csvfile, fieldnames=['name','ra','dec'],delimiter=",", quotechar='"')
for row in myCSVReader:
coords = SkyCoord(float(row['ra']),float(row['dec']),frame='icrs', unit='deg')
try:
download_ptf(coords,name=row['name'],directory=datadir)
print("Data saved to "+datadir+row['name']+'.xml')
except:
print("No data found at: "+coords.to_string())
| #!/usr/bin/python
"""
Script to download Palomar Transient Factory light curves for a list of targets
for visualization with PTFViewer: https://github.com/keatonb/PTFViewer
input csv file should have format:
targetname,rad,decd
where rad and decd are the RA and Dec in decimal degrees.
WARNING: Downloads data for nearest PTF source to given coordinates, not
necessarily for the target you want.
Learn more at https://github.com/keatonb/PTFViewer
@author: keatonb
"""
from __future__ import print_function
import sys
import os
import csv
from astropy.coordinates import SkyCoord
from PTFViewer import download_ptf
nargs = len(sys.argv)
if nargs < 2:
print('usage: python PTF_downloadlist input_file.csv [/data/directory]')
sys.exit()
datadir = os.getcwd()+'/data/'
if len(sys.argv) > 2:
datadir = sys.argv[2]
if datadir[-1] != '/':
datadir += '/'
if not os.path.exists(datadir):
datadir = os.getcwd()+'/data/'
print(('Created data directory at '+datadir))
if not os.path.exists(datadir):
os.makedirs(datadir)
print(('Saving data to '+datadir))
csvname = sys.argv[1]
with open(csvname) as csvfile:
myCSVReader = csv.DictReader(csvfile, fieldnames=['name','ra','dec'],delimiter=",", quotechar='"')
for row in myCSVReader:
coords = SkyCoord(float(row['ra']),float(row['dec']),frame='icrs', unit='deg')
try:
download_ptf(coords,name=row['name'],directory=datadir)
print("Data saved to "+datadir+row['name']+'.xml')
except:
print("No data found at: "+coords.to_string())
| mit | Python |
e424a285eb7d326bae8b2a7948778c323d0c29e2 | Add BondViewSet | bsmukasa/bond_analytics | bond_analytics_project/bondapi/views.py | bond_analytics_project/bondapi/views.py | from django.shortcuts import render
from rest_framework.viewsets import ModelViewSet
from models import Bond
from serializers import BondSerializer
class BondViewSet(ModelViewSet):
serializer_class = BondSerializer
queryset = Bond.objects.all()
| from django.shortcuts import render
# Create your views here.
| mit | Python |
ec71948a3f1789d374b6f477ff50ad3ae3405285 | Make string check python 2 and 3 compatible. | ui/django-post_office,jrief/django-post_office,RafRaf/django-post_office,ekohl/django-post_office,fapelhanz/django-post_office,JostCrow/django-post_office,yprez/django-post_office,ui/django-post_office | post_office/admin.py | post_office/admin.py | from django.contrib import admin
from django.forms.widgets import TextInput
from django.utils.text import Truncator
from six import string_types
from .fields import CommaSeparatedEmailField
from .models import Email, Log, EmailTemplate, STATUS
def get_message_preview(instance):
return (u'{0}...'.format(instance.message[:25]) if len(instance.message) > 25
else instance.message)
get_message_preview.short_description = 'Message'
class LogInline(admin.StackedInline):
model = Log
extra = 0
class CommaSeparatedEmailWidget(TextInput):
def __init__(self, *args, **kwargs):
super(CommaSeparatedEmailWidget, self).__init__(*args, **kwargs)
self.attrs.update({'class': 'vTextField'})
def _format_value(self, value):
# If the value is a string wrap it in a list so it does not get sliced.
if isinstance(value, string_types):
value = [value, ]
return ','.join([item for item in value])
def requeue(modeladmin, request, queryset):
"""An admin action to requeue emails."""
queryset.update(status=STATUS.queued)
requeue.short_description = 'Requeue selected emails'
class EmailAdmin(admin.ModelAdmin):
list_display = ('id', 'to_display', 'subject', 'template',
'status', 'last_updated')
inlines = [LogInline]
list_filter = ['status']
formfield_overrides = {
CommaSeparatedEmailField: {'widget': CommaSeparatedEmailWidget}
}
actions = [requeue]
def get_queryset(self, request):
return super(EmailAdmin, self).get_queryset(request).select_related('template')
def to_display(self, instance):
return ', '.join(instance.to)
to_display.short_description = 'to'
to_display.admin_order_field = 'to'
class LogAdmin(admin.ModelAdmin):
list_display = ('date', 'email', 'status', get_message_preview)
class EmailTemplateAdmin(admin.ModelAdmin):
list_display = ('name', 'description_shortened', 'subject', 'created')
search_fields = ('name', 'description', 'subject')
fieldsets = [
(None, {
'fields': ('name', 'description'),
}),
('Email', {
'fields': ('subject', 'content', 'html_content'),
}),
]
def description_shortened(self, instance):
return Truncator(instance.description.split('\n')[0]).chars(200)
description_shortened.short_description = 'description'
description_shortened.admin_order_field = 'description'
admin.site.register(Email, EmailAdmin)
admin.site.register(Log, LogAdmin)
admin.site.register(EmailTemplate, EmailTemplateAdmin)
| from django.contrib import admin
from django.forms.widgets import TextInput
from django.utils.text import Truncator
from .fields import CommaSeparatedEmailField
from .models import Email, Log, EmailTemplate, STATUS
def get_message_preview(instance):
return (u'{0}...'.format(instance.message[:25]) if len(instance.message) > 25
else instance.message)
get_message_preview.short_description = 'Message'
class LogInline(admin.StackedInline):
model = Log
extra = 0
class CommaSeparatedEmailWidget(TextInput):
def __init__(self, *args, **kwargs):
super(CommaSeparatedEmailWidget, self).__init__(*args, **kwargs)
self.attrs.update({'class': 'vTextField'})
def _format_value(self, value):
# If the value is a string wrap it in a list so it does not get sliced.
if isinstance(value, basestring):
value = [value, ]
return ','.join([item for item in value])
def requeue(modeladmin, request, queryset):
"""An admin action to requeue emails."""
queryset.update(status=STATUS.queued)
requeue.short_description = 'Requeue selected emails'
class EmailAdmin(admin.ModelAdmin):
list_display = ('id', 'to_display', 'subject', 'template',
'status', 'last_updated')
inlines = [LogInline]
list_filter = ['status']
formfield_overrides = {
CommaSeparatedEmailField: {'widget': CommaSeparatedEmailWidget}
}
actions = [requeue]
def get_queryset(self, request):
return super(EmailAdmin, self).get_queryset(request).select_related('template')
def to_display(self, instance):
return ', '.join(instance.to)
to_display.short_description = 'to'
to_display.admin_order_field = 'to'
class LogAdmin(admin.ModelAdmin):
list_display = ('date', 'email', 'status', get_message_preview)
class EmailTemplateAdmin(admin.ModelAdmin):
list_display = ('name', 'description_shortened', 'subject', 'created')
search_fields = ('name', 'description', 'subject')
fieldsets = [
(None, {
'fields': ('name', 'description'),
}),
('Email', {
'fields': ('subject', 'content', 'html_content'),
}),
]
def description_shortened(self, instance):
return Truncator(instance.description.split('\n')[0]).chars(200)
description_shortened.short_description = 'description'
description_shortened.admin_order_field = 'description'
admin.site.register(Email, EmailAdmin)
admin.site.register(Log, LogAdmin)
admin.site.register(EmailTemplate, EmailTemplateAdmin)
| mit | Python |
40c16f7acbbbe68e7a038b6eebb81a84bdc7b729 | Relocate pipelines. | ondergetekende/logshipper,Kami/logshipper | logshipper/cmd.py | logshipper/cmd.py | # Copyright 2014 Koert van der Veer
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import logging
import eventlet
import logshipper.pipeline
ARGS = None
LOG = None
def main():
global LOG, ARGS
parser = argparse.ArgumentParser(
description="Processes log messages and sends them elsewhere")
parser.add_argument('--pipeline-path',
default="/etc/logshipper/",
help='Where to find pipelines (*.yml files)')
parser.add_argument('--verbose', action='store_true')
parser.add_argument('--debug', action='store_true')
ARGS = parser.parse_args()
if ARGS.debug:
log_level = 'DEBUG'
elif ARGS.verbose:
log_level = 'INFO'
else:
log_level = 'WARNING'
logging.basicConfig(level=log_level)
LOG = logging.getLogger(__name__)
pipeline_manager = logshipper.pipeline.PipelineManager(ARGS.pipeline_path)
pipeline_manager.start()
try:
while True:
eventlet.sleep(86400)
finally:
pipeline_manager.stop()
| # Copyright 2014 Koert van der Veer
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import logging
import eventlet
import logshipper.pipeline
ARGS = None
LOG = None
def main():
global LOG, ARGS
parser = argparse.ArgumentParser(
description="Processes log messages and sends them elsewhere")
parser.add_argument('--pipeline-path',
default="/etc/logshipper/pipelines/",
help='Where to find pipelines (*.yml files)')
parser.add_argument('--verbose', action='store_true')
parser.add_argument('--debug', action='store_true')
ARGS = parser.parse_args()
if ARGS.debug:
log_level = 'DEBUG'
elif ARGS.verbose:
log_level = 'INFO'
else:
log_level = 'WARNING'
logging.basicConfig(level=log_level)
LOG = logging.getLogger(__name__)
pipeline_manager = logshipper.pipeline.PipelineManager(ARGS.pipeline_path)
pipeline_manager.start()
try:
while True:
eventlet.sleep(86400)
finally:
pipeline_manager.stop()
| apache-2.0 | Python |
c159f9986dfd45a864561cb1ff7936e6b4a272de | bump version 2.0.3 | predicthq/sdk-py | predicthq/version.py | predicthq/version.py | __version__ = "2.0.3"
| __version__ = "2.0.2"
| mit | Python |
65c62108e257c6e82ed6ff24ea9884cee900c1c6 | read users from a csv file | alan412/WR_RFID_RaspberryPi,alan412/WR_RFID_RaspberryPi,alan412/WR_RFID_RaspberryPi,alan412/WR_RFID_RaspberryPi | rfid_test.py | rfid_test.py | from keyboard_alike import reader
import datetime
from phant import Phant
tags = {'0009909662' : 'Philip Smith',
'0004406858' : 'Joshua Smith',
'0009909876' : 'Abigail Smith',
'0003567797' : 'Linda Whipker'}
p = Phant('yAYZ9aJ98Kiyz4XNm5NW', 'location', 'id', 'name', 'time', private_key='4Wqom46m9niK2k8pzxp4')
def getName(idString):
with open('/home/pi/rfid/web/users.txt', 'r') as file:
for line in file:
row = line.split(', ');
if (row[0] == idString):
return row[1];
return 'Unknown'
def logAccess(id):
name = getName(id)
time = datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d %H:%M:%S')
p.log("Hopewell", id, name, time)
f = open('/home/pi/rfid/access_log.txt','a')
f.write("{idStr},{nameStr},{datetime}\n".format(idStr=id,nameStr=name,datetime=time))
f.close()
reader = reader.Reader(0x08ff, 0x0009, 84, 16, should_reset=False) # From the documentation - the VID and DEVID
reader.initialize()
while(1):
card = reader.read().strip() # get the card number
f = open('/home/pi/rfid/web/message.txt', 'w')
f.write("Hi {name}".format(name=getName(card)))
f.close()
logAccess(card)
reader.disconnect()
| from keyboard_alike import reader
import datetime
from phant import Phant
tags = {'0009909662' : 'Philip Smith',
'0004406858' : 'Joshua Smith',
'0009909876' : 'Abigail Smith',
'0004304910' : 'Dad'}
p = Phant('yAYZ9aJ98Kiyz4XNm5NW', 'location', 'id', 'name', 'time', private_key='4Wqom46m9niK2k8pzxp4')
def getName(idString):
try:
return tags[card]
except:
return 'Unknown: ' + idString
def logAccess(id):
name = getName(id)
time = datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d %H:%M:%S')
p.log("Hopewell", id, name, time)
f = open('/home/pi/rfid/access_log.txt','a')
f.write("{idStr},{nameStr},{datetime}\n".format(idStr=id,nameStr=name,datetime=time))
f.close()
reader = reader.Reader(0x08ff, 0x0009, 84, 16, should_reset=False) # From the documentation - the VID and DEVID
reader.initialize()
while(1):
card = reader.read().strip() # get the card number
f = open('/home/pi/rfid/web/message.txt', 'w')
f.write("Hi {name}".format(name=getName(card)))
f.close()
logAccess(card)
reader.disconnect()
| mit | Python |
18cd40db2dd18deb9db529ff7c28fc5e2d095658 | Add strict mode to schema | vberlier/nbtlib | nbtlib/schema.py | nbtlib/schema.py |
"""This module defines tools for creating tag schemas.
Exported items:
schema -- Helper function to define compound schemas
CompoundSchema -- `Compound` subclass that enforces a tag schema
"""
__all__ = ['schema', 'CompoundSchema']
from itertools import chain
from .tag import Compound
def schema(name, dct, *, strict=False):
"""Create a compound tag schema.
This function is a short convenience function that makes it easy to
subclass the base `CompoundSchema` class.
The `name` argument is the name of the class and `dct` should be a
dictionnary containing the actual schema. The schema should map keys
to tag types or other compound schemas.
If the `strict` keyword only argument is set to True, interacting
with keys that are not defined in the schema will raise a
`TypeError`.
"""
return type(name, (CompoundSchema,), {'__slots__': (), 'schema': dct,
'strict': strict})
class CompoundSchema(Compound):
    """`Compound` subclass whose keys are validated against a schema.

    The class attribute `schema` maps keys to tag types (or nested
    schemas); every inherited mutating operation casts incoming values
    to the declared type, which saves repetitive manual conversion for
    recurring data structures.

    Class attributes:
        schema -- Dictionnary mapping keys to tag types or other schemas
        strict -- Boolean enabling strict schema validation
    """

    __slots__ = ()
    schema = {}
    strict = False

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Re-cast anything the base constructor stored with the wrong type.
        for key, value in self.items():
            cast = self._cast(key, value)
            if cast is not value:
                super().__setitem__(key, cast)

    def __setitem__(self, key, value):
        super().__setitem__(key, self._cast(key, value))

    def update(self, mapping, **kwargs):
        pairs = chain(mapping.items(), kwargs.items())
        super().update((key, self._cast(key, value)) for key, value in pairs)

    def _cast(self, key, value):
        """Return ``value`` converted to the schema type declared for ``key``."""
        schema_type = self.schema.get(key)
        if schema_type is None:
            # Unknown key: pass through unless strict validation is on.
            if self.strict:
                raise TypeError(f'Invalid key {key!r}')
            return value
        return value if isinstance(value, schema_type) else schema_type(value)
|
"""This module defines tools for creating tag schemas.
Exported items:
schema -- Helper function to define compound schemas
CompoundSchema -- `Compound` subclass that enforces a tag schema
"""
__all__ = ['schema', 'CompoundSchema']
from itertools import chain
from .tag import Compound
def schema(name, dct):
    """Create a compound tag schema.

    This function is a short convenience function that makes it easy to
    subclass the base `CompoundSchema` class.

    The `name` argument is the name of the class and `dct` should be a
    dictionnary containing the actual schema. The schema should map keys
    to tag types or other compound schemas.
    """
    # Dynamically build a CompoundSchema subclass carrying the given schema.
    return type(name, (CompoundSchema,), {'__slots__': (), 'schema': dct})
class CompoundSchema(Compound):
    """Class that extends the base `Compound` tag by enforcing a schema.

    Defining a custom schema is really useful if you're dealing with
    recurring data structures. Subclassing the `CompoundSchema` class
    with your own schema will save you some typing by casting all the
    keys defined in the schema to the appropriate tag type.

    The class inherits from `Compound` and will cast values to the
    predefined tag types for all of the inherited mutating operations.

    Class attributes:
        schema -- Dictionnary mapping keys to tag types or other schemas
    """
    __slots__ = ()
    schema = {}

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Re-cast values the base constructor stored with the wrong type.
        for key, value in self.items():
            correct_value = self._cast(key, value)
            if correct_value is not value:
                super().__setitem__(key, correct_value)

    def __setitem__(self, key, value):
        super().__setitem__(key, self._cast(key, value))

    def update(self, mapping, **kwargs):
        # Cast both the positional mapping and the keyword pairs.
        pairs = chain(mapping.items(), kwargs.items())
        super().update(
            (key, self._cast(key, value)) for key, value in pairs
        )

    def _cast(self, key, value):
        # Convert ``value`` to the declared tag type; keys absent from the
        # schema pass through unchanged.
        schema_type = self.schema.get(key, None)
        if schema_type and not isinstance(value, schema_type):
            return schema_type(value)
        return value
| mit | Python |
3e66d926499b91d13c84d155c2ed30a603a2b1b3 | fix for iodp_samples_magic | lfairchild/PmagPy,Caoimhinmg/PmagPy,lfairchild/PmagPy,Caoimhinmg/PmagPy,lfairchild/PmagPy,Caoimhinmg/PmagPy | programs/__init__.py | programs/__init__.py | #!/usr/bin/env pythonw
import sys
from os import path

# Name of the conversion script being run, e.g. 'sio_magic.py'.
command = path.split(sys.argv[0])[-1]
from program_envs import prog_env
# Some programs need a specific matplotlib backend; look it up by the
# script name with its '.py' suffix stripped.
mpl_env = prog_env.get(command[:-3])
import matplotlib
if mpl_env:
    matplotlib.use(mpl_env)
else:
    matplotlib.use("TKAgg")
import generic_magic
import sio_magic
import cit_magic
import _2g_bin_magic
import huji_magic
import huji_magic_new
import ldeo_magic
import iodp_srm_magic
import iodp_dscr_magic
import iodp_samples_magic
import pmd_magic
import tdt_magic
import jr6_jr6_magic
import jr6_txt_magic
import bgc_magic

# NOTE(review): __all__ conventionally holds name strings, not module
# objects — confirm downstream consumers expect modules here.
__all__ = [generic_magic, sio_magic, cit_magic, _2g_bin_magic, huji_magic,
           huji_magic_new, ldeo_magic, iodp_srm_magic, iodp_dscr_magic,
           pmd_magic, tdt_magic, jr6_jr6_magic, jr6_txt_magic, bgc_magic,
           iodp_samples_magic]
| import sys
from os import path

# Script name (e.g. 'sio_magic.py') selects the matplotlib backend below.
command = path.split(sys.argv[0])[-1]
from program_envs import prog_env
mpl_env = prog_env.get(command[:-3])
import matplotlib
if mpl_env:
    matplotlib.use(mpl_env)
else:
    matplotlib.use("TKAgg")
import generic_magic
import sio_magic
import cit_magic
import _2g_bin_magic
import huji_magic
import huji_magic_new
import ldeo_magic
import iodp_srm_magic
import iodp_dscr_magic
import pmd_magic
import tdt_magic
import jr6_jr6_magic
import jr6_txt_magic
import bgc_magic

# NOTE(review): __all__ conventionally holds name strings, not module objects.
__all__ = [generic_magic, sio_magic, cit_magic, _2g_bin_magic, huji_magic,
           huji_magic_new, ldeo_magic, iodp_srm_magic, iodp_dscr_magic,
           pmd_magic, tdt_magic, jr6_jr6_magic, jr6_txt_magic, bgc_magic]
| bsd-3-clause | Python |
df9d927f0da9a6d3591e08219ebeaf8f0a274376 | add feature for whether token contains at least one number #11 | markbaas/usaddress,yl2695/usaddress,frankleng/usaddress,cubgs53/usaddress,ahlusar1989/probablepeople | usaddress/__init__.py | usaddress/__init__.py | import os
import string
import pycrfsuite
import re
TAGGER = pycrfsuite.Tagger()
TAGGER.open(os.path.split(os.path.abspath(__file__))[0]
+ '/usaddr.crfsuite')
def parse(address_string) :
    """Tokenize *address_string* and tag each token with the trained CRF."""
    re_tokens = re.compile(r"""
    \b[^\s]+(?=\b) # 'F-H ' -> ['F-H']
    |
    [^\w\s](?=\s) # [', '] -> [',']
    """,
    re.VERBOSE | re.UNICODE)
    tokens = re_tokens.findall(address_string)
    features = addr2features(tokens)
    tags = TAGGER.tag(features)
    # Pair each surface token with its predicted address-part label.
    return zip(tokens, tags)
def tokenFeatures(token):
    """Compute the per-token surface-form features used by the CRF tagger."""
    all_digits = token.isdigit()
    has_digit = any(ch.isdigit() for ch in token)
    return {
        'token.lower': token.lower(),
        'token.isupper': token.isupper(),
        'token.islower': token.islower(),
        'token.istitle': token.istitle(),
        'token.isalldigits': all_digits,
        'token.hasadigit': has_digit,
        'token.isstartdigit': token[0].isdigit(),
        'digit.length': len(token) if all_digits else 0,
        'token.ispunctuation': token in string.punctuation,
        'token.length': len(token),
    }
def addr2features(address):
    """Turn a token sequence into per-token feature lists for pycrfsuite.

    Each token's features are augmented with its neighbours' features
    under ('previous', ...) / ('next', ...) keys, plus start/end markers.
    """
    previous_feature = tokenFeatures(address[0])
    feature_sequence = [previous_feature]
    for token in address[1:] :
        next_feature = tokenFeatures(token)
        # Expose the upcoming token's features on the current token ...
        for key, value in next_feature.items() :
            feature_sequence[-1][('next', key)] = value
        feature_sequence.append(next_feature.copy())
        # ... and the current token's features on the new one.
        for key, value in previous_feature.items() :
            feature_sequence[-1][('previous', key)] = value
        previous_feature = next_feature
    feature_sequence[0]['address.start'] = True
    feature_sequence[-1]['address.end'] = True
    if len(feature_sequence) > 1 :
        feature_sequence[1][('previous', 'address.start')] = True
        feature_sequence[-2][('next', 'address.end')] = True
    # pycrfsuite expects each item as a flat list of feature strings.
    feature_sequence = [[str(each) for each in feature.items()]
                        for feature in feature_sequence]
    return feature_sequence
| import os
import string
import pycrfsuite
import re
TAGGER = pycrfsuite.Tagger()
TAGGER.open(os.path.split(os.path.abspath(__file__))[0]
+ '/usaddr.crfsuite')
def parse(address_string) :
    """Tokenize *address_string* and tag each token with the trained CRF."""
    re_tokens = re.compile(r"""
    \b[^\s]+(?=\b) # 'F-H ' -> ['F-H']
    |
    [^\w\s](?=\s) # [', '] -> [',']
    """,
    re.VERBOSE | re.UNICODE)
    tokens = re_tokens.findall(address_string)
    features = addr2features(tokens)
    tags = TAGGER.tag(features)
    # Pair each surface token with its predicted address-part label.
    return zip(tokens, tags)
def tokenFeatures(token):
    """Compute the surface-form feature dict for a single token."""
    is_digit = token.isdigit()
    return {
        'token.lower': token.lower(),
        'token.isupper': token.isupper(),
        'token.islower': token.islower(),
        'token.istitle': token.istitle(),
        'token.isdigit': is_digit,
        'token.isstartdigit': token[0].isdigit(),
        'digit.length': len(token) if is_digit else 0,
        'token.ispunctuation': token in string.punctuation,
        'token.length': len(token),
    }
def addr2features(address):
    """Turn a token sequence into per-token feature lists for pycrfsuite.

    Each token's features are augmented with its neighbours' features
    under ('previous', ...) / ('next', ...) keys, plus start/end markers.
    """
    previous_feature = tokenFeatures(address[0])
    feature_sequence = [previous_feature]
    for token in address[1:] :
        next_feature = tokenFeatures(token)
        # Expose the upcoming token's features on the current token ...
        for key, value in next_feature.items() :
            feature_sequence[-1][('next', key)] = value
        feature_sequence.append(next_feature.copy())
        # ... and the current token's features on the new one.
        for key, value in previous_feature.items() :
            feature_sequence[-1][('previous', key)] = value
        previous_feature = next_feature
    feature_sequence[0]['address.start'] = True
    feature_sequence[-1]['address.end'] = True
    if len(feature_sequence) > 1 :
        feature_sequence[1][('previous', 'address.start')] = True
        feature_sequence[-2][('next', 'address.end')] = True
    # pycrfsuite expects each item as a flat list of feature strings.
    feature_sequence = [[str(each) for each in feature.items()]
                        for feature in feature_sequence]
    return feature_sequence
| mit | Python |
eaf51e8054d5b3848fc30d3e132bcbb19866db65 | add more robust auth plugin | fkmclane/web.py | web/auth.py | web/auth.py | import base64
import web
class AuthError(web.HTTPError):
    """HTTP 401 error carrying a WWW-Authenticate challenge."""

    def __init__(self, scheme, realm, code=401, message=None, headers=None, status_message=None):
        # BUG FIX: the original called ``super.__init__(...)`` on the
        # ``super`` builtin itself (missing call parentheses), which raises
        # at runtime; it must be ``super().__init__(...)``.
        super().__init__(code, message, headers, status_message)
        self.scheme = scheme
        self.realm = realm
        # Assumes the base class stored ``headers`` on self — confirm
        # against web.HTTPError.
        if self.headers is None:
            self.headers = web.HTTPHeaders()
        self.headers.set('WWW-Authenticate', self.scheme + ' realm="' + self.realm + '"')
class AuthMixIn:
    """Mix-in adding Authorization-header handling to a request handler.

    Subclasses implement ``auth_<scheme>(token)`` methods (lower-case
    scheme name) that return an auth object or raise AuthError.
    """

    realm = 'Unknown'

    def schemes(self):
        # lots of magic for finding all lower case attributes beginning
        # with 'auth_' and removing the 'auth_'
        return (scheme[5:] for scheme in dir(self) if scheme.startswith('auth_') and scheme.islower())

    def authorized(self, scheme, token):
        # BUG FIX: the original dispatched on ``self.method`` (the HTTP
        # method) and called the handler with no arguments; it must select
        # the handler by the request's auth scheme and pass the credential
        # token (e.g. auth_basic(token)).
        handler = getattr(self, 'auth_' + scheme.lower(), None)
        if handler is None:
            raise AuthError(','.join(s.title() for s in self.schemes()), self.realm)
        return handler(token)

    def respond(self):
        auth = self.request.headers.get('Authorization')
        if not auth:
            # NOTE(review): ``self.default`` is not defined in this module;
            # presumably supplied by a subclass — confirm.
            raise AuthError(self.default, self.realm)
        # Header format: '<Scheme> <token>'.
        scheme, token = auth.split(' ', 1)
        self.auth = self.authorized(scheme, token)
        return super().respond()
class AuthHandler(AuthMixIn, web.HTTPHandler):
    """HTTP handler that requires an Authorization header (any auth_* scheme)."""
    pass
class BasicAuthMixIn(AuthMixIn):
    """Mix-in implementing the HTTP Basic authentication scheme."""

    def auth_basic(self, auth):
        # ``auth`` is the base64-encoded 'user:password' credential token.
        user, password = base64.b64decode(auth.encode(web.default_encoding)).decode(web.default_encoding).split(':', 1)
        # NOTE(review): ``login`` is not defined in this module; it must be
        # supplied by the class using this mix-in — confirm.
        auth = self.login(user, password)
        if not auth:
            # Reject with a fresh Basic challenge.
            raise AuthError('Basic', self.realm)
        return auth
class BasicAuthHandler(BasicAuthMixIn, web.HTTPHandler):
    """HTTP handler protected by Basic authentication."""
    pass
| import base64
import web
class AuthMixIn:
    """Mix-in gating a handler's response on an Authorization header."""

    # Scheme name advertised in the WWW-Authenticate challenge.
    scheme = 'None'

    def authorized(self, auth):
        # Default: accept everything; subclasses override with real checks.
        return True

    def authenticate(self):
        # Issue a 401 challenge naming our scheme.
        auth_headers = web.HTTPHeaders()
        auth_headers.set('WWW-Authenticate', self.scheme)
        raise web.HTTPError(401, headers=auth_headers)

    def respond(self):
        # Pass the raw header value (scheme prefix included) to authorized().
        auth = self.request.headers.get('Authorization')
        if auth is None or not self.authorized(auth):
            self.authenticate()
        return super().respond()
class AuthHandler(AuthMixIn, web.HTTPHandler):
    """HTTP handler gated by AuthMixIn's Authorization check."""
    pass
class BasicAuthMixIn(AuthMixIn):
    """Mix-in implementing HTTP Basic authentication."""

    scheme = 'Basic'

    def auth
class BasicAuthHandler(BasicAuthMixIn, web.HTTPHandler):
pass
| mit | Python |
e860f1adfcae859fcfbb56dd6cf2c90dcac8d968 | Fix resolution size | uccser/cs-unplugged,uccser/cs-unplugged,uccser/cs-unplugged,uccser/cs-unplugged | csunplugged/at_a_distance/settings.py | csunplugged/at_a_distance/settings.py | """Settings for the at a distance application."""
AT_A_DISTANCE_INTRODUCTION_FILENAME = 'introduction.md'
AT_A_DISTANCE_SLIDES_TEMPLATE_BASE_PATH = 'at_a_distance/lesson-slides'
AT_A_DISTANCE_SLIDE_RESOLUTION_HEIGHT = "1080"
AT_A_DISTANCE_SLIDE_RESOLUTION_WIDTH = "1920"
# Settings computed from above settings
AT_A_DISTANCE_SLIDE_RESOLUTION = f'{AT_A_DISTANCE_SLIDE_RESOLUTION_WIDTH}x{AT_A_DISTANCE_SLIDE_RESOLUTION_HEIGHT}'
| """Settings for the at a distance application."""
AT_A_DISTANCE_INTRODUCTION_FILENAME = 'introduction.md'
AT_A_DISTANCE_SLIDES_TEMPLATE_BASE_PATH = 'at_a_distance/lesson-slides'
AT_A_DISTANCE_SLIDE_RESOLUTION_HEIGHT = "1080"
AT_A_DISTANCE_SLIDE_RESOLUTION_WIDTH = "1920"
# Settings computed from above settings
AT_A_DISTANCE_SLIDE_RESOLUTION = f'{AT_A_DISTANCE_SLIDE_RESOLUTION_HEIGHT}x{AT_A_DISTANCE_SLIDE_RESOLUTION_WIDTH}'
| mit | Python |
44b84e2a082eab34aaac97ffa5ecfc449db7e3c7 | update secondary latency collector | couchbase/cbagent,mikewied/cbagent | cbagent/collectors/secondary_latency.py | cbagent/collectors/secondary_latency.py | from cbagent.collectors import Collector
import os.path
class SecondaryLatencyStats(Collector):
COLLECTOR = "secondaryscan_latency"
def _get_secondaryscan_latency(self):
stats = {}
if os.path.isfile(self.secondary_statsfile):
with open(self.secondary_statsfile, 'rb') as fh:
first = next(fh).decode()
fh.seek(-400, 2)
last = fh.readlines()[-1].decode()
duration = last.split(',')[-1]
stats = {}
latency = duration.split(':')[1]
latency = latency.rstrip()
stats[duration.split(':')[0]] = latency
return stats
def sample(self):
stats = self._get_secondaryscan_latency()
if stats:
self.update_metric_metadata(stats.keys())
self.store.append(stats, cluster=self.cluster, collector=self.COLLECTOR)
def update_metadata(self):
self.mc.add_cluster()
| from cbagent.collectors import Collector
import os.path
class SecondaryLatencyStats(Collector):
COLLECTOR = "secondaryscan_latency"
def _get_secondaryscan_latency(self):
stats = {}
if os.path.isfile(self.secondary_statsfile):
with open(self.secondary_statsfile, 'rb') as fh:
first = next(fh).decode()
fh.seek(-200, 2)
last = fh.readlines()[-1].decode()
duration = last.split(',')[-1]
stats = {}
stats[duration.split(':')[0]] = duration.split(':')[1]
return stats
def sample(self):
stats = self._get_secondaryscan_latency()
if stats:
self.update_metric_metadata(stats.keys())
self.store.append(stats, cluster=self.cluster, collector=self.COLLECTOR)
def update_metadata(self):
self.mc.add_cluster()
| apache-2.0 | Python |
4bff7a161fb5de20efef0f514c3bdf42cd37f857 | Fix error with starting server as a non-daemon | cadyyan/mcserver-tools,cadyyan/mcserver-tools | mcserver/start.py | mcserver/start.py | """
Utlities for starting the server
"""
import daemon
import lockfile.pidlockfile
import os
import os.path
import subprocess
from mcserver import base, reflection
def start_server(path, is_daemon = False, uid = None, gid = None):
"""
Start the server. Optionally start it as a daemon.
"""
base._validate_server_path(path)
settings = base.load_server_settings(path)
jvm = base._get_setting_jvm(settings)
max_heap = base._get_setting_max_heap(settings)
max_stack = base._get_setting_max_stack(settings)
perm_gen = base._get_setting_perm_gen(settings)
jar = base._get_setting_jar(settings)
extra_args = base._get_extra_start_args(settings)
base.LOGGER.info('Starting server...')
if not is_daemon:
if uid != None:
base.LOGGER.warn('User option is ignored when not running as a daemon')
uid = None
if gid != None:
base.LOGGER.warn('Group option is ignored when not running as a daemon')
gid = None
if is_daemon:
base.LOGGER.debug(
'Starting daemon process with user and group: {user}, {group}'.format(
user = uid,
group = gid,
)
)
launcher_config = base._get_launcher(settings)
launcher_class = reflection.get_class(launcher_config['class'])
launcher = launcher_class(
path,
**launcher_config
)
launcher.start(
jvm,
max_heap,
max_stack,
perm_gen,
jar,
extra_args,
uid,
gid,
)
else:
command = base._build_command(jvm, max_heap, max_stack, perm_gen, jar, extra_args)
base.LOGGER.debug('Starting server with command {0}'.format(command))
process = subprocess.Popen(command, shell = True) # TODO: theres no more command here!
process.wait()
| """
Utlities for starting the server
"""
import daemon
import lockfile.pidlockfile
import os
import os.path
import subprocess
from mcserver import base, reflection
def start_server(path, is_daemon = False, uid = None, gid = None):
"""
Start the server. Optionally start it as a daemon.
"""
base._validate_server_path(path)
settings = base.load_server_settings(path)
jvm = base._get_setting_jvm(settings)
max_heap = base._get_setting_max_heap(settings)
max_stack = base._get_setting_max_stack(settings)
perm_gen = base._get_setting_perm_gen(settings)
jar = base._get_setting_jar(settings)
extra_args = base._get_extra_start_args(settings)
base.LOGGER.info('Starting server...')
if not is_daemon:
if uid != None:
base.LOGGER.warn('User option is ignored when not running as a daemon')
uid = None
if gid != None:
base.LOGGER.warn('Group option is ignored when not running as a daemon')
gid = None
if is_daemon:
base.LOGGER.debug(
'Starting daemon process with user and group: {user}, {group}'.format(
user = uid,
group = gid,
)
)
launcher_config = base._get_launcher(settings)
launcher_class = reflection.get_class(launcher_config['class'])
launcher = launcher_class(
path,
**launcher_config
)
launcher.start(
jvm,
max_heap,
max_stack,
perm_gen,
jar,
extra_args,
uid,
gid,
)
else:
process = subprocess.Popen(command, shell = True) # TODO: theres no more command here!
process.wait()
| mit | Python |
deca2df30576b59450ac00a9800042d7cb97d6a9 | update capture/color workarounds | xflr6/dmengine | run-tests.py | run-tests.py | #!/usr/bin/env python
# run-tests.py
import sys
import pytest
ARGS = [
#'--exitfirst',
#'--pdb',
]
if 'idlelib' in sys.modules or 'thonny' in sys.modules:
ARGS.extend(['--capture=sys', '--color=no'])
elif sys.version_info[0] == 2 and 'win_unicode_console' in sys.modules:
ARGS.append('--capture=sys')
pytest.main(ARGS + sys.argv[1:])
| #!/usr/bin/env python
# run-tests.py
import sys
import platform
import pytest
ARGS = [
#'--exitfirst',
#'--pdb',
]
if 'idlelib' in sys.modules:
ARGS.append('--color=no')
if platform.system().lower() == 'windows':
ARGS.append('--capture=sys')
pytest.main(ARGS + sys.argv[1:])
| mit | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.