commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13
values | lang stringclasses 23
values |
|---|---|---|---|---|---|---|---|---|
3d1f56bd608aedb13517629d3878739bed62c67b | Fix the bidict tests | tom-mi/pyrad,GIC-de/pyrad | pyrad/tests/testBidict.py | pyrad/tests/testBidict.py | import operator
import unittest
from pyrad.bidict import BiDict
class BiDictTests(unittest.TestCase):
def setUp(self):
self.bidict=BiDict()
def testStartEmpty(self):
self.assertEqual(len(self.bidict), 0)
self.assertEqual(len(self.bidict.forward), 0)
self.assertEqual(len(self.bidict.backward), 0)
def testLength(self):
self.assertEqual(len(self.bidict), 0)
self.bidict.Add("from", "to")
self.assertEqual(len(self.bidict), 1)
del self.bidict["from"]
self.assertEqual(len(self.bidict), 0)
def testDeletion(self):
self.assertRaises(KeyError, operator.delitem, self.bidict, "missing")
self.bidict.Add("missing", "present")
del self.bidict["missing"]
def testBackwardDeletion(self):
self.assertRaises(KeyError, operator.delitem, self.bidict, "missing")
self.bidict.Add("missing", "present")
del self.bidict["present"]
self.assertEqual(self.bidict.HasForward("missing"), False)
def testForwardAccess(self):
self.bidict.Add("shake", "vanilla")
self.bidict.Add("pie", "custard")
self.assertEqual(self.bidict.HasForward("shake"), True)
self.assertEqual(self.bidict.GetForward("shake"), "vanilla")
self.assertEqual(self.bidict.HasForward("pie"), True)
self.assertEqual(self.bidict.GetForward("pie"), "custard")
self.assertEqual(self.bidict.HasForward("missing"), False)
self.assertRaises(KeyError, self.bidict.GetForward, "missing")
def testBackwardAccess(self):
self.bidict.Add("shake", "vanilla")
self.bidict.Add("pie", "custard")
self.assertEqual(self.bidict.HasBackward("vanilla"), True)
self.assertEqual(self.bidict.GetBackward("vanilla"), "shake")
self.assertEqual(self.bidict.HasBackward("missing"), False)
self.assertRaises(KeyError, self.bidict.GetBackward, "missing")
def testItemAccessor(self):
self.bidict.Add("shake", "vanilla")
self.bidict.Add("pie", "custard")
self.assertRaises(KeyError, operator.getitem, self.bidict, "missing")
self.assertEquals(self.bidict["shake"], "vanilla")
self.assertEquals(self.bidict["pie"], "custard")
| import operator
import unittest
from pyrad.bidict import BiDict
class BiDictTests(unittest.TestCase):
def setUp(self):
self.bidict=BiDict()
def testStartEmpty(self):
self.assertEqual(len(self.bidict), 0)
self.assertEqual(len(self.bidict.forward), 0)
self.assertEqual(len(self.bidict.backward), 0)
def testLength(self):
self.assertEqual(len(self.bidict), 0)
self.bidict.Add("from", "to")
self.assertEqual(len(self.bidict), 1)
del self.bidict["from"]
self.assertEqual(len(self.bidict), 0)
def testDeletion(self):
self.assertRaises(KeyError, operator.delitem, self.bidict, "missing")
self.bidict.Add("missing", "present")
del self.bidict["missing"]
def testBackwardDeletion(self):
self.assertRaises(KeyError, operator.delitem, self.bidict, "missing")
self.bidict.Add("missing", "present")
del self.bidict["present"]
self.assertEqual(self.bidict.HasForward("missing"), False)
def testForwardAccess(self):
self.bidict.Add("shake", "vanilla")
self.bidict.Add("pie", "custard")
self.assertEqual(self.bidict.HasForward("shake"), True)
self.assertEqual(self.bidict.GetForward("shake"), "vanilla")
self.assertEqual(self.bidict.HasForward("pie"), True)
self.assertEqual(self.bidict.GetForward("pie"), "custard")
self.assertEqual(self.bidict.HasForward("missing"), False)
self.assertRaises(KeyError, self.bidict.GetForward("missing"))
def testBackwardAccess(self):
self.bidict.Add("shake", "vanilla")
self.bidict.Add("pie", "custard")
self.assertEqual(self.bidict.HasBackward("vanilla"), True)
self.assertEqual(self.bidict.GetBackward("vanilla"), "shake")
self.assertEqual(self.bidict.HasBackward("missing"), False)
self.assertRaises(KeyError, self.bidict.GetBackward("missing"))
def testItemAccessor(self):
self.bidict.Add("shake", "vanilla")
self.bidict.Add("pie", "custard")
self.assertRaises(KeyError, operator.getitem, self.bidict, "missing")
self.assertEquals(self.bidict["shake"], "vanilla")
self.assertEquals(self.bidict["pie"], "custard")
| bsd-3-clause | Python |
261328507e494683a33b74c3218788cb31cb4fab | update document __init__ | mylokin/mongoext | mongoext/document.py | mongoext/document.py | from __future__ import absolute_import
import mongoext.collection
import mongoext.fields
import mongoext.exc
class MetaDocument(type):
def __new__(cls, name, bases, attrs):
fields = {}
for base in bases:
for attr, obj in vars(base).iteritems():
if issubclass(type(obj), mongoext.fields.Field):
fields[attr] = obj
for attr, obj in attrs.iteritems():
if issubclass(type(obj), mongoext.fields.Field):
fields[attr] = obj
attrs['FIELDS'] = fields
return super(MetaDocument, cls).__new__(cls, name, bases, attrs)
def __init__(cls, name, bases, attrs):
for attr, obj in vars(cls).iteritems():
if issubclass(type(obj), mongoext.collection.Collection):
obj._model = cls
super(MetaDocument, cls).__init__(name, bases, attrs)
class Document(object):
__metaclass__ = MetaDocument
FIELDS = None
_id = mongoext.fields.Field()
def __init__(self, **kw):
for name in self.FIELDS:
validate = self.FIELDS[name]
value = kw.get(name)
if value:
try:
setattr(self, name, validate(value))
except ValueError as e:
raise ValueError('{}: {}'.format(e.message, name))
else:
setattr(self, name, None)
def save(self):
self.__init__(**vars(self))
def to_dict(self):
return {f: getattr(self, f, None) for f in self.FIELDS}
def __repr__(self):
return '<{}: {}>'.format(type(self).__name__, self._id)
| from __future__ import absolute_import
import mongoext.collection
import mongoext.fields
import mongoext.exc
class MetaDocument(type):
def __new__(cls, name, bases, attrs):
fields = {}
for base in bases:
for attr, obj in vars(base).iteritems():
if issubclass(type(obj), mongoext.fields.Field):
fields[attr] = obj
for attr, obj in attrs.iteritems():
if issubclass(type(obj), mongoext.fields.Field):
fields[attr] = obj
attrs['FIELDS'] = fields
return super(MetaDocument, cls).__new__(cls, name, bases, attrs)
def __init__(cls, name, bases, attrs):
for attr, obj in vars(cls).iteritems():
if issubclass(type(obj), mongoext.collection.Collection):
obj._model = cls
super(MetaDocument, cls).__init__(name, bases, attrs)
class Document(object):
__metaclass__ = MetaDocument
FIELDS = None
_id = mongoext.fields.Field()
def __init__(self, **kw):
for name, field in self.FIELDS.iteritems():
try:
setattr(self, name, field(kw.get(name, mongoext.exc.Missed)))
except ValueError as e:
raise ValueError('{}: {}'.format(e.message, name))
def save(self):
self.__init__(**vars(self))
def to_dict(self):
return {f: getattr(self, f, None) for f in self.FIELDS}
def __repr__(self):
return '<{}: {}>'.format(type(self).__name__, self._id)
| mit | Python |
06a8fdf72b2ad99797a60f282338d5a983e7dbcf | add ilb hc to tested resource count | apigee/terraform-modules,apigee/terraform-modules | tests/samples/test_ilb_mtls.py | tests/samples/test_ilb_mtls.py | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
from .utils import *
FIXTURES_DIR = os.path.join(os.path.dirname(__file__), "../../samples/x-ilb-mtls")
@pytest.fixture(scope="module")
def resources(recursive_plan_runner):
_, resources = recursive_plan_runner(
FIXTURES_DIR,
tf_var_file=os.path.join(FIXTURES_DIR, "x-demo.tfvars"),
project_id="testonly",
project_create="true"
)
return resources
def test_resource_count(resources):
"Test total number of resources created."
assert len(resources) == 41
def test_apigee_instance(resources):
"Test Apigee Instance Resource"
assert_instance(resources, "europe-west1", "10.0.0.0/22")
def test_apigee_instance_attachment(resources):
"Test Apigee Instance Attachments."
assert_instance_attachment(resources, ["test1", "test2"])
def test_envgroup_attachment(resources):
"Test Apigee Envgroup Attachments."
assert_envgroup_attachment(resources, ["test1", "test2"])
def test_envgroup(resources):
"Test env group."
assert_envgroup_name(resources, "test")
| # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
from .utils import *
FIXTURES_DIR = os.path.join(os.path.dirname(__file__), "../../samples/x-ilb-mtls")
@pytest.fixture(scope="module")
def resources(recursive_plan_runner):
_, resources = recursive_plan_runner(
FIXTURES_DIR,
tf_var_file=os.path.join(FIXTURES_DIR, "x-demo.tfvars"),
project_id="testonly",
project_create="true"
)
return resources
def test_resource_count(resources):
"Test total number of resources created."
assert len(resources) == 40
def test_apigee_instance(resources):
"Test Apigee Instance Resource"
assert_instance(resources, "europe-west1", "10.0.0.0/22")
def test_apigee_instance_attachment(resources):
"Test Apigee Instance Attachments."
assert_instance_attachment(resources, ["test1", "test2"])
def test_envgroup_attachment(resources):
"Test Apigee Envgroup Attachments."
assert_envgroup_attachment(resources, ["test1", "test2"])
def test_envgroup(resources):
"Test env group."
assert_envgroup_name(resources, "test")
| apache-2.0 | Python |
94307da339d586aca85af975cbd49d6f46d35f8a | add security | iw3hxn/LibrERP,iw3hxn/LibrERP,iw3hxn/LibrERP,iw3hxn/LibrERP,iw3hxn/LibrERP | control_dashboard/__openerp__.py | control_dashboard/__openerp__.py | # -*- coding: utf-8 -*-
##############################################################################
#
# by Bortolatto Ivan (ivan.bortolatto at didotech.com)
# Copyright (C) 2013 Didotech Inc. (<http://www.didotech.com>)
# All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
{
"name": "Virtual Desk for Alert",
"version": "1.3.7",
"depends": ["base", "mail", "base_calendar", "email_template"],
'complexity': "easy",
'description': """
This is a full-featured calendar system.
========================================
It supports for Appointment:
- Calendar of events
- Alerts (create requests)
- Recurring events
- Invitations to people
If you need to manage your meetings, you should install the CRM module.
""",
"author": "Didotech inc.",
'category': 'Tools',
'website': 'http://www.didotech.com',
"init_xml": [
],
'images': [],
"demo_xml": [],
"update_xml": [
"ir_alert_view.xml",
"ir_alert_workflow.xml",
"board_alert_view.xml",
"data/alert_config_data.xml",
"security/ir.model.access.csv",
],
"test": [],
"installable": True,
"auto_install": False,
"images": [],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| # -*- coding: utf-8 -*-
##############################################################################
#
# by Bortolatto Ivan (ivan.bortolatto at didotech.com)
# Copyright (C) 2013 Didotech Inc. (<http://www.didotech.com>)
# All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
{
"name": "Virtual Desk for Alert",
"version": "1.3.7",
"depends": ["base", "mail", "base_calendar", "email_template"],
'complexity': "easy",
'description': """
This is a full-featured calendar system.
========================================
It supports for Appointment:
- Calendar of events
- Alerts (create requests)
- Recurring events
- Invitations to people
If you need to manage your meetings, you should install the CRM module.
""",
"author": "Didotech inc.",
'category': 'Tools',
'website': 'http://www.didotech.com',
"init_xml": [
],
'images': [],
"demo_xml": [],
"update_xml": [
"ir_alert_view.xml",
"ir_alert_workflow.xml",
"board_alert_view.xml",
"data/alert_config_data.xml"
],
"test": [],
"installable": True,
"auto_install": False,
"images": [],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | Python |
8ae35fc63bd2cc7e6e07b68ace455147e1b2e1f1 | clean code | Neurita/darwin | tests/test_binaryclassifier.py | tests/test_binaryclassifier.py |
import numpy as np
from sklearn import svm, datasets
from darwin.pipeline import ClassificationPipeline
def test_binary_classification_with_classification_pipeline():
# generate the dataset
n_samples = 100
n_features = 20
x, y = datasets.make_gaussian_quantiles(mean=None, cov=1.0, n_samples=n_samples, n_features=n_features, n_classes=2,
shuffle=True, random_state=1)
# -- test with darwin
classifier_name = 'rbfsvm' #'linsvm'
cvmethod = '10'
n_feats = x.shape[1]
pipe = ClassificationPipeline(n_feats=n_feats, clfmethod=classifier_name, cvmethod=cvmethod)
results, metrics = pipe.cross_validation(x, y)
assert(results is not None)
return results, metrics
results, metrics = test_binary_classification_with_classification_pipeline()
# def test_
#
# inst = instance.LearnerInstantiator()
# learner_item_name = 'LinearSVC'
# classifier, param_grid = inst.get_method_with_grid(learner_item_name)
|
import numpy as np
from sklearn import svm, datasets
from darwin.pipeline import ClassificationPipeline
def test_binary_classification_with_classification_pipeline():
# generate the dataset
n_samples=100
n_features=20
x, y = datasets.make_gaussian_quantiles(mean=None, cov=1.0, n_samples=n_samples,
n_features=n_features, n_classes=2,
shuffle=True, random_state=1)
# another way to generate the data
# x, y = datasets.make_hastie_10_2(n_samples=10, random_state=1)
# -- test with darwin
classifier_name='linsvm'
cvmethod='10'
n_feats = x.shape[1]
pipe = ClassificationPipeline(n_feats=n_feats, clfmethod=classifier_name,
cvmethod=cvmethod)
results, metrics = pipe.cross_validation(x, y)
assert(results is not None)
# def test_
#
# inst = instance.LearnerInstantiator()
# learner_item_name = 'LinearSVC'
# classifier, param_grid = inst.get_method_with_grid(learner_item_name)
| bsd-3-clause | Python |
1036199d2d886d85cdf2c77477332d7f2f66f505 | Handle rename | mcronce/rabbitmq_stats | rabbitmqStats/__init__.py | rabbitmqStats/__init__.py | from RabbitMQStats import *
| from rabbitmqStats import *
| mit | Python |
e3f4b2f66e55c293c20296b8857adb46c93cd46d | fix cron cmd | teemuhirsikangas/magicaespeculo,teemuhirsikangas/magicaespeculo,teemuhirsikangas/magicaespeculo,teemuhirsikangas/magicaespeculo | scripts/send_waterleak.py | scripts/send_waterleak.py | #!/usr/bin/python
#run this script on reboot, add it to the cron job
#@reboot sudo nohup python /home/pi/magicaespeculo/scripts/send_waterleak.py > /dev/null 2>&1
import sys
import RPi.GPIO as GPIO
import time
import datetime
import schedule
import paho.mqtt.publish as publish
import json
import requests
import config
GPIO.setmode(GPIO.BCM)
WL_PIN = 22
GPIO.setup(WL_PIN, GPIO.IN)
MQTT_USER = config.username
MQTT_PWD = config.password
MQTT_BROKER_ADDR = config.MQTT_ADDRESS
AUTH = {'username':config.username, 'password':config.password}
def WATER(WL_PIN):
#time = str(datetime.datetime.now().replace(microsecond=0).isoformat(' '))
epoch_time = int(time.time())
#sensor is 1 when no water, 0 when water is detected, so let's flip it
state = GPIO.input(WL_PIN)
state ^= 1
payload = { 'time' : epoch_time, 'state' : state }
payload_string = json.dumps(payload)
publish.single("home/engineroom/waterleak", payload_string, retain=True, hostname=MQTT_BROKER_ADDR, auth=AUTH)
def DAILY():
#print("TEST")
WATER(WL_PIN)
schedule.every().day.at("06:00").do(DAILY)
schedule.every().day.at("18:00").do(DAILY)
time.sleep(1)
try:
GPIO.add_event_detect(WL_PIN, GPIO.FALLING, callback=WATER)
while (True):
#sleep for 12 hours and send status report that the script is stil alive
schedule.run_pending()
#WATER(WL_PIN)
time.sleep(1)
except KeyboardInterrupt:
print ("Quit")
GPIO.cleanup()
| #!/usr/bin/python
#run this script on reboot, add it to the cron job
# nohup
#@reboot sudo nohup python /home/pi/magicaespeculo/scripts/send_waterleak.py
import sys
import RPi.GPIO as GPIO
import time
import datetime
import schedule
import paho.mqtt.publish as publish
import json
import requests
import config
GPIO.setmode(GPIO.BCM)
WL_PIN = 22
GPIO.setup(WL_PIN, GPIO.IN)
MQTT_USER = config.username
MQTT_PWD = config.password
MQTT_BROKER_ADDR = config.MQTT_ADDRESS
AUTH = {'username':config.username, 'password':config.password}
def WATER(WL_PIN):
#time = str(datetime.datetime.now().replace(microsecond=0).isoformat(' '))
epoch_time = int(time.time())
#sensor is 1 when no water, 0 when water is detected, so let's flip it
state = GPIO.input(WL_PIN)
state ^= 1
payload = { 'time' : epoch_time, 'state' : state }
payload_string = json.dumps(payload)
publish.single("home/engineroom/waterleak", payload_string, retain=True, hostname=MQTT_BROKER_ADDR, auth=AUTH)
def DAILY():
#print("TEST")
WATER(WL_PIN)
schedule.every().day.at("06:00").do(DAILY)
schedule.every().day.at("18:00").do(DAILY)
time.sleep(1)
try:
GPIO.add_event_detect(WL_PIN, GPIO.FALLING, callback=WATER)
while (True):
#sleep for 12 hours and send status report that the script is stil alive
schedule.run_pending()
#WATER(WL_PIN)
time.sleep(1)
except KeyboardInterrupt:
print ("Quit")
GPIO.cleanup()
| mit | Python |
1dc1bb91fffd8175898ff920cc9497c7b45c912a | Make sure the `type hints` support is working for FastAPI | rollbar/pyrollbar | rollbar/test/test_fastapi.py | rollbar/test/test_fastapi.py | import sys
try:
from unittest import mock
except ImportError:
import mock
import unittest2
from rollbar.test import BaseTest
ALLOWED_PYTHON_VERSION = sys.version_info[0] >= 3 and sys.version_info[1] >= 6
@unittest2.skipUnless(ALLOWED_PYTHON_VERSION, "FastAPI requires Python3.6+")
class FastAPIMiddlewareTest(BaseTest):
def test_should_set_fastapi_hook(self):
import rollbar
import rollbar.contrib.fastapi
self.assertEqual(rollbar.BASE_DATA_HOOK, rollbar.contrib.fastapi._hook)
def test_should_catch_and_report_errors(self):
from fastapi import FastAPI
from fastapi.testclient import TestClient
from rollbar.contrib.fastapi import FastAPIMiddleware
app = FastAPI()
app.add_middleware(FastAPIMiddleware)
@app.get("/")
async def read_root():
1 / 0
client = TestClient(app)
with mock.patch("rollbar.report_exc_info") as mock_report:
with self.assertRaises(ZeroDivisionError):
client.get("/")
self.assertEqual(mock_report.call_count, 1)
args, kwargs = mock_report.call_args
self.assertEqual(kwargs, {})
exc_info, request = args
exc_type, exc_value, exc_tb = exc_info
self.assertEqual(exc_type, ZeroDivisionError)
self.assertIsInstance(exc_value, ZeroDivisionError)
def test_should_support_type_hints(self):
from starlette.types import ASGIApp, Receive, Scope, Send
import rollbar.contrib.fastapi
self.assertDictEqual(
rollbar.contrib.fastapi.FastAPIMiddleware.__call__.__annotations__,
{"scope": Scope, "receive": Receive, "send": Send, "return": None},
)
| import sys
try:
from unittest import mock
except ImportError:
import mock
import unittest2
from rollbar.test import BaseTest
ALLOWED_PYTHON_VERSION = sys.version_info[0] >= 3 and sys.version_info[1] >= 6
@unittest2.skipUnless(ALLOWED_PYTHON_VERSION, "FastAPI requires Python3.6+")
class FastAPIMiddlewareTest(BaseTest):
def test_should_set_fastapi_hook(self):
import rollbar
import rollbar.contrib.fastapi
self.assertEqual(rollbar.BASE_DATA_HOOK, rollbar.contrib.fastapi._hook)
def test_should_catch_and_report_errors(self):
from fastapi import FastAPI
from fastapi.testclient import TestClient
from rollbar.contrib.fastapi import FastAPIMiddleware
app = FastAPI()
app.add_middleware(FastAPIMiddleware)
@app.get("/")
async def read_root():
1 / 0
client = TestClient(app)
with mock.patch("rollbar.report_exc_info") as mock_report:
with self.assertRaises(ZeroDivisionError):
client.get("/")
self.assertEqual(mock_report.call_count, 1)
args, kwargs = mock_report.call_args
self.assertEqual(kwargs, {})
exc_info, request = args
exc_type, exc_value, exc_tb = exc_info
self.assertEqual(exc_type, ZeroDivisionError)
self.assertIsInstance(exc_value, ZeroDivisionError)
| mit | Python |
c3b1f8c97f89e5b9e8b8e74992631bac33bdde5f | Implement a test if read_user_choice raises on invalid options | lucius-feng/cookiecutter,foodszhang/cookiecutter,tylerdave/cookiecutter,atlassian/cookiecutter,kkujawinski/cookiecutter,Vauxoo/cookiecutter,sp1rs/cookiecutter,pjbull/cookiecutter,dajose/cookiecutter,benthomasson/cookiecutter,luzfcb/cookiecutter,stevepiercy/cookiecutter,luzfcb/cookiecutter,foodszhang/cookiecutter,vintasoftware/cookiecutter,nhomar/cookiecutter,dajose/cookiecutter,terryjbates/cookiecutter,lgp171188/cookiecutter,audreyr/cookiecutter,atlassian/cookiecutter,willingc/cookiecutter,christabor/cookiecutter,vintasoftware/cookiecutter,nhomar/cookiecutter,venumech/cookiecutter,sp1rs/cookiecutter,michaeljoseph/cookiecutter,takeflight/cookiecutter,willingc/cookiecutter,lucius-feng/cookiecutter,stevepiercy/cookiecutter,audreyr/cookiecutter,christabor/cookiecutter,ramiroluz/cookiecutter,hackebrot/cookiecutter,lgp171188/cookiecutter,takeflight/cookiecutter,cguardia/cookiecutter,kkujawinski/cookiecutter,tylerdave/cookiecutter,benthomasson/cookiecutter,Springerle/cookiecutter,ramiroluz/cookiecutter,michaeljoseph/cookiecutter,terryjbates/cookiecutter,janusnic/cookiecutter,pjbull/cookiecutter,venumech/cookiecutter,agconti/cookiecutter,drgarcia1986/cookiecutter,agconti/cookiecutter,janusnic/cookiecutter,ionelmc/cookiecutter,moi65/cookiecutter,drgarcia1986/cookiecutter,hackebrot/cookiecutter,moi65/cookiecutter,ionelmc/cookiecutter,Vauxoo/cookiecutter,cguardia/cookiecutter,Springerle/cookiecutter | tests/test_read_user_choice.py | tests/test_read_user_choice.py | # -*- coding: utf-8 -*-
import click
import pytest
from cookiecutter.compat import read_user_choice
OPTIONS = ['hello', 'world', 'foo', 'bar']
EXPECTED_PROMPT = """Select varname:
1 - hello
2 - world
3 - foo
4 - bar
Choose from 1, 2, 3, 4!"""
@pytest.mark.parametrize('user_choice, expected_value', enumerate(OPTIONS, 1))
def test_click_invocation(mocker, user_choice, expected_value):
choice = mocker.patch('click.Choice')
choice.return_value = click.Choice(OPTIONS)
prompt = mocker.patch('click.prompt')
prompt.return_value = str(user_choice)
assert read_user_choice('varname', OPTIONS) == expected_value
prompt.assert_called_once_with(
EXPECTED_PROMPT,
type=click.Choice(OPTIONS),
default='1'
)
@pytest.fixture(params=[1, True, False, None, [], {}])
def invalid_options(request):
return ['foo', 'bar', request.param]
def test_raise_on_non_str_options(invalid_options):
with pytest.raises(TypeError):
read_user_choice('foo', invalid_options)
| # -*- coding: utf-8 -*-
import click
import pytest
from cookiecutter.compat import read_user_choice
OPTIONS = ['hello', 'world', 'foo', 'bar']
EXPECTED_PROMPT = """Select varname:
1 - hello
2 - world
3 - foo
4 - bar
Choose from 1, 2, 3, 4!"""
@pytest.mark.parametrize('user_choice, expected_value', enumerate(OPTIONS, 1))
def test_click_invocation(mocker, user_choice, expected_value):
choice = mocker.patch('click.Choice')
choice.return_value = click.Choice(OPTIONS)
prompt = mocker.patch('click.prompt')
prompt.return_value = str(user_choice)
assert read_user_choice('varname', OPTIONS) == expected_value
prompt.assert_called_once_with(
EXPECTED_PROMPT,
type=click.Choice(OPTIONS),
default='1'
)
| bsd-3-clause | Python |
08d83f6999d8e5442acf8683bd8348baca386331 | Revert wsgi keep alive as well | craigcook/bedrock,flodolo/bedrock,flodolo/bedrock,alexgibson/bedrock,MichaelKohler/bedrock,flodolo/bedrock,MichaelKohler/bedrock,alexgibson/bedrock,sylvestre/bedrock,mozilla/bedrock,MichaelKohler/bedrock,sylvestre/bedrock,craigcook/bedrock,alexgibson/bedrock,craigcook/bedrock,pascalchevrel/bedrock,alexgibson/bedrock,pascalchevrel/bedrock,pascalchevrel/bedrock,pascalchevrel/bedrock,flodolo/bedrock,MichaelKohler/bedrock,sylvestre/bedrock,mozilla/bedrock,mozilla/bedrock,craigcook/bedrock,sylvestre/bedrock,mozilla/bedrock | wsgi/config.py | wsgi/config.py | # see http://docs.gunicorn.org/en/latest/configure.html#configuration-file
from os import getenv
bind = f'0.0.0.0:{getenv("PORT", "8000")}'
workers = getenv('WEB_CONCURRENCY', 2)
accesslog = '-'
errorlog = '-'
loglevel = getenv('LOGLEVEL', 'info')
# Larger keep-alive values maybe needed when directly talking to ELBs
# See https://github.com/benoitc/gunicorn/issues/1194
keepalive = getenv("WSGI_KEEP_ALIVE", 2)
worker_class = getenv("GUNICORN_WORKER_CLASS", "meinheld.gmeinheld.MeinheldWorker")
worker_connections = getenv("APP_GUNICORN_WORKER_CONNECTIONS", "1000")
worker_tmp_dir = "/dev/shm"
# Called just after a worker has been forked.
def post_fork(server, worker):
server.log.info("Worker spawned (pid: %s)", worker.pid)
| # see http://docs.gunicorn.org/en/latest/configure.html#configuration-file
from os import getenv
bind = f'0.0.0.0:{getenv("PORT", "8000")}'
workers = getenv('WEB_CONCURRENCY', 2)
accesslog = '-'
errorlog = '-'
loglevel = getenv('LOGLEVEL', 'info')
# Larger keep-alive values maybe needed when directly talking to ELBs
# See https://github.com/benoitc/gunicorn/issues/1194
keepalive = getenv("WSGI_KEEP_ALIVE", 118)
worker_class = getenv("GUNICORN_WORKER_CLASS", "meinheld.gmeinheld.MeinheldWorker")
worker_connections = getenv("APP_GUNICORN_WORKER_CONNECTIONS", "1000")
worker_tmp_dir = "/dev/shm"
# Called just after a worker has been forked.
def post_fork(server, worker):
server.log.info("Worker spawned (pid: %s)", worker.pid)
| mpl-2.0 | Python |
a428a64ec6c84d58b85d443e2b881545a5d0e6f1 | increase version to 0.9.3 | byteweaver/django-referral,Chris7/django-referral | referral/__init__.py | referral/__init__.py | __version__ = '0.9.3'
| __version__ = '0.9.2'
| mit | Python |
9b7326637e0318d546657489c4bd18d5a2d80794 | Use transform accessors | elemel/drillion | drillion/physics_component.py | drillion/physics_component.py | from drillion.component import Component
from drillion.maths import Transform2, Vector2
class PhysicsComponent(Component):
def __init__(self, transform_component, update_phase, position=(0.0, 0.0),
velocity=(0.0, 0.0), acceleration=(0.0, 0.0), angle=0.0,
angular_velocity=0.0, angular_acceleration=0.0):
super(PhysicsComponent, self).__init__()
self.position = Vector2(*position)
self.velocity = Vector2(*velocity)
self.acceleration = Vector2(*acceleration)
self.angle = angle
self.angular_velocity = angular_velocity
self.angular_acceleration = angular_acceleration
self.transform_component = transform_component
self.update_phase = update_phase
def create(self):
self.update_phase.add_handler(self)
def delete(self):
self.update_phase.add_handler(self)
def update(self, dt):
self.position += dt * self.velocity
self.velocity += dt * self.acceleration
self.angle += dt * self.angular_velocity
self.angular_velocity += dt * self.angular_acceleration
transform = Transform2(*self.transform_component.transform)
transform.reset()
transform.rotate(self.angle)
transform.translate(*self.position)
self.transform_component.transform = transform
| from drillion.component import Component
from drillion.maths import Vector2
class PhysicsComponent(Component):
def __init__(self, transform_component, update_phase, position=(0.0, 0.0),
velocity=(0.0, 0.0), acceleration=(0.0, 0.0), angle=0.0,
angular_velocity=0.0, angular_acceleration=0.0):
super(PhysicsComponent, self).__init__()
self.position = Vector2(*position)
self.velocity = Vector2(*velocity)
self.acceleration = Vector2(*acceleration)
self.angle = angle
self.angular_velocity = angular_velocity
self.angular_acceleration = angular_acceleration
self.transform_component = transform_component
self.update_phase = update_phase
def create(self):
self.update_phase.add_handler(self)
def delete(self):
self.update_phase.add_handler(self)
def update(self, dt):
self.position += dt * self.velocity
self.velocity += dt * self.acceleration
self.angle += dt * self.angular_velocity
self.angular_velocity += dt * self.angular_acceleration
transform = self.transform_component.transform
transform.reset()
transform.rotate(self.angle)
transform.translate(*self.position)
self.transform_component.transform = transform
| mit | Python |
e9ef64a037ecc1329f3d4f7885ee0ac1f5d1fd37 | make test_MassAnalysis use airfoilcoords fixture | helo9/wingstructure | tests/test_structuresection.py | tests/test_structuresection.py | import pytest
from wingstructure.structure import section, material, MassAnalysis
@pytest.fixture
def airfoilcoords():
import numpy as np
# load airfoil coordinates
return np.loadtxt('docs/usage/FX 61-184.dat', skiprows=1, delimiter=',')
def test_structurecreation(airfoilcoords):
# create material
amat = material.IsotropicMaterial(1.225, 210e3, 50e3)
# create sectionbase instance
secbase = section.SectionBase(airfoilcoords)
outerlayer = section.Layer(secbase, amat, thickness=0.001)
innerlayer = section.Layer(outerlayer, amat, thickness=0.001)
ispar = section.ISpar(innerlayer, amat, 0.5, 0.2, 0.01,
0.5, 0.01)
boxspar = section.BoxSpar(innerlayer, amat, 0.5, 0.2, 0.01,
0.01)
def test_MassAnalysis(airfoilcoords):
# create material
amat = material.IsotropicMaterial(1.225, 210e3, 50e3)
# create sectionbase instance
secbase = section.SectionBase(airfoilcoords)
outerlayer = section.Layer(secbase, amat, thickness=0.001)
innerlayer = section.Layer(outerlayer, amat, thickness=0.001)
ispar = section.ISpar(innerlayer, amat, 0.5, 0.2, 0.01,
0.5, 0.01)
boxspar = section.BoxSpar(innerlayer, amat, 0.5, 0.2, 0.01,
0.01)
massana = MassAnalysis(secbase)
cg, mass = massana.massproperties
#TODO: write a meaningful test
assert mass != None
| import pytest
from wingstructure.structure import section, material, MassAnalysis
@pytest.fixture
def airfoilcoords():
import numpy as np
# load airfoil coordinates
return np.loadtxt('docs/usage/FX 61-184.dat', skiprows=1, delimiter=',')
def test_structurecreation(airfoilcoords):
# create material
amat = material.IsotropicMaterial(1.225, 210e3, 50e3)
# create sectionbase instance
secbase = section.SectionBase(airfoilcoords)
outerlayer = section.Layer(secbase, amat, thickness=0.001)
innerlayer = section.Layer(outerlayer, amat, thickness=0.001)
ispar = section.ISpar(innerlayer, amat, 0.5, 0.2, 0.01,
0.5, 0.01)
boxspar = section.BoxSpar(innerlayer, amat, 0.5, 0.2, 0.01,
0.01)
def test_MassAnalysis():
# create material
amat = material.IsotropicMaterial(1.225, 210e3, 50e3)
# create sectionbase instance
secbase = section.SectionBase(airfoilcoords())
outerlayer = section.Layer(secbase, amat, thickness=0.001)
innerlayer = section.Layer(outerlayer, amat, thickness=0.001)
ispar = section.ISpar(innerlayer, amat, 0.5, 0.2, 0.01,
0.5, 0.01)
boxspar = section.BoxSpar(innerlayer, amat, 0.5, 0.2, 0.01,
0.01)
massana = MassAnalysis(secbase)
cg, mass = massana.massproperties
#TODO: write a meaningful test
assert mass != None
| mit | Python |
7ef6ed7c8ad66f08ca69c767a2a4bdbaf1088fdf | update script to be runnalbe in prod env | codeforamerica/westsac-urban-land-locator,codeforamerica/westsac-urban-land-locator,codeforamerica/westsac-urban-land-locator,codeforamerica/westsac-urban-land-locator | reset-dev-farmland.py | reset-dev-farmland.py | import os
import json
import execjs
import urllib2
from sqlalchemy import create_engine
from farmsList.settings import DevConfig
if os.environ.get("FARMSLIST_ENV") == 'prod':
engine = create_engine(ProdConfig().SQLALCHEMY_DATABASE_URI)
else:
engine = create_engine(DevConfig().SQLALCHEMY_DATABASE_URI)
conn = engine.connect()
parcels = [{
'geometry': '{"type":"Polygon","coordinates":[[[-121.51367978210449,38.58853235229309],[-121.51347978210449,38.58853235229309],[-121.51347978210449,38.58833235229309],[-121.51367978210449,38.58833235229309],[-121.51367978210449,38.58853235229309]]]}',
'center': '{"geometry":{"type":"Point","coordinates":[-121.51357978210449,38.58843235229309]}}',
'soil': 'Yolo',
'size': '0.32',
'id': 10000001
},
{
'geometry': '{"type":"Polygon","coordinates":[[[-121.52367978210449,38.58853235229309],[-121.52347978210449,38.58853235229309],[-121.52347978210449,38.58833235229309],[-121.52367978210449,38.58833235229309],[-121.52367978210449,38.58853235229309]]]}',
'center': '{"geometry":{"type":"Point","coordinates":[-121.52357978210449,38.58843235229309]}}',
'soil': 'Made Land',
'size': '1.03',
'id': 10000002
},
{
'geometry': '{"type":"Polygon","coordinates":[[[-121.53367978210449,38.58853235229309],[-121.53347978210449,38.58853235229309],[-121.53347978210449,38.58833235229309],[-121.53367978210449,38.58833235229309],[-121.53367978210449,38.58853235229309]]]}',
'center': '{"geometry":{"type":"Point","coordinates":[-121.53357978210449,38.58843235229309]}}',
'soil': 'Sacramento',
'size': '2.01',
'id': 10000003
}]
for parcel in parcels:
conn.execute("INSERT INTO farmlands (geometry, size, zoning, center, water, id, soil) VALUES ('{}', {}, 'Ag', '{}', 0, {}, '{}')".format(parcel['geometry'], parcel['size'], parcel['center'], parcel['id'], parcel['soil']))
| import os
import json
import execjs
import urllib2
from sqlalchemy import create_engine
from farmsList.settings import DevConfig
engine = create_engine(DevConfig().SQLALCHEMY_DATABASE_URI)
conn = engine.connect()
parcels = [{
'geometry': '{"type":"Polygon","coordinates":[[[-121.51367978210449,38.58853235229309],[-121.51347978210449,38.58853235229309],[-121.51347978210449,38.58833235229309],[-121.51367978210449,38.58833235229309],[-121.51367978210449,38.58853235229309]]]}',
'center': '{"geometry":{"type":"Point","coordinates":[-121.51357978210449,38.58843235229309]}}',
'soil': 'Yolo',
'size': '0.32',
'id': 10000001
},
{
'geometry': '{"type":"Polygon","coordinates":[[[-121.52367978210449,38.58853235229309],[-121.52347978210449,38.58853235229309],[-121.52347978210449,38.58833235229309],[-121.52367978210449,38.58833235229309],[-121.52367978210449,38.58853235229309]]]}',
'center': '{"geometry":{"type":"Point","coordinates":[-121.52357978210449,38.58843235229309]}}',
'soil': 'Made Land',
'size': '1.03',
'id': 10000002
},
{
'geometry': '{"type":"Polygon","coordinates":[[[-121.53367978210449,38.58853235229309],[-121.53347978210449,38.58853235229309],[-121.53347978210449,38.58833235229309],[-121.53367978210449,38.58833235229309],[-121.53367978210449,38.58853235229309]]]}',
'center': '{"geometry":{"type":"Point","coordinates":[-121.53357978210449,38.58843235229309]}}',
'soil': 'Sacramento',
'size': '2.01',
'id': 10000003
}]
for parcel in parcels:
conn.execute("INSERT INTO farmlands (geometry, size, zoning, center, water, id, soil) VALUES ('{}', {}, 'Ag', '{}', 0, {}, '{}')".format(parcel['geometry'], parcel['size'], parcel['center'], parcel['id'], parcel['soil']))
| bsd-3-clause | Python |
e4ab382aa9463cc0fed49a6fc7e2af6f61a3557b | Use single quotes consistently | mesonbuild/wrapweb,mesonbuild/wrapweb,mesonbuild/wrapweb | wrapweb/app.py | wrapweb/app.py | # Copyright 2015 The Meson development team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import flask
import os
# Create the application.
APP = flask.Flask(__name__)
APP.config.from_object('wrapweb.default_config.Config')
if 'WRAPDB_CONFIG' in os.environ:
APP.config.from_envvar('WRAPDB_CONFIG')
| # Copyright 2015 The Meson development team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import flask
import os
# Create the application.
APP = flask.Flask(__name__)
APP.config.from_object("wrapweb.default_config.Config")
if "WRAPDB_CONFIG" in os.environ:
APP.config.from_envvar("WRAPDB_CONFIG")
| apache-2.0 | Python |
47e79b3a01ca4541d79412cdab856f84871e68f8 | Add auth protocol for keystone connection in vnc_api | Juniper/contrail-provisioning,Juniper/contrail-provisioning | templates/vnc_api_lib_ini_template.py | templates/vnc_api_lib_ini_template.py | import string
template = string.Template("""
[global]
;WEB_SERVER = 127.0.0.1
;WEB_PORT = 9696 ; connection through quantum plugin
WEB_SERVER = 127.0.0.1
WEB_PORT = 8082 ; connection to api-server directly
BASE_URL = /
;BASE_URL = /tenants/infra ; common-prefix for all URLs
; Authentication settings (optional)
[auth]
AUTHN_TYPE = keystone
AUTHN_PROTOCOL = http
AUTHN_SERVER=$__contrail_openstack_ip__
AUTHN_PORT = 35357
AUTHN_URL = /v2.0/tokens
""")
| import string
template = string.Template("""
[global]
;WEB_SERVER = 127.0.0.1
;WEB_PORT = 9696 ; connection through quantum plugin
WEB_SERVER = 127.0.0.1
WEB_PORT = 8082 ; connection to api-server directly
BASE_URL = /
;BASE_URL = /tenants/infra ; common-prefix for all URLs
; Authentication settings (optional)
[auth]
AUTHN_TYPE = keystone
AUTHN_SERVER=$__contrail_openstack_ip__
AUTHN_PORT = 35357
AUTHN_URL = /v2.0/tokens
""")
| apache-2.0 | Python |
24d7e3ae08c8f81e95a6292bcc8668a1a3a0ece2 | Add homo-binning | FederatedAI/FATE,FederatedAI/FATE,FederatedAI/FATE | python/fate_client/pipeline/component/__init__.py | python/fate_client/pipeline/component/__init__.py | from pipeline.component.column_expand import ColumnExpand
from pipeline.component.data_statistics import DataStatistics
from pipeline.component.dataio import DataIO
from pipeline.component.data_transform import DataTransform
from pipeline.component.evaluation import Evaluation
from pipeline.component.hetero_data_split import HeteroDataSplit
from pipeline.component.hetero_fast_secureboost import HeteroFastSecureBoost
from pipeline.component.hetero_feature_binning import HeteroFeatureBinning
from pipeline.component.hetero_feature_selection import HeteroFeatureSelection
from pipeline.component.hetero_ftl import HeteroFTL
from pipeline.component.hetero_linr import HeteroLinR
from pipeline.component.hetero_lr import HeteroLR
from pipeline.component.hetero_nn import HeteroNN
from pipeline.component.hetero_pearson import HeteroPearson
from pipeline.component.hetero_poisson import HeteroPoisson
from pipeline.component.hetero_secureboost import HeteroSecureBoost
from pipeline.component.homo_data_split import HomoDataSplit
from pipeline.component.homo_lr import HomoLR
from pipeline.component.homo_nn import HomoNN
from pipeline.component.homo_secureboost import HomoSecureBoost
from pipeline.component.homo_feature_binning import HomoFeatureBinning
from pipeline.component.intersection import Intersection
from pipeline.component.local_baseline import LocalBaseline
from pipeline.component.one_hot_encoder import OneHotEncoder
from pipeline.component.psi import PSI
from pipeline.component.reader import Reader
from pipeline.component.scorecard import Scorecard
from pipeline.component.sampler import FederatedSample
from pipeline.component.scale import FeatureScale
from pipeline.component.union import Union
from pipeline.component.feldman_verifiable_sum import FeldmanVerifiableSum
from pipeline.component.sample_weight import SampleWeight
__all__ = ["DataStatistics", "DataIO", "Evaluation", "HeteroDataSplit",
"HeteroFastSecureBoost", "HeteroFeatureBinning", "HeteroFeatureSelection",
"HeteroFTL", "HeteroLinR", "HeteroLR", "HeteroNN",
"HeteroPearson", "HeteroPoisson", "HeteroSecureBoost", "HomoDataSplit",
"HomoLR", "HomoNN", "HomoSecureBoost", "HomoFeatureBinning", "Intersection",
"LocalBaseline", "OneHotEncoder", "PSI", "Reader", "Scorecard",
"FederatedSample", "FeatureScale", "Union", "ColumnExpand", "FeldmanVerifiableSum",
"SampleWeight", "DataTransform"]
| from pipeline.component.column_expand import ColumnExpand
from pipeline.component.data_statistics import DataStatistics
from pipeline.component.dataio import DataIO
from pipeline.component.data_transform import DataTransform
from pipeline.component.evaluation import Evaluation
from pipeline.component.hetero_data_split import HeteroDataSplit
from pipeline.component.hetero_fast_secureboost import HeteroFastSecureBoost
from pipeline.component.hetero_feature_binning import HeteroFeatureBinning
from pipeline.component.hetero_feature_selection import HeteroFeatureSelection
from pipeline.component.hetero_ftl import HeteroFTL
from pipeline.component.hetero_linr import HeteroLinR
from pipeline.component.hetero_lr import HeteroLR
from pipeline.component.hetero_nn import HeteroNN
from pipeline.component.hetero_pearson import HeteroPearson
from pipeline.component.hetero_poisson import HeteroPoisson
from pipeline.component.hetero_secureboost import HeteroSecureBoost
from pipeline.component.homo_data_split import HomoDataSplit
from pipeline.component.homo_lr import HomoLR
from pipeline.component.homo_nn import HomoNN
from pipeline.component.homo_secureboost import HomoSecureBoost
from pipeline.component.intersection import Intersection
from pipeline.component.local_baseline import LocalBaseline
from pipeline.component.one_hot_encoder import OneHotEncoder
from pipeline.component.psi import PSI
from pipeline.component.reader import Reader
from pipeline.component.scorecard import Scorecard
from pipeline.component.sampler import FederatedSample
from pipeline.component.scale import FeatureScale
from pipeline.component.union import Union
from pipeline.component.feldman_verifiable_sum import FeldmanVerifiableSum
from pipeline.component.sample_weight import SampleWeight
__all__ = ["DataStatistics", "DataIO", "Evaluation", "HeteroDataSplit",
"HeteroFastSecureBoost", "HeteroFeatureBinning", "HeteroFeatureSelection",
"HeteroFTL", "HeteroLinR", "HeteroLR", "HeteroNN",
"HeteroPearson", "HeteroPoisson", "HeteroSecureBoost", "HomoDataSplit",
"HomoLR", "HomoNN", "HomoSecureBoost", "Intersection",
"LocalBaseline", "OneHotEncoder", "PSI", "Reader", "Scorecard",
"FederatedSample", "FeatureScale", "Union", "ColumnExpand", "FeldmanVerifiableSum",
"SampleWeight", "DataTransform"]
| apache-2.0 | Python |
02f4597f83b5bc515b5d583fa737863ba314f8ff | fix typo | genegis/genegis,genegis/genegis,genegis/genegis | Install/toolbox/scripts/utils.py | Install/toolbox/scripts/utils.py | # -*- coding: utf-8 -*-
import csv
import collections
import sys
import re
import os
import binascii
def parameters_from_args(defaults_tuple=None, sys_args=None):
"""Provided a set of tuples for default values, return a list of mapped
variables."""
defaults = collections.OrderedDict(defaults_tuple)
if defaults_tuple is not None:
args = len(sys_args) - 1
for i, key in enumerate(defaults.keys()):
idx = i + 1
if idx <= args:
defaults[key] = sys_args[idx]
return defaults
def msg(output_msg, mtype='message', exception=None):
if mtype == 'error':
arcpy_messages = arcpy.GetMessages()
tb = sys.exc_info()[2]
tbinfo = traceback.format_tb(tb)[0]
if config.mode == 'script':
if exception:
# print the raw exception
print exception
# Arcpy and Python stuff, hopefully also helpful
err_msg = "ArcPy Error: {msg_text}\nPython Error: ${tbinfo}".format(
msg_text=arcpy_messages, tbinfo=tbinfo)
else:
arcpy.AddMessage(output_msg)
if exception:
arcpy.AddError(exception)
arcpy.AddError(arcpy_messages)
arcpy.AddMessage("Python Error: ${tbinfo}".format(tbinfo=tbinfo))
elif config.mode == 'script':
print output_msg
else:
if mtype == 'message':
arcpy.AddMessage(output_msg)
elif mtype == 'warning':
arcpy.AddWarning(output_msg)
| # -*- coding: utf-8 -*-
import csv
import collections
import sys
import re
import os
import binascii
def parameters_from_args(defaults_tuple=None, sys_args=None):
"""Provided a set of tuples for default values, return a list of mapped
variables."""
defaults = collections.OrderedDict(defaults_tuple)
if defaults_tuple is not None:
args = len(sys_args) - 1
for i, key in enumerate(defaults.keys()):
idx = i + 1
if idx <= args:
defaults[key] = sys_args[idx]
return defaults
def msg(output_msg, mtype='message', exception=None):
if mtype == 'error':
arcpy_messages = arcpy.GetMessages()
tb = sys.exc_info()[2]
tbinfo = traceback.format_tb(tb)[0]
if config.mode == 'script':
if exception:
# print the raw exception
print exception
# Arcpy and Python stuff, hopefully also helpful
err_msg = "ArcPy Error: {msg_text}\nPython Error: ${tbinfo}".format(
msg_text=arcpy_messages, tbinfo=tbinfo)
else:
arcpy.AddMessage(output_msg)
if exception:
arcpy.AddError(exception)
arcpy.AddError(arcpy_messages)
arcpy.AddMessage("Python Error: ${tbinfo}".format(tbinfo=tbinfo))
elif config.mode == 'script':
print output_msg
else:
if mtype == 'message':
arcpy.AddMessage(output_msg)
elif mtype == 'warning':
arcpy.AddWarning(output_msg)
| mpl-2.0 | Python |
6afd022e784ecf6069542c796bbf53ccca05bdc1 | Fix deprecation warning | springload/madewithwagtail,springload/madewithwagtail,springload/madewithwagtail,springload/madewithwagtail | core/tests/test_template_tags.py | core/tests/test_template_tags.py | from django.template import Context, Template
from bs4 import BeautifulSoup
from core.tests.utils import *
from core.models import *
class TemplateTagsTestCase(WagtailTest):
def setUp(self):
super(TemplateTagsTestCase, self).setUp()
self.home_page = HomePage.objects.all()[0]
def test_set_var(self):
"""
Test set_var(parser, token) tag
{% set <var_name> = <var_value> %}
"""
# We just assign a simple string to a ver and check it gets printed nicely
rendered = Template("{% load core_tags %}{% set x = 'The hammer of Zeus' %}{{ x }}").render(Context({}))
self.assertEqual(rendered, "The hammer of Zeus")
def test_footer_menu(self):
"""
Test footer_menu(context) tag
{% footer_menu %}
"""
response = self.client.get(self.home_page.url)
# Render and check footer has 2 items
rendered = Template("{% load core_tags %}{% footer_menu name='Footer' current_page=self.home_page %}").render(Context({"request": response.request}))
soup = BeautifulSoup(rendered, 'html5lib')
self.assertEqual(
len(soup.findAll("a")),
4 # Number of links in the footer, should be 4 but fixtures are bit outdated
)
def test_main_menu(self):
"""
Test main_menu(context) tag
{% main_menu %}
"""
response = self.client.get(self.home_page.url)
# Render and check footer has 2 items
rendered = Template("{% load core_tags %}{% menu name='Main' current_page=self.home_page %}").render(Context({"request": response.request}))
soup = BeautifulSoup(rendered, 'html5lib')
self.assertEqual(
len(soup.findAll("a")),
5 # Number of links in the main nav
)
| from django.template import Context, Template
from bs4 import BeautifulSoup
from core.tests.utils import *
from core.models import *
class TemplateTagsTestCase(WagtailTest):
def setUp(self):
super(TemplateTagsTestCase, self).setUp()
self.home_page = HomePage.objects.all()[0]
def test_set_var(self):
"""
Test set_var(parser, token) tag
{% set <var_name> = <var_value> %}
"""
# We just assign a simple string to a ver and check it gets printed nicely
rendered = Template("{% load core_tags %}{% set x = 'The hammer of Zeus' %}{{ x }}").render(Context({}))
self.assertEqual(rendered, "The hammer of Zeus")
def test_footer_menu(self):
"""
Test footer_menu(context) tag
{% footer_menu %}
"""
response = self.client.get(self.home_page.url)
# Render and check footer has 2 items
rendered = Template("{% load core_tags %}{% footer_menu name='Footer' current_page=self.home_page %}").render(Context({"request": response.request}))
soup = BeautifulSoup(rendered)
self.assertEqual(
len(soup.findAll("a")),
4 # Number of links in the footer, should be 4 but fixtures are bit outdated
)
def test_main_menu(self):
"""
Test main_menu(context) tag
{% main_menu %}
"""
response = self.client.get(self.home_page.url)
# Render and check footer has 2 items
rendered = Template("{% load core_tags %}{% menu name='Main' current_page=self.home_page %}").render(Context({"request": response.request}))
soup = BeautifulSoup(rendered)
self.assertEqual(
len(soup.findAll("a")),
5 # Number of links in the main nav
)
| mit | Python |
20717a44fce087103dd92aa4e333fde4c9187f0e | Increment minor version to 0.4 | myint/cppclean,myint/cppclean,myint/cppclean,myint/cppclean | cpp/__init__.py | cpp/__init__.py | __version__ = '0.4'
| __version__ = '0.3'
| apache-2.0 | Python |
e3a994bff7a43968fcea53eee49b540b57c6e7f6 | Update supply_systems_database.py | architecture-building-systems/CEAforArcGIS,architecture-building-systems/CEAforArcGIS | cea/technologies/supply_systems_database.py | cea/technologies/supply_systems_database.py | """
This module provides an interface to the "supply_systems.xls" file (locator.get_database_supply_systems()) - the point
is to avoid reading this data (which is constant during the lifetime of a script) again and again.
"""
from __future__ import print_function
from __future__ import division
import pandas as pd
import cea.inputlocator
# keep track of locators previously seen so we don't re-read excel files twice
_locators = {}
class SupplySystemsDatabase(object):
"""
Expose the worksheets in supply_systems.xls as pandas.Dataframes.
"""
def __init__(self, locator):
"""
:param cea.inputlocator.InputLocator locator: provides the path to the
"""
conversion_systems_worksheets,\
distribution_systems_worksheets,\
feedstocks_worksheets = self.read_excel(locator)
self.FEEDSTOCKS = feedstocks_worksheets
self.PIPING = distribution_systems_worksheets["THERMAL_GRID"]
self.ALL_IN_ONE_SYSTEMS = conversion_systems_worksheets["ALL_IN_ONE_SYSTEMS"]
self.PV = conversion_systems_worksheets["PV"]
self.SC = conversion_systems_worksheets["SC"]
self.PVT = conversion_systems_worksheets["PVT"]
self.Boiler = conversion_systems_worksheets["Boiler"]
self.Furnace = conversion_systems_worksheets["Furnace"]
self.FC = conversion_systems_worksheets["FC"]
self.CCGT = conversion_systems_worksheets["CCGT"]
self.Chiller = conversion_systems_worksheets["Chiller"]
self.Absorption_chiller = conversion_systems_worksheets["Absorption_chiller"]
self.CT = conversion_systems_worksheets["CT"]
self.HEX = conversion_systems_worksheets["HEX"]
self.BH = conversion_systems_worksheets["BH"]
self.HP = conversion_systems_worksheets["HP"]
self.TES = conversion_systems_worksheets["TES"]
self.Pump = conversion_systems_worksheets["Pump"]
def read_excel(self, locator):
"""Read in the excel file, using the cache _locators"""
global _locators
if locator in _locators:
conversion_systems_worksheets, distribution_systems_worksheets, feedstocks_worksheets = _locators[locator]
else:
conversion_systems_worksheets = pd.read_excel(locator.get_database_conversion_systems(), sheet_name=None)
distribution_systems_worksheets = pd.read_excel(locator.get_database_distribution_systems(), sheet_name=None)
feedstocks_worksheets = pd.read_excel(locator.get_database_feedstocks(), sheet_name=None)
_locators[locator] = conversion_systems_worksheets, distribution_systems_worksheets, feedstocks_worksheets
return conversion_systems_worksheets, distribution_systems_worksheets, feedstocks_worksheets
| """
This module provides an interface to the "supply_systems.xls" file (locator.get_database_supply_systems()) - the point
is to avoid reading this data (which is constant during the lifetime of a script) again and again.
"""
from __future__ import print_function
from __future__ import division
import pandas as pd
import cea.inputlocator
# keep track of locators previously seen so we don't re-read excel files twice
_locators = {}
class SupplySystemsDatabase(object):
"""
Expose the worksheets in supply_systems.xls as pandas.Dataframes.
"""
def __init__(self, locator):
"""
:param cea.inputlocator.InputLocator locator: provides the path to the
"""
all_worksheets = self.read_excel(locator)
self.ALL_IN_ONE_SYSTEMS = all_worksheets["ALL_IN_ONE_SYSTEMS"]
self.FEEDSTOCKS = all_worksheets["FEEDSTOCKS"]
self.PV = all_worksheets["PV"]
self.SC = all_worksheets["SC"]
self.PVT = all_worksheets["PVT"]
self.Boiler = all_worksheets["Boiler"]
self.Furnace = all_worksheets["Furnace"]
self.FC = all_worksheets["FC"]
self.CCGT = all_worksheets["CCGT"]
self.Chiller = all_worksheets["Chiller"]
self.Absorption_chiller = all_worksheets["Absorption_chiller"]
self.CT = all_worksheets["CT"]
self.HEX = all_worksheets["HEX"]
self.BH = all_worksheets["BH"]
self.HP = all_worksheets["HP"]
self.TES = all_worksheets["TES"]
self.Pump = all_worksheets["Pump"]
self.PIPING = all_worksheets["PIPING"]
self.DETAILED_ELEC_COSTS = all_worksheets["DETAILED_ELEC_COSTS"]
def read_excel(self, locator):
"""Read in the excel file, using the cache _locators"""
global _locators
if locator in _locators:
all_worksheets = _locators[locator]
else:
all_worksheets = pd.read_excel(locator.get_database_supply_systems(), sheet_name=None)
_locators[locator] = all_worksheets
return all_worksheets | mit | Python |
e8b50a70fc7de1842ebe8bc796736459bf154432 | Add the new cluster validity methods in the module. | makism/dyfunconn | dyconnmap/cluster/__init__.py | dyconnmap/cluster/__init__.py | # -*- coding: utf-8 -*-
"""
"""
# Author: Avraam Marimpis <avraam.marimpis@gmail.com>
from .ng import NeuralGas
from .mng import MergeNeuralGas
from .rng import RelationalNeuralGas
from .gng import GrowingNeuralGas
from .som import SOM
from .umatrix import umatrix
from .validity import ray_turi, davies_bouldin
__all__ = [
"NeuralGas",
"MergeNeuralGas",
"RelationalNeuralGas",
"GrowingNeuralGas",
"SOM",
"umatrix",
"ray_turi",
"davies_bouldin",
]
| # -*- coding: utf-8 -*-
"""
"""
# Author: Avraam Marimpis <avraam.marimpis@gmail.com>
from .ng import NeuralGas
from .mng import MergeNeuralGas
from .rng import RelationalNeuralGas
from .gng import GrowingNeuralGas
from .som import SOM
from .umatrix import umatrix
__all__ = [
"NeuralGas",
"MergeNeuralGas",
"RelationalNeuralGas",
"GrowingNeuralGas",
"SOM",
"umatrix",
]
| bsd-3-clause | Python |
bfb26e4704dc5303593936eec3c33c56e88334ed | test new sharedMemory feature | jcarpent/eigenpy,stack-of-tasks/eigenpy,stack-of-tasks/eigenpy,jcarpent/eigenpy,jcarpent/eigenpy | unittest/python/test_return_by_ref.py | unittest/python/test_return_by_ref.py | import return_by_ref
from return_by_ref import Matrix, RowMatrix, Vector
import numpy as np
def test_shared(mat):
m_ref = mat.ref()
m_ref.fill(0)
m_copy = mat.copy()
assert np.array_equal(m_ref,m_copy)
m_const_ref = mat.const_ref()
assert np.array_equal(m_const_ref,m_copy)
assert np.array_equal(m_const_ref,m_ref)
m_ref.fill(1)
assert not np.array_equal(m_ref,m_copy)
assert np.array_equal(m_const_ref,m_ref)
try:
m_const_ref.fill(2)
assert False
except:
assert True
def test_not_shared(mat):
m_ref = mat.ref()
m_ref.fill(100.)
m_copy = mat.copy()
assert not np.array_equal(m_ref,m_copy)
m_const_ref = mat.const_ref()
assert np.array_equal(m_const_ref,m_copy)
assert not np.array_equal(m_const_ref,m_ref)
m_ref.fill(10.)
assert not np.array_equal(m_ref,m_copy)
assert not np.array_equal(m_const_ref,m_ref)
try:
m_const_ref.fill(2)
assert True
except:
assert False
rows = 10
cols = 30
mat = Matrix(rows,cols)
row_mat = RowMatrix(rows,cols)
vec = Vector(rows,1)
test_shared(mat)
test_shared(row_mat)
test_shared(vec)
return_by_ref.sharedMemory(False)
test_not_shared(mat)
test_not_shared(row_mat)
test_not_shared(vec)
| from return_by_ref import Matrix, RowMatrix, Vector
import numpy as np
def test(mat):
m_ref = mat.ref()
m_ref.fill(0)
m_copy = mat.copy()
assert np.array_equal(m_ref,m_copy)
m_const_ref = mat.const_ref()
assert np.array_equal(m_const_ref,m_copy)
assert np.array_equal(m_const_ref,m_ref)
m_ref.fill(1)
assert not np.array_equal(m_ref,m_copy)
assert np.array_equal(m_const_ref,m_ref)
try:
m_const_ref.fill(2)
assert False
except:
assert True
rows = 10
cols = 30
mat = Matrix(rows,cols)
row_mat = RowMatrix(rows,cols)
vec = Vector(rows,1)
test(mat)
test(row_mat)
test(vec)
| bsd-2-clause | Python |
3e92aac756581de951259d20f08c1dec14e86ecd | Add test capturing expectation. Ref #372. | jaraco/keyring | tests/backends/test_chainer.py | tests/backends/test_chainer.py | import pytest
import keyring.backends.chainer
from keyring import backend
@pytest.fixture
def two_keyrings(monkeypatch):
def get_two():
class Keyring1(backend.KeyringBackend):
priority = 1
def get_password(self, system, user):
return 'ring1-{system}-{user}'.format(**locals())
def set_password(self, system, user, password):
pass
class Keyring2(backend.KeyringBackend):
priority = 2
def get_password(self, system, user):
return 'ring2-{system}-{user}'.format(**locals())
def set_password(self, system, user, password):
raise NotImplementedError()
return Keyring1(), Keyring2()
monkeypatch.setattr('keyring.backend.get_all_keyring', get_two)
class TestChainer:
def test_chainer_gets_from_highest_priority(self, two_keyrings):
chainer = keyring.backends.chainer.ChainerBackend()
pw = chainer.get_password('alpha', 'bravo')
assert pw == 'ring2-alpha-bravo'
def test_chainer_defers_to_fail(self, monkeypatch):
"""
The Chainer backend should defer to the Fail backend when there are
no backends to be chained.
"""
monkeypatch.setattr('keyring.backend.get_all_keyring', tuple)
assert keyring.backend.by_priority(
keyring.backends.chainer.ChainerBackend
) < keyring.backend.by_priority(keyring.backends.fail.Keyring)
| import pytest
import keyring.backends.chainer
from keyring import backend
@pytest.fixture
def two_keyrings(monkeypatch):
def get_two():
class Keyring1(backend.KeyringBackend):
priority = 1
def get_password(self, system, user):
return 'ring1-{system}-{user}'.format(**locals())
def set_password(self, system, user, password):
pass
class Keyring2(backend.KeyringBackend):
priority = 2
def get_password(self, system, user):
return 'ring2-{system}-{user}'.format(**locals())
def set_password(self, system, user, password):
raise NotImplementedError()
return Keyring1(), Keyring2()
monkeypatch.setattr('keyring.backend.get_all_keyring', get_two)
class TestChainer:
def test_chainer_gets_from_highest_priority(self, two_keyrings):
chainer = keyring.backends.chainer.ChainerBackend()
pw = chainer.get_password('alpha', 'bravo')
assert pw == 'ring2-alpha-bravo'
| mit | Python |
8eaaa4e480ab4b771a51dbe46016fa186e860e22 | fix bug in nbio.django.views | nbio/nbio-django | nbio/django/views.py | nbio/django/views.py | __license__ = "Apache 2.0"
__copyright__ = "Copyright 2008 nb.io"
__author__ = "Randy Reddig - ydnar@nb.io"
from nbio.django.shortcuts import render_response
from django.http import Http404
DEFAULT_CONTENT_TYPE = 'text/html'
def null():
return
def auto(request, **kwargs):
try:
t = kwargs['template']
except:
raise Http404
return render_response(request, t)
| __license__ = "Apache 2.0"
__copyright__ = "Copyright 2008 nb.io"
__author__ = "Randy Reddig - ydnar@nb.io"
from nbio.django.shortcuts import render_response
from django.http import Http404
DEFAULT_CONTENT_TYPE = 'text/html'
def null():
return
def auto(request, **kwargs):
try:
t = kwargs['template']
except:
raise Http404
return render_response(t)
| apache-2.0 | Python |
eca2cf86ca180b2f32f2e305649bec8161fc8d54 | fix #366 | luis-rr/saga-python,mehdisadeghi/saga-python,telamonian/saga-python,mehdisadeghi/saga-python,luis-rr/saga-python,telamonian/saga-python,luis-rr/saga-python | saga/utils/pty_exceptions.py | saga/utils/pty_exceptions.py |
__author__ = "Andre Merzky"
__copyright__ = "Copyright 2013, The SAGA Project"
__license__ = "MIT"
import saga.exceptions as se
# ----------------------------------------------------------------
#
def translate_exception (e, msg=None) :
"""
In many cases, we should be able to roughly infer the exception cause
from the error message -- this is centrally done in this method. If
possible, it will return a new exception with a more concise error
message and appropriate exception type.
"""
if not issubclass (e.__class__, se.SagaException) :
# we do not touch non-saga exceptions
return e
if not issubclass (e.__class__, se.NoSuccess) :
# this seems to have a specific cause already, leave it alone
return e
cmsg = e._plain_message
if msg :
cmsg = "%s (%s)" % (cmsg, msg)
lmsg = cmsg.lower ()
if 'could not resolve hostname' in lmsg :
e = se.BadParameter (cmsg)
elif 'connection timed out' in lmsg :
e = se.BadParameter (cmsg)
elif 'connection refused' in lmsg :
e = se.BadParameter (cmsg)
elif 'auth' in lmsg :
e = se.AuthorizationFailed (cmsg)
elif 'man-in-the-middle' in lmsg :
e = se.AuthenticationFailed ("ssh key mismatch detected: %s" % cmesg)
elif 'pass' in lmsg :
e = se.AuthenticationFailed (cmsg)
elif 'ssh_exchange_identification' in lmsg :
e = se.AuthenticationFailed ("too frequent login attempts, or sshd misconfiguration: %s" % cmsg)
elif 'denied' in lmsg :
e = se.PermissionDenied (cmsg)
elif 'shared connection' in lmsg :
e = se.NoSuccess ("Insufficient system resources: %s" % cmsg)
elif 'pty allocation' in lmsg :
e = se.NoSuccess ("Insufficient system resources: %s" % cmsg)
elif 'Connection to master closed' in lmsg :
e = se.NoSuccess ("Connection failed (insufficient system resources?): %s" % cmsg)
return e
|
__author__ = "Andre Merzky"
__copyright__ = "Copyright 2013, The SAGA Project"
__license__ = "MIT"
import saga.exceptions as se
# ----------------------------------------------------------------
#
def translate_exception (e, msg=None) :
"""
In many cases, we should be able to roughly infer the exception cause
from the error message -- this is centrally done in this method. If
possible, it will return a new exception with a more concise error
message and appropriate exception type.
"""
if not issubclass (e.__class__, se.SagaException) :
# we do not touch non-saga exceptions
return e
if not issubclass (e.__class__, se.NoSuccess) :
# this seems to have a specific cause already, leave it alone
return e
cmsg = e._plain_message
if msg :
cmsg = "%s (%s)" % (cmsg, msg)
lmsg = cmsg.lower ()
if 'could not resolve hostname' in lmsg :
e = se.BadParameter (cmsg)
elif 'connection timed out' in lmsg :
e = se.BadParameter (cmsg)
elif 'connection refused' in lmsg :
e = se.BadParameter (cmsg)
elif 'auth' in lmsg :
e = se.AuthorizationFailed (cmsg)
elif 'pass' in lmsg :
e = se.AuthenticationFailed (cmsg)
elif 'ssh_exchange_identification' in lmsg :
e = se.AuthenticationFailed ("too frequent login attempts, or sshd misconfiguration: %s" % cmsg)
elif 'denied' in lmsg :
e = se.PermissionDenied (cmsg)
elif 'shared connection' in lmsg :
e = se.NoSuccess ("Insufficient system resources: %s" % cmsg)
elif 'pty allocation' in lmsg :
e = se.NoSuccess ("Insufficient system resources: %s" % cmsg)
elif 'Connection to master closed' in lmsg :
e = se.NoSuccess ("Connection failed (insufficient system resources?): %s" % cmsg)
return e
| mit | Python |
ef21a9aacc305df069635399bfd6dcb9a9da25b6 | Integrate LLVM at llvm/llvm-project@7dde2a59e61a | google/tsl,google/tsl,google/tsl | third_party/llvm/workspace.bzl | third_party/llvm/workspace.bzl | """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "7dde2a59e61ad669b52a93b0259ce2ad20d642fa"
LLVM_SHA256 = "9710cf0d5a3076036c86a452b5f4e71812e4d9e0d8cd72f7bcb73db6b723216f"
tf_http_archive(
name = name,
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-{commit}".format(commit = LLVM_COMMIT),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
build_file = "//third_party/llvm:llvm.BUILD",
patch_file = [
"//third_party/llvm:build.patch",
"//third_party/llvm:mathextras.patch",
"//third_party/llvm:toolchains.patch",
"//third_party/llvm:temporary.patch", # Cherry-picks and temporary reverts. Do not remove even if temporary.patch is empty.
],
link_files = {"//third_party/llvm:run_lit.sh": "mlir/run_lit.sh"},
)
| """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "674729813e3be02af7bda02cfe577398763f58b9"
LLVM_SHA256 = "4abe4c3617c12b35db6b00c870bc5438c9f9cb487a79caa15cccb9a0fc029424"
tf_http_archive(
name = name,
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-{commit}".format(commit = LLVM_COMMIT),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
build_file = "//third_party/llvm:llvm.BUILD",
patch_file = [
"//third_party/llvm:build.patch",
"//third_party/llvm:mathextras.patch",
"//third_party/llvm:toolchains.patch",
"//third_party/llvm:temporary.patch", # Cherry-picks and temporary reverts. Do not remove even if temporary.patch is empty.
],
link_files = {"//third_party/llvm:run_lit.sh": "mlir/run_lit.sh"},
)
| apache-2.0 | Python |
a3715ffe1d54d33abefad6266c64814d5e3c75ac | Integrate LLVM at llvm/llvm-project@c6013f71a455 | tensorflow/runtime,tensorflow/runtime,tensorflow/runtime,tensorflow/runtime | third_party/llvm/workspace.bzl | third_party/llvm/workspace.bzl | """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tfrt_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "c6013f71a4555f6d9ef9c60e6bc4376ad63f1c47"
LLVM_SHA256 = "644a1f9db6e55ba28fba1e03fe6c2d28514d47e1e02210b4b281868d7a7af70c"
tfrt_http_archive(
name = name,
build_file = "//third_party/llvm:BUILD",
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-" + LLVM_COMMIT,
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
)
| """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tfrt_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "43d6991c2a4cc2ac374e68c029634f2b59ffdfdf"
LLVM_SHA256 = "6be97e134eab943941bbb06ad0c714070dc24cb4418a104813c1e9a2ca6655f7"
tfrt_http_archive(
name = name,
build_file = "//third_party/llvm:BUILD",
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-" + LLVM_COMMIT,
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
)
| apache-2.0 | Python |
f1099e777d2e14fa1429f72352dfd90fc08250e6 | Fix newlines to match formatting in the rest of project. | reactiveops/pentagon,reactiveops/pentagon,reactiveops/pentagon | pentagon/filters.py | pentagon/filters.py | import re
def register_filters():
"""Register a function with decorator"""
registry = {}
def registrar(func):
registry[func.__name__] = func
return func
registrar.all = registry
return registrar
filter = register_filters()
def get_jinja_filters():
"""Return all registered custom jinja filters"""
return filter.all
@filter
def regex_trim(input, regex, replace=''):
"""
Trims or replaces the regex match in an input string.
input (string): the input string to search for matches
regex (string): regex to match
replace (string - optional): a string to replace any matches with. Defaults to trimming the match.
"""
return re.sub(regex, replace, input) | import re
def register_filters():
"""Register a function with decorator"""
registry = {}
def registrar(func):
registry[func.__name__] = func
return func
registrar.all = registry
return registrar
filter = register_filters()
def get_jinja_filters():
"""Return all registered custom jinja filters"""
return filter.all
@filter
def regex_trim(input, regex, replace=''):
"""
Trims or replaces the regex match in an input string.
input (string): the input string to search for matches
regex (string): regex to match
replace (string - optional): a string to replace any matches with. Defaults to trimming the match.
"""
return re.sub(regex, replace, input) | apache-2.0 | Python |
f359b1feb5567dd3627eba0c03701177355c48a4 | Improve coverage in dakota.methods.base module | csdms/dakota,csdms/dakota | dakota/tests/test_dakota_base.py | dakota/tests/test_dakota_base.py | #!/usr/bin/env python
#
# Tests for dakota.methods.base module.
#
# Call with:
# $ nosetests -sv
#
# Mark Piper (mark.piper@colorado.edu)
from nose.tools import raises, assert_true, assert_is_none
from dakota.methods.base import DakotaBase
from . import start_dir, data_dir
# Helpers --------------------------------------------------------------
class Concrete(DakotaBase):
"""A subclass of DakotaBase used for testing."""
def __init__(self):
DakotaBase.__init__(self)
self.variable_descriptors = ['x0', 'x1']
def method_block(self):
return DakotaBase.method_block(self)
# Fixtures -------------------------------------------------------------
def setup_module():
"""Called before any tests are performed."""
print('\n*** ' + __name__)
global c
c = Concrete()
def teardown_module():
"""Called after all tests have completed."""
pass
# Tests ----------------------------------------------------------------
@raises(TypeError)
def test_instantiate():
"""Test whether DakotaBase fails to instantiate."""
d = DakotaBase()
def test_environment_block():
"""Test type of environment_block method results."""
s = c.environment_block()
assert_true(type(s) is str)
def test_method_block():
"""Test type of method_block method results."""
s = c.method_block()
assert_true(type(s) is str)
def test_variables_block():
"""Test type of variables_block method results."""
s = c.variables_block()
assert_true(type(s) is str)
def test_interface_block():
"""Test type of interface_block method results."""
s = c.interface_block()
assert_true(type(s) is str)
def test_responses_block():
"""Test type of responses_block method results."""
s = c.responses_block()
assert_true(type(s) is str)
| #!/usr/bin/env python
#
# Tests for dakota.methods.base module.
#
# Call with:
# $ nosetests -sv
#
# Mark Piper (mark.piper@colorado.edu)
from nose.tools import raises, assert_true, assert_is_none
from dakota.methods.base import DakotaBase
from . import start_dir, data_dir
# Helpers --------------------------------------------------------------
class Concrete(DakotaBase):
"""A subclass of DakotaBase used for testing."""
def __init__(self):
DakotaBase.__init__(self)
def method_block(self):
pass
# Fixtures -------------------------------------------------------------
def setup_module():
"""Called before any tests are performed."""
print('\n*** ' + __name__)
global c
c = Concrete()
def teardown_module():
"""Called after all tests have completed."""
pass
# Tests ----------------------------------------------------------------
@raises(TypeError)
def test_instantiate():
"""Test whether DakotaBase fails to instantiate."""
d = DakotaBase()
def test_environment_block():
"""Test type of environment_block method results."""
s = c.environment_block()
assert_true(type(s) is str)
def test_method_block():
"""Test type of method_block method results."""
s = c.method_block()
assert_is_none(s)
def test_variables_block():
"""Test type of variables_block method results."""
s = c.variables_block()
assert_true(type(s) is str)
def test_interface_block():
"""Test type of interface_block method results."""
s = c.interface_block()
assert_true(type(s) is str)
def test_responses_block():
"""Test type of responses_block method results."""
s = c.responses_block()
assert_true(type(s) is str)
| mit | Python |
52ba0bccca023eb480eecfe9a5448eb97c1656d5 | correct id | dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq | corehq/apps/userreports/management/commands/rebuild_tables_by_domain.py | corehq/apps/userreports/management/commands/rebuild_tables_by_domain.py | from django.core.management.base import BaseCommand, CommandError
from corehq.apps.userreports import tasks
from corehq.apps.userreports.models import DataSourceConfiguration, StaticDataSourceConfiguration
class Command(BaseCommand):
help = "Rebuild all user configurable reporting tables in domain"
args = 'domain'
label = ""
def handle(self, *args, **options):
if len(args) < 1:
raise CommandError('Usage is rebuild_tables_by_domain %s' % self.args)
domain = args[0]
tables = StaticDataSourceConfiguration.by_domain(domain)
tables.extend(DataSourceConfiguration.by_domain(domain))
print "Rebuilding {} tables".format(len(tables))
for table in tables:
tasks.rebuild_indicators(table._id)
| from django.core.management.base import BaseCommand, CommandError
from corehq.apps.userreports import tasks
from corehq.apps.userreports.models import DataSourceConfiguration, StaticDataSourceConfiguration
class Command(BaseCommand):
help = "Rebuild all user configurable reporting tables in domain"
args = 'domain'
label = ""
def handle(self, *args, **options):
if len(args) < 1:
raise CommandError('Usage is rebuild_tables_by_domain %s' % self.args)
domain = args[0]
tables = StaticDataSourceConfiguration.by_domain(domain)
tables.extend(DataSourceConfiguration.by_domain(domain))
print "Rebuilding {} tables".format(len(tables))
for table in tables:
tasks.rebuild_indicators(table.config_id)
| bsd-3-clause | Python |
fe2e2825021ffd64d1c96608b8e789785d935e04 | put correct type in row descriptions | josh-mckenzie/cassandra-dbapi2,EnigmaCurry/cassandra-dbapi2,mshuler/cassandra-dbapi2,iflatness/cql,iGivefirst/cassandra-dbapi2,pcmanus/python-cql,maraca/cassandra-dbapi2,stanhu/cassandra-dbapi2,flowroute/cassandra-dbapi2,pandu-rao/cassandra-dbapi2,diezguerra/cassandra-dbapi2,CanWeStudio/cassandra-dbapi2 | cql/decoders.py | cql/decoders.py |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cql
from marshal import (unmarshallers, unmarshal_noop)
class SchemaDecoder(object):
"""
Decode binary column names/values according to schema.
"""
def __init__(self, schema):
self.schema = schema
def decode_description(self, row):
schema = self.schema
description = []
for column in row.columns:
name = column.name
comparator = schema.name_types.get(name, schema.default_name_type)
unmarshal = unmarshallers.get(comparator, unmarshal_noop)
validator = schema.value_types.get(name, schema.default_value_type)
description.append((unmarshal(name), validator, None, None, None, None, True))
return description
def decode_row(self, row):
schema = self.schema
values = []
for column in row.columns:
if column.value is None:
values.append(None)
continue
name = column.name
validator = schema.value_types.get(name, schema.default_value_type)
unmarshal = unmarshallers.get(validator, unmarshal_noop)
values.append(unmarshal(column.value))
return values
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cql
from marshal import (unmarshallers, unmarshal_noop)
class SchemaDecoder(object):
"""
Decode binary column names/values according to schema.
"""
def __init__(self, schema):
self.schema = schema
def decode_description(self, row):
schema = self.schema
description = []
for column in row.columns:
name = column.name
comparator = schema.name_types.get(name, schema.default_name_type)
unmarshal = unmarshallers.get(comparator, unmarshal_noop)
description.append((unmarshal(name), comparator, None, None, None, None, True))
return description
def decode_row(self, row):
schema = self.schema
values = []
for column in row.columns:
if column.value is None:
values.append(None)
continue
name = column.name
validator = schema.value_types.get(name, schema.default_value_type)
unmarshal = unmarshallers.get(validator, unmarshal_noop)
values.append(unmarshallers.get(validator, unmarshal_noop)(column.value))
return values
| apache-2.0 | Python |
ae1f3f182a2095b0f1607a8e8e0653cdb5b5f2e2 | Remove use of f-string in setup.py #3650 (#3662) | parrt/antlr4,parrt/antlr4,ericvergnaud/antlr4,ericvergnaud/antlr4,ericvergnaud/antlr4,antlr/antlr4,ericvergnaud/antlr4,parrt/antlr4,ericvergnaud/antlr4,parrt/antlr4,antlr/antlr4,ericvergnaud/antlr4,antlr/antlr4,ericvergnaud/antlr4,antlr/antlr4,ericvergnaud/antlr4,antlr/antlr4,parrt/antlr4,parrt/antlr4,antlr/antlr4,ericvergnaud/antlr4,parrt/antlr4,parrt/antlr4,antlr/antlr4,parrt/antlr4,antlr/antlr4,ericvergnaud/antlr4,antlr/antlr4,antlr/antlr4,parrt/antlr4 | runtime/Python3/setup.py | runtime/Python3/setup.py | from setuptools import setup
v = '4.10.1'
setup(
name='antlr4-python3-runtime',
version=v,
packages=['antlr4', 'antlr4.atn', 'antlr4.dfa', 'antlr4.tree', 'antlr4.error', 'antlr4.xpath'],
package_dir={'': 'src'},
install_requires=[
"typing ; python_version<'3.5'",
],
url='http://www.antlr.org',
license='BSD',
author='Eric Vergnaud, Terence Parr, Sam Harwell',
author_email='eric.vergnaud@wanadoo.fr',
entry_points={'console_scripts': ['pygrun=antlr4._pygrun:main']},
description='ANTLR %s runtime for Python 3' % v
)
| from setuptools import setup
v = '4.10.1'
setup(
name='antlr4-python3-runtime',
version=v,
packages=['antlr4', 'antlr4.atn', 'antlr4.dfa', 'antlr4.tree', 'antlr4.error', 'antlr4.xpath'],
package_dir={'': 'src'},
install_requires=[
"typing ; python_version<'3.5'",
],
url='http://www.antlr.org',
license='BSD',
author='Eric Vergnaud, Terence Parr, Sam Harwell',
author_email='eric.vergnaud@wanadoo.fr',
entry_points={'console_scripts': ['pygrun=antlr4._pygrun:main']},
description=f'ANTLR {v} runtime for Python 3'
)
| bsd-3-clause | Python |
45d9b9716e57e979ac35619c2508093a775b555d | add comment in sql | HirokiUmatani/PearPackage | PearPyPac/DB.py | PearPyPac/DB.py | # -*- coding: utf-8 -*-
import MySQLdb
import configparser
class PearMySQL(object):
def __init__(self):
""" initialize """
self.conf = None
self.connector = None
self.cursor = None
def setConfig(self, conf_path):
""" set configure """
self.conf = configparser.ConfigParser()
self.conf.read(conf_path)
def openDB(self):
""" open database """
self.openConnect()
self.openCursor()
def closeDB(self):
""" close database """
self.closeCursor()
self.closeConnector()
def openConnect(self):
""" set connector for database """
_host = self.conf.get("mysql", "host")
_db = self.conf.get("mysql", "db")
_user = self.conf.get("mysql", "user")
_passwd = self.conf.get("mysql", "passwd")
self.connector = MySQLdb.connect(host=_host, db=_db, user=_user,
passwd=_passwd)
def openCursor(self):
""" set cursor for database """
self.cursor = self.connector.cursor(MySQLdb.cursors.DictCursor)
def closeConnector(self):
""" close connector for database """
self.connector.close()
def closeCursor(self):
""" close cursor for database """
self.cursor.close()
# fetch request
def queryFetch(self, query):
self.openDB()
self.cursor.execute(query)
results = self.cursor.fetchall()
self.closeDB()
return results
# insert, update & delete request
def query(self, query):
self.openDB()
self.cursor.execute(query)
self.connector.commit()
self.closeDB()
if __name__ == '__main__':
db = PearMySQL()
| # -*- coding: utf-8 -*-
import MySQLdb
import configparser
class PearMySQL(object):
def __init__(self):
""" initialize """
self.conf = None
self.connector = None
self.cursor = None
def setConfig(self, conf_path):
""" set configure """
self.conf = configparser.ConfigParser()
self.conf.read(conf_path)
def openDB(self):
""" open database """
self.openConnect()
self.openCursor()
def closeDB(self):
""" close database """
self.closeCursor()
self.closeConnector()
def openConnect(self):
""" set connector for database """
_host = self.conf.get("mysql", "host")
_db = self.conf.get("mysql", "db")
_user = self.conf.get("mysql", "user")
_passwd = self.conf.get("mysql", "passwd")
self.connector = MySQLdb.connect(host=_host, db=_db, user=_user,
passwd=_passwd)
def openCursor(self):
""" set cursor for database """
self.cursor = self.connector.cursor(MySQLdb.cursors.DictCursor)
def closeConnector(self):
""" close connector for database """
self.connector.close()
def closeCursor(self):
""" close cursor for database """
self.cursor.close()
def queryFetch(self, query):
self.openDB()
self.cursor.execute(query)
results = self.cursor.fetchall()
self.closeDB()
return results
def query(self, query):
self.openDB()
self.cursor.execute(query)
self.connector.commit()
self.closeDB()
if __name__ == '__main__':
db = PearMySQL()
| mit | Python |
42758d9c709b9842242904c59e9be116958f0357 | Handle non-ascii characters better. | DataONEorg/d1_python,DataONEorg/d1_python,DataONEorg/d1_python,DataONEorg/d1_python | d1_client_cli/src/d1_client_cli/print_level.py | d1_client_cli/src/d1_client_cli/print_level.py | def print_level(level, msg):
''' Print the information in as Unicode safe manner as possible.
'''
for l in unicode(msg).split(u'\n'):
msg = u'%s%s' % (u'{0: <8s}'.format(level), unicode(l))
print msg.encode('utf-8')
def print_debug(msg):
print_level(u'DEBUG', unicode(msg))
def print_error(msg):
print_level(u'ERROR', unicode(msg))
def print_warn(msg):
print_level(u'WARN', unicode(msg))
def print_info(msg):
print_level(u'', unicode(msg))
| def print_level(level, msg):
for l in str(msg).split('\n'):
print('{0: <8s}{1}'.format(level, l))
def print_debug(msg):
print_level('DEBUG', msg)
def print_error(msg):
print_level('ERROR', msg)
def print_warn(msg):
print_level('WARN', msg)
def print_info(msg):
print_level('', msg)
| apache-2.0 | Python |
1917557f0a7f63184beedcad7276d717278561fc | convert timestamp to string in contrib | aureooms/sak,aureooms/sak | sake/contributions.py | sake/contributions.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import lib.git, lib.args, lib.json, lib.time
def commit ( message, duration, authors = None ) :
authors = lib.args.listify( authors )
with lib.json.proxy( "contributions.json", mode = "w", default = [] ) as contribs :
contribs.append( {
"authors" : authors,
"message" : message,
"duration" : duration,
"timestamp" : str( lib.time.nanoseconds() )
} )
lib.git.add( "contributions.json" )
lib.git.commit( "-m", message )
| # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import lib.git, lib.args, lib.json, lib.time
def commit ( message, duration, authors = None ) :
authors = lib.args.listify( authors )
with lib.json.proxy( "contributions.json", mode = "w", default = [] ) as contribs :
contribs.append( {
"authors" : authors,
"message" : message,
"duration" : duration,
"timestamp" : lib.time.nanoseconds()
} )
lib.git.add( "contributions.json" )
lib.git.commit( "-m", message )
| agpl-3.0 | Python |
6c0397c369ab9d6b9a4f6cbab27b490ef26ee728 | Fix TestConstVariables.py | llvm-mirror/lldb,apple/swift-lldb,apple/swift-lldb,llvm-mirror/lldb,apple/swift-lldb,apple/swift-lldb,llvm-mirror/lldb,llvm-mirror/lldb,apple/swift-lldb,llvm-mirror/lldb,apple/swift-lldb | packages/Python/lldbsuite/test/lang/c/const_variables/TestConstVariables.py | packages/Python/lldbsuite/test/lang/c/const_variables/TestConstVariables.py | """Check that compiler-generated constant values work correctly"""
from __future__ import print_function
import os, time
import lldb
from lldbsuite.test.lldbtest import *
import lldbsuite.test.lldbutil as lldbutil
class ConstVariableTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
@expectedFailureAll(
oslist=["freebsd", "linux"],
compiler="clang", compiler_version=["<", "3.5"])
@expectedFailureAll(
oslist=["freebsd", "linux"],
compiler="clang", compiler_version=["=", "3.7"])
@expectedFailureAll(
oslist=["freebsd", "linux"],
compiler="clang", compiler_version=["=", "3.8"])
@expectedFailureAll(oslist=["freebsd", "linux"], compiler="icc")
@expectedFailureAll(archs=['mips', 'mipsel', 'mips64', 'mips64el'])
@expectedFailureWindows("llvm.org/pr24489: Name lookup not working correctly on Windows")
@expectedFailureWindows("llvm.org/pr24490: We shouldn't be using platform-specific names like `getpid` in tests")
def test_and_run_command(self):
"""Test interpreted and JITted expressions on constant values."""
self.build()
exe = os.path.join(os.getcwd(), "a.out")
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
# Break inside the main.
lldbutil.run_break_set_by_symbol (self, "main", num_expected_locations=1)
self.runCmd("run", RUN_SUCCEEDED)
# The stop reason of the thread should be breakpoint.
self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
substrs = ['stopped',
'stop reason = breakpoint'])
# The breakpoint should have a hit count of 1.
self.expect("breakpoint list -f", BREAKPOINT_HIT_ONCE,
substrs = [' resolved, hit count = 1'])
self.runCmd("next")
self.runCmd("next")
# Try frame variable.
self.expect("frame variable index", VARIABLES_DISPLAYED_CORRECTLY,
substrs = ['(int32_t) index = 512'])
# Try an interpreted expression.
self.expect("expr (index + 512)", VARIABLES_DISPLAYED_CORRECTLY,
substrs = ['1024'])
# Try a JITted expression.
self.expect("expr (int)getpid(); (index - 256)", VARIABLES_DISPLAYED_CORRECTLY,
substrs = ['256'])
self.runCmd("kill")
| """Check that compiler-generated constant values work correctly"""
from __future__ import print_function
import os, time
import lldb
from lldbsuite.test.lldbtest import *
import lldbsuite.test.lldbutil as lldbutil
class ConstVariableTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
@expectedFailureAll(
oslist=["freebsd", "linux"],
compiler="clang", compiler_version=["<", "3.5"])
@expectedFailureAll(
oslist=["freebsd", "linux"],
compiler="clang", compiler_version=["=", "3.7"])
@expectedFailureAll(
oslist=["freebsd", "linux"],
compiler="clang", compiler_version=["=", "3.8"])
@expectedFailureAll(oslist=["freebsd", "linux"], compiler="icc")
@expectedFailureWindows("llvm.org/pr24489: Name lookup not working correctly on Windows")
@expectedFailureWindows("llvm.org/pr24490: We shouldn't be using platform-specific names like `getpid` in tests")
def test_and_run_command(self):
"""Test interpreted and JITted expressions on constant values."""
self.build()
exe = os.path.join(os.getcwd(), "a.out")
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
# Break inside the main.
lldbutil.run_break_set_by_symbol (self, "main", num_expected_locations=1)
self.runCmd("run", RUN_SUCCEEDED)
# The stop reason of the thread should be breakpoint.
self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
substrs = ['stopped',
'stop reason = breakpoint'])
# The breakpoint should have a hit count of 1.
self.expect("breakpoint list -f", BREAKPOINT_HIT_ONCE,
substrs = [' resolved, hit count = 1'])
self.runCmd("next")
self.runCmd("next")
# Try frame variable.
self.expect("frame variable index", VARIABLES_DISPLAYED_CORRECTLY,
substrs = ['(int32_t) index = 512'])
# Try an interpreted expression.
self.expect("expr (index + 512)", VARIABLES_DISPLAYED_CORRECTLY,
substrs = ['1024'])
# Try a JITted expression.
self.expect("expr (int)getpid(); (index - 256)", VARIABLES_DISPLAYED_CORRECTLY,
substrs = ['256'])
self.runCmd("kill")
| apache-2.0 | Python |
8724a598b7d08d9cd6cb77541625a80b53e96882 | Simplify `map_indexed` by building it from `starmap()` and `zip_with_iterable()`. @jcafhe | ReactiveX/RxPY,ReactiveX/RxPY | rx/core/operators/map.py | rx/core/operators/map.py | from typing import Callable, Any
from rx.internal.basic import identity
from rx.internal.utils import infinite
from rx import operators as ops
from rx.core import Observable, pipe
from rx.core.typing import Mapper, MapperIndexed, Observer, Disposable, Scheduler
# pylint: disable=redefined-builtin
def _map(mapper: Mapper = None) -> Callable[[Observable], Observable]:
_mapper = mapper or identity
def map(source: Observable) -> Observable:
"""Partially applied map operator.
Project each element of an observable sequence into a new form
by incorporating the element's index.
Example:
>>> map(source)
Args:
source: The observable source to transform.
Returns:
Returns an observable sequence whose elements are the
result of invoking the transform function on each element
of the source.
"""
def subscribe(obv: Observer, scheduler: Scheduler = None) -> Disposable:
def on_next(value: Any) -> None:
try:
result = _mapper(value)
except Exception as err: # pylint: disable=broad-except
obv.on_error(err)
else:
obv.on_next(result)
return source.subscribe_(on_next, obv.on_error, obv.on_completed, scheduler)
return Observable(subscribe)
return map
def _map_indexed(mapper_indexed: MapperIndexed = None) -> Callable[[Observable], Observable]:
def _identity(value: Any, index: int) -> Any:
return value
_mapper_indexed = mapper_indexed or _identity
return pipe(
ops.zip_with_iterable(infinite()),
ops.starmap(_mapper_indexed)
)
| from typing import Callable, Any
from rx.internal.basic import identity
from rx.core import Observable
from rx.core.typing import Mapper, MapperIndexed, Observer, Disposable, Scheduler
# pylint: disable=redefined-builtin
def _map(mapper: Mapper = None) -> Callable[[Observable], Observable]:
_mapper = mapper or identity
def map(source: Observable) -> Observable:
"""Partially applied map operator.
Project each element of an observable sequence into a new form
by incorporating the element's index.
Example:
>>> map(source)
Args:
source: The observable source to transform.
Returns:
Returns an observable sequence whose elements are the
result of invoking the transform function on each element
of the source.
"""
def subscribe(obv: Observer, scheduler: Scheduler = None) -> Disposable:
def on_next(value: Any) -> None:
try:
result = _mapper(value)
except Exception as err: # pylint: disable=broad-except
obv.on_error(err)
else:
obv.on_next(result)
return source.subscribe_(on_next, obv.on_error, obv.on_completed, scheduler)
return Observable(subscribe)
return map
def _map_indexed(mapper_indexed: MapperIndexed = None) -> Callable[[Observable], Observable]:
def _identity(value: Any, index: int) -> Any:
return value
_mapper_indexed = mapper_indexed or _identity
def map_indexed(source: Observable) -> Observable:
"""Partially applied indexed map operator.
Project each element of an observable sequence into a new form
by incorporating the element's index.
Example:
>>> ret = map_indexed(source)
Args:
source: The observable source to transform.
Returns:
Returns an observable sequence whose elements are the
result of invoking the transform function on each element
of the source.
"""
def subscribe(obv: Observer, scheduler: Scheduler = None) -> Disposable:
count = 0
def on_next(value: Any) -> None:
nonlocal count
try:
result = _mapper_indexed(value, count)
except Exception as err: # pylint: disable=broad-except
obv.on_error(err)
else:
count += 1
obv.on_next(result)
return source.subscribe_(on_next, obv.on_error, obv.on_completed, scheduler)
return Observable(subscribe)
return map_indexed
| mit | Python |
3d38848287b168cfbe3c9fe5297e7f322027634d | Delete excess code in the latest test scenario. | gnott/elife-poa-xml-generation,gnott/elife-poa-xml-generation | tests/test_parsePoaXml_test.py | tests/test_parsePoaXml_test.py | import unittest
import os
import re
os.sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import parsePoaXml
import generatePoaXml
# Import test settings last in order to override the regular settings
import poa_test_settings as settings
class TestParsePoaXml(unittest.TestCase):
def setUp(self):
self.passes = []
self.passes.append('elife-02935-v2.xml')
self.passes.append('elife-04637-v2.xml')
self.passes.append('elife-15743-v1.xml')
self.passes.append('elife-02043-v2.xml')
def test_parse(self):
for xml_file_name in self.passes:
file_path = settings.XLS_PATH + xml_file_name
articles = parsePoaXml.build_articles_from_article_xmls([file_path])
self.assertEqual(len(articles), 1)
if __name__ == '__main__':
unittest.main()
| import unittest
import os
import re
os.sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import parsePoaXml
import generatePoaXml
# Import test settings last in order to override the regular settings
import poa_test_settings as settings
def override_settings():
# For now need to override settings to use test data
generatePoaXml.settings = settings
def create_test_directories():
try:
os.mkdir(settings.TEST_TEMP_DIR)
except OSError:
pass
try:
os.mkdir(settings.TARGET_OUTPUT_DIR)
except OSError:
pass
class TestParsePoaXml(unittest.TestCase):
def setUp(self):
override_settings()
create_test_directories()
self.passes = []
self.passes.append('elife-02935-v2.xml')
self.passes.append('elife-04637-v2.xml')
self.passes.append('elife-15743-v1.xml')
self.passes.append('elife-02043-v2.xml')
def test_parse(self):
for xml_file_name in self.passes:
file_path = settings.XLS_PATH + xml_file_name
articles = parsePoaXml.build_articles_from_article_xmls([file_path])
self.assertEqual(len(articles), 1)
if __name__ == '__main__':
unittest.main()
| mit | Python |
2369cb9f3f94a979d48cd645b38c8b1ea0827ef9 | Set version as 1.16.0 | atztogo/phono3py,atztogo/phono3py,atztogo/phono3py,atztogo/phono3py | phono3py/version.py | phono3py/version.py | # Copyright (C) 2013 Atsushi Togo
# All rights reserved.
#
# This file is part of phonopy.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the phonopy project nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
__version__ = "1.16.0"
| # Copyright (C) 2013 Atsushi Togo
# All rights reserved.
#
# This file is part of phonopy.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the phonopy project nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
__version__ = "1.15.1-beta"
| bsd-3-clause | Python |
cd46a099b0af34125fcbd7ce1de1ae20ad002264 | Test minor modification | Sparkier/inviwo,Sparkier/inviwo,Sparkier/inviwo,inviwo/inviwo,Sparkier/inviwo,inviwo/inviwo,inviwo/inviwo,inviwo/inviwo,inviwo/inviwo,inviwo/inviwo,Sparkier/inviwo | tools/ivwpy/regression/test.py | tools/ivwpy/regression/test.py | #*********************************************************************************
#
# Inviwo - Interactive Visualization Workshop
#
# Copyright (c) 2013-2015 Inviwo Foundation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#*********************************************************************************
import glob
import os
import json
from .. util import *
class Test:
def __init__(self, name, group, path):
self.group = group
self.path = path
self.name = name
self.script = ""
self.config = {}
self.workspaces = glob.glob(self.path +"/*.inv")
configfile = toPath([self.path, "config.json"])
if os.path.exists(configfile):
with open(configfile, 'r') as f:
self.config = json.load(f)
def __str__(self):
return self.toString()
def toString(self):
return self.group + "/" + self.name
def getWorkspaces(self):
return self.workspaces
def getImages(self):
imgs = glob.glob(self.path +"/*.png")
imgs = [os.path.relpath(x, self.path) for x in imgs]
return imgs
def report(self, report):
report['group'] = self.group
report['name'] = self.name
report['path'] = self.path
report['script'] = self.script
report['config'] = self.config
return report
def makeOutputDir(self, base):
if not os.path.isdir(base):
raise RegressionError("Output dir does not exsist: " + dir)
mkdir([base, self.group])
mkdir([base, self.group, self.name])
return toPath([base, self.group, self.name])
raise RegressionError("Invalid Test kind") | #*********************************************************************************
#
# Inviwo - Interactive Visualization Workshop
#
# Copyright (c) 2013-2015 Inviwo Foundation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#*********************************************************************************
import glob
import os
import json
from .. util import *
class Test:
def __init__(self, name, group, path):
self.group = group
self.path = path
self.name = name
self.script = ""
self.config = {}
configfile = toPath([self.path, "config.json"])
if os.path.exists(configfile):
with open(configfile, 'r') as f:
self.config = json.load(f)
def __str__(self):
return self.toString()
def toString(self):
return self.group + "/" + self.name
def getWorkspaces(self):
paths = glob.glob(self.path +"/*.inv")
return paths
def getImages(self):
imgs = glob.glob(self.path +"/*.png")
imgs = [os.path.relpath(x, self.path) for x in imgs]
return imgs
def report(self, report):
report['group'] = self.group
report['name'] = self.name
report['path'] = self.path
report['script'] = self.script
report['config'] = self.config
return report
def makeOutputDir(self, base):
if not os.path.isdir(base):
raise RegressionError("Output dir does not exsist: " + dir)
mkdir([base, self.group])
mkdir([base, self.group, self.name])
return toPath([base, self.group, self.name])
raise RegressionError("Invalid Test kind") | bsd-2-clause | Python |
2c85ba795b9b3334f217fa9978634a4708efbe7d | Make new directory when extracting. | Arsey/keras-transfer-learning-for-oxford102 | create_caffe_splits.py | create_caffe_splits.py | #!/usr/bin/env python
import glob
import numpy as np
from scipy.io import loadmat
# Read .mat file containing training, testing, and validation sets.
setid = loadmat('data/setid.mat')
# The .mat file is 1-indexed, so we subtract one to match Caffe's convention.
idx_train = setid['trnid'][0] - 1
idx_test = setid['tstid'][0] - 1
idx_valid = setid['valid'][0] - 1
# Read .mat file containing image labels.
image_labels = loadmat('data/imagelabels.mat')['labels'][0]
# Subtract one to get 0-based labels
image_labels -= 1
files = sorted(glob.glob('data/oxford102/jpg/*.jpg'))
labels = np.array(zip(files, image_labels))
def write_set_file(fout, labels):
with open(fout, 'w+') as f:
for label in labels:
f.write('%s %s\n' % (label[0], label[1]))
np.random.seed(777)
idx_test_perm = idx_test[np.random.permutation(len(idx_test))]
idx_train_perm = idx_train[np.random.permutation(len(idx_train))]
CAFFE_HOME = os.getenv('CAFFE_HOME', '')
if CAFFE_HOME == '':
raise Exception('CAFFE_HOME must be set to the location of your Caffe installation.')
os.mkdir(CAFFE_HOME + '/data/oxford102')
write_set_file(CAFFE_HOME + '/data/oxford102/train.txt', labels[idx_train_perm,:])
write_set_file(CAFFE_HOME + '/data/oxford102/test.txt', labels[idx_test_perm,:])
| #!/usr/bin/env python
import glob
import numpy as np
from scipy.io import loadmat
# Read .mat file containing training, testing, and validation sets.
setid = loadmat('data/setid.mat')
# The .mat file is 1-indexed, so we subtract one to match Caffe's convention.
idx_train = setid['trnid'][0] - 1
idx_test = setid['tstid'][0] - 1
idx_valid = setid['valid'][0] - 1
# Read .mat file containing image labels.
image_labels = loadmat('data/imagelabels.mat')['labels'][0]
# Subtract one to get 0-based labels
image_labels -= 1
files = sorted(glob.glob('data/oxford102/jpg/*.jpg'))
labels = np.array(zip(files, image_labels))
def write_set_file(fout, labels):
with open(fout, 'w+') as f:
for label in labels:
f.write('%s %s\n' % (label[0], label[1]))
np.random.seed(777)
idx_test_perm = idx_test[np.random.permutation(len(idx_test))]
idx_train_perm = idx_train[np.random.permutation(len(idx_train))]
write_set_file('data/train.txt', labels[idx_train_perm,:])
write_set_file('data/test.txt', labels[idx_test_perm,:])
| mit | Python |
02d266b6e34b84d4cca5bcfc05d78490a1d10dec | Print import error when plugin import fails (#6748) | mociepka/saleor,mociepka/saleor,mociepka/saleor | saleor/plugins/checks.py | saleor/plugins/checks.py | from typing import TYPE_CHECKING, List
from django.conf import settings
from django.core.checks import Error, register
from django.utils.module_loading import import_string
if TYPE_CHECKING:
from .base_plugin import BasePlugin
@register()
def check_plugins(app_configs, **kwargs):
"""Confirm a correct import of plugins and manager."""
errors = []
check_manager(errors)
plugins = settings.PLUGINS or []
for plugin_path in plugins:
check_single_plugin(plugin_path, errors)
return errors
def check_manager(errors: List[Error]):
if not hasattr(settings, "PLUGINS_MANAGER") or not settings.PLUGINS_MANAGER:
errors.append(Error("Settings should contain PLUGINS_MANAGER env"))
return
try:
import_string(settings.PLUGINS_MANAGER)
except ImportError:
errors.append(
Error("Plugins Manager path: %s doesn't exist" % settings.PLUGINS_MANAGER)
)
def check_single_plugin(plugin_path: str, errors: List[Error]):
if not plugin_path:
errors.append(Error("Wrong plugin_path %s" % plugin_path))
return
try:
plugin_class = import_string(plugin_path)
except ImportError as e:
errors.append(Error(f"Failed to import plugin {plugin_path}: {e}"))
if not errors:
check_plugin_fields(["PLUGIN_ID"], plugin_class, errors)
def check_plugin_fields(
fields: List[str], plugin_class: "BasePlugin", errors: List[Error]
):
name = plugin_class.__name__ # type: ignore
for field in fields:
if not getattr(plugin_class, field, None):
errors.append(Error(f"Missing field {field} for plugin - {name}"))
| from typing import TYPE_CHECKING, List
from django.conf import settings
from django.core.checks import Error, register
from django.utils.module_loading import import_string
if TYPE_CHECKING:
from .base_plugin import BasePlugin
@register()
def check_plugins(app_configs, **kwargs):
"""Confirm a correct import of plugins and manager."""
errors = []
check_manager(errors)
plugins = settings.PLUGINS or []
for plugin_path in plugins:
check_single_plugin(plugin_path, errors)
return errors
def check_manager(errors: List[Error]):
if not hasattr(settings, "PLUGINS_MANAGER") or not settings.PLUGINS_MANAGER:
errors.append(Error("Settings should contain PLUGINS_MANAGER env"))
return
try:
import_string(settings.PLUGINS_MANAGER)
except ImportError:
errors.append(
Error("Plugins Manager path: %s doesn't exist" % settings.PLUGINS_MANAGER)
)
def check_single_plugin(plugin_path: str, errors: List[Error]):
if not plugin_path:
errors.append(Error("Wrong plugin_path %s" % plugin_path))
return
try:
plugin_class = import_string(plugin_path)
except ImportError:
errors.append(Error("Plugin with path: %s doesn't exist" % plugin_path))
if not errors:
check_plugin_fields(["PLUGIN_ID"], plugin_class, errors)
def check_plugin_fields(
fields: List[str], plugin_class: "BasePlugin", errors: List[Error]
):
name = plugin_class.__name__ # type: ignore
for field in fields:
if not getattr(plugin_class, field, None):
errors.append(Error(f"Missing field {field} for plugin - {name}"))
| bsd-3-clause | Python |
7ccf28f42896367862612a3fd5ffbcb0cf53f671 | fix bug | windprog/requestspool | requestspool/http.py | requestspool/http.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Copyright (c) 2014 windpro
Author : windpro
E-mail : windprog@gmail.com
Date : 14/12/26
Desc : 发起http请求的相关方法和http信息
"""
import requests
from wsgiref.util import is_hop_by_hop
from . import config
from . import httpinfo
from .interface import BaseHttpInfo
'''
httplib版本:https://github.com/whitmo/WSGIProxy/blob/master/wsgiproxy/exactproxy.py:proxy_exact_request
'''
def call_http_request(url, method, req_headers=None, req_data=None, req_query_string=None, **kwargs):
if config.DEBUG:
from requests.models import RequestEncodingMixin
import os
from publish import ClientService
print 'calling http %s%s pid:%s now_client_count:%s' % (
url, '?' + RequestEncodingMixin._encode_params(req_query_string) if req_query_string else '',
os.getpid(), len(ClientService.clients))
return getattr(requests, method.lower())('%s' % url, params=req_query_string, data=req_data, headers=req_headers,
**kwargs)
def parse_requests_result(result):
headers = result.headers
for key, val in headers.iteritems():
if is_hop_by_hop(key):
headers.pop(key)
elif key.lower() == 'content-encoding' and 'zip' in val:
headers.pop(key)
status_code = result.status_code
output = result.content
if 'Content-Length' in headers:
# 让wsgi模块自行计算解压之后的字节大小
headers.pop('Content-Length')
return status_code, headers, output
def get_http_result(**kwargs):
return parse_requests_result(call_http_request(**kwargs))
def get_HttpInfo_class(version):
return getattr(httpinfo, "HttpInfoVersion%s" % version)
HttpInfo = get_HttpInfo_class(config.DEFAULT_HTTPINFO_VERSION)
assert issubclass(HttpInfo, BaseHttpInfo)
| #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Copyright (c) 2014 windpro
Author : windpro
E-mail : windprog@gmail.com
Date : 14/12/26
Desc : 发起http请求的相关方法和http信息
"""
import requests
from wsgiref.util import is_hop_by_hop
from . import config
from . import httpinfo
from .interface import BaseHttpInfo
'''
httplib版本:https://github.com/whitmo/WSGIProxy/blob/master/wsgiproxy/exactproxy.py:proxy_exact_request
'''
def call_http_request(url, method, req_headers=None, req_data=None, req_query_string=None, **kwargs):
if config.DEBUG or True:
from requests.models import RequestEncodingMixin
import os
from publish import ClientService
print 'calling http %s%s pid:%s now_client_count:%s' % (
url, '?' + RequestEncodingMixin._encode_params(req_query_string) if req_query_string else '',
os.getpid(), len(ClientService.clients))
return getattr(requests, method.lower())('%s' % url, params=req_query_string, data=req_data, headers=req_headers,
**kwargs)
def parse_requests_result(result):
headers = result.headers
for key, val in headers.iteritems():
if is_hop_by_hop(key):
headers.pop(key)
elif key.lower() == 'content-encoding' and 'zip' in val:
headers.pop(key)
status_code = result.status_code
output = result.content
if 'Content-Length' in headers:
# 让wsgi模块自行计算解压之后的字节大小
headers.pop('Content-Length')
return status_code, headers, output
def get_http_result(**kwargs):
return parse_requests_result(call_http_request(**kwargs))
def get_HttpInfo_class(version):
return getattr(httpinfo, "HttpInfoVersion%s" % version)
HttpInfo = get_HttpInfo_class(config.DEFAULT_HTTPINFO_VERSION)
assert issubclass(HttpInfo, BaseHttpInfo)
| mit | Python |
dd896a3baabb7f6eb9cc24845b5842dd5a25c6d0 | Bump version to 0.1.2. | audreyr/design,audreyr/design | design/__init__.py | design/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__version__ = '0.1.2'
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
__version__ = '0.1.1'
| bsd-3-clause | Python |
c15db53c4accf690f88392bca5aaa42ade97ea5e | Add trailing slash to app's website URL when generating previously shortened URLs | chezmix/cuenco,chezmix/cuenco | cuenco/views.py | cuenco/views.py | import md5
from cuenco import app, db
from cuenco.models import WebLink
from flask import request, url_for, render_template, redirect, jsonify
from urlparse import urlparse
from sqlalchemy import func
from datetime import datetime
#base-62 encode a number
def encode(num):
if num < 1: raise Exception("encode: Number must be positive.")
charset = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
base = len(charset)
encoding = ''
while num > 0:
i = num % base
encoding = charset[i] + encoding
num = (num - i) / base
return encoding
@app.route('/')
def index():
return render_template('search_box.html')
@app.route('/<url_hash>')
def link_redirect(url_hash):
existing_link = WebLink.query.filter_by(url_hash = url_hash).first()
if (existing_link is None):
return render_template('404.html'), 404
else:
existing_link.views += 1
db.session.commit()
return redirect(existing_link.long_url)
@app.route('/recent')
def recent_entries():
entries = WebLink.query.order_by(WebLink.date.desc()).limit(10)
return render_template('links.html', entries=entries, header="Recent")
@app.route('/popular')
def popular_entries():
entries = WebLink.query.order_by(WebLink.views.desc()).limit(10)
return render_template('links.html', entries=entries, header="Popular")
@app.route('/_urlgen', methods=['POST'])
def generate_short_url():
link = WebLink(request.form['url'])
domain = urlparse(request.url).netloc
#return invalid if the URL is invalid or contains this site's domain name
if not link.is_valid() or urlparse(link.long_url).netloc == domain:
return jsonify(result="Invalid URL")
url_md5 = md5.new(link.long_url).hexdigest()
existing_link = WebLink.query.filter_by(md5_hash = url_md5).first()
if (existing_link is not None):
result = app.config["WEBSITE_URL"] + "/" + str(existing_link.url_hash)
else:
id_next = int(db.session.query(func.max(WebLink.id))[0][0] or 0) + 1
hash_num = (2654435761 * id_next) % 4294967296 #perfect hash function
link.url_hash = encode(hash_num)
link.md5_hash = url_md5
link.date = datetime.now()
db.session.add(link)
db.session.commit()
result = app.config["WEBSITE_URL"] + "/" + link.url_hash
return jsonify(result=result)
| import md5
from cuenco import app, db
from cuenco.models import WebLink
from flask import request, url_for, render_template, redirect, jsonify
from urlparse import urlparse
from sqlalchemy import func
from datetime import datetime
#base-62 encode a number
def encode(num):
if num < 1: raise Exception("encode: Number must be positive.")
charset = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
base = len(charset)
encoding = ''
while num > 0:
i = num % base
encoding = charset[i] + encoding
num = (num - i) / base
return encoding
@app.route('/')
def index():
return render_template('search_box.html')
@app.route('/<url_hash>')
def link_redirect(url_hash):
existing_link = WebLink.query.filter_by(url_hash = url_hash).first()
if (existing_link is None):
return render_template('404.html'), 404
else:
existing_link.views += 1
db.session.commit()
return redirect(existing_link.long_url)
@app.route('/recent')
def recent_entries():
entries = WebLink.query.order_by(WebLink.date.desc()).limit(10)
return render_template('links.html', entries=entries, header="Recent")
@app.route('/popular')
def popular_entries():
entries = WebLink.query.order_by(WebLink.views.desc()).limit(10)
return render_template('links.html', entries=entries, header="Popular")
@app.route('/_urlgen', methods=['POST'])
def generate_short_url():
link = WebLink(request.form['url'])
domain = urlparse(request.url).netloc
#return invalid if the URL is invalid or contains this site's domain name
if not link.is_valid() or urlparse(link.long_url).netloc == domain:
return jsonify(result="Invalid URL")
url_md5 = md5.new(link.long_url).hexdigest()
existing_link = WebLink.query.filter_by(md5_hash = url_md5).first()
if (existing_link is not None):
result = app.config["WEBSITE_URL"] + str(existing_link.url_hash)
else:
id_next = int(db.session.query(func.max(WebLink.id))[0][0] or 0) + 1
hash_num = (2654435761 * id_next) % 4294967296 #perfect hash function
link.url_hash = encode(hash_num)
link.md5_hash = url_md5
link.date = datetime.now()
db.session.add(link)
db.session.commit()
result = app.config["WEBSITE_URL"] + "/" + link.url_hash
return jsonify(result=result)
| mit | Python |
a97c41f568ca2f3b66e748c4da3c5b283036bdaf | save iteratively | Neurosim-lab/netpyne,Neurosim-lab/netpyne | examples/asdEvol/netParams.py | examples/asdEvol/netParams.py | from netpyne import specs, sim
try:
from __main__ import cfg
except:
from simConfig import cfg
# Network parameters
netParams = specs.NetParams()
# --------------------------------------------------------
# Simple network
# --------------------------------------------------------
# Population parameters
netParams.popParams['S'] = {'cellType': 'PYR', 'numCells': 20, 'cellModel': 'HH'}
netParams.popParams['M'] = {'cellType': 'PYR', 'numCells': 20, 'cellModel': 'HH'}
# Cell property rules
cellRule = {'conds': {'cellType': 'PYR'}, 'secs': {}}
cellRule['secs']['soma'] = {'geom': {}, 'mechs': {}}
cellRule['secs']['soma']['geom'] = {'diam': 18.8, 'L': 18.8, 'Ra': 123.0}
cellRule['secs']['soma']['mechs']['hh'] = {'gnabar': 0.12, 'gkbar': 0.036, 'gl': 0.003, 'el': -70}
netParams.cellParams['PYRrule'] = cellRule
# Synaptic mechanism parameters
netParams.synMechParams['exc'] = {'mod': 'Exp2Syn', 'tau1': 0.1, 'tau2': 5, 'e': 0}
# Stimulation parameters
netParams.stimSourceParams['bkg'] = {'type': 'NetStim', 'rate': 10, 'noise': 0.5}
netParams.stimTargetParams['bkg->PYR'] = {'source': 'bkg', 'conds': {'cellType': 'PYR'}, 'weight': 0.01, 'delay': 5, 'synMech': 'exc'}
# Cell connectivity rules
netParams.connParams['S->M'] = {
'preConds': {'pop': 'S'},
'postConds': {'pop': 'M'},
'probability': cfg.prob,
'weight': cfg.weight,
'delay': cfg.delay,
'synMech': 'exc'} | from netpyne import specs, sim
try:
from __main__ import cfg
except:
from simConfig import cfg
# Network parameters
netParams = specs.NetParams()
# --------------------------------------------------------
# Simple network
# --------------------------------------------------------
# Population parameters
netParams.popParams['S'] = {'cellType': 'PYR', 'numCells': 20, 'cellModel': 'HH'}
netParams.popParams['M'] = {'cellType': 'PYR', 'numCells': 20, 'cellModel': 'HH'}
# Cell property rules
cellRule = {'conds': {'cellType': 'PYR'}, 'secs': {}}
cellRule['secs']['soma'] = {'geom': {}, 'mechs': {}}
cellRule['secs']['soma']['geom'] = {'diam': 18.8, 'L': 18.8, 'Ra': 123.0}
cellRule['secs']['soma']['mechs']['hh'] = {'gnabar': 0.12, 'gkbar': 0.036, 'gl': 0.003, 'el': -70}
netParams.cellParams['PYRrule'] = cellRule
# Synaptic mechanism parameters
netParams.synMechParams['exc'] = {'mod': 'Exp2Syn', 'tau1': 0.1, 'tau2': 5, 'e': 0}
# Stimulation parameters
netParams.stimSourceParams['bkg'] = {'type': 'NetStim', 'rate': 10, 'noise': 0.5}
netParams.stimTargetParams['bkg->PYR'] = {'source': 'bkg', 'conds': {'cellType': 'PYR'}, 'weight': 0.01, 'delay': 5, 'synMech': 'exc'}
# Cell connectivity rules
netParams.connParams['S->M'] = {
'preConds': {'pop': 'S'},
'postConds': {'pop': 'M'},
'probability': cfg.prob,
'weight': cfg.weight,
'delay': cfg.delay,
'synMech': 'exc'
| mit | Python |
3ae086aff4ac24792c026e49775dc8feca2aa4eb | Add filename sanitiser | saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt | salt/utils/general.py | salt/utils/general.py | # -*- coding: utf-8 -*-
#
# Copyright 2016 SUSE LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import os.path
from builtins import str as text
from salt.exceptions import CommandExecutionError
class InputSanitizer(object):
@staticmethod
def trim(value):
'''
Raise an exception if value is empty. Otherwise strip it down.
:param value:
:return:
'''
value = (value or '').strip()
if not value:
raise CommandExecutionError("Empty value during sanitation")
return text(value)
@staticmethod
def filename(value):
'''
Remove everything that would affect paths in the filename
:param value:
:return:
'''
return re.sub('[^a-zA-Z0-9.-_ ]', '', os.path.basename(InputSanitizer.trim(value)))
clean = InputSanitizer()
| # -*- coding: utf-8 -*-
#
# Copyright 2016 SUSE LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import os.path
from builtins import str as text
from salt.exceptions import CommandExecutionError
class InputSanitizer(object):
@staticmethod
def trim(value):
'''
Raise an exception if value is empty. Otherwise strip it down.
:param value:
:return:
'''
value = (value or '').strip()
if not value:
raise CommandExecutionError("Empty value during sanitation")
return text(value)
clean = InputSanitizer()
| apache-2.0 | Python |
bcfe1bafd8e28d5be0cc551930d0d43149282e71 | Fix url | NovelTorpedo/noveltorpedo,NovelTorpedo/noveltorpedo,NovelTorpedo/noveltorpedo,NovelTorpedo/noveltorpedo | noveltorpedo/urls.py | noveltorpedo/urls.py | from django.conf.urls import include, url
from . import views
app_name = 'noveltorpedo'
urlpatterns = [
url(r'^', include('haystack.urls')),
]
| from django.conf.urls import include, url
from . import views
app_name = 'noveltorpedo'
urlpatterns = [
url(r'^$', include('haystack.urls')),
]
| mit | Python |
4ee2986eb12ff06e6e31c362ac2fa873eb3429b9 | use os.makedirs to create the full STATIC_PATH/directory if needed | boblefrag/django-bower-app,levkowetz/django-bower-app | djangobwr/management/commands/bower_install.py | djangobwr/management/commands/bower_install.py | import os
import json
import tempfile
import shutil
from subprocess import call
from django.core.management.base import BaseCommand
from django.conf import settings
from djangobwr.finders import AppDirectoriesFinderBower
class Command(BaseCommand):
def handle(self, *args, **options):
temp_dir = tempfile.mkdtemp()
for path, storage in AppDirectoriesFinderBower().list([]):
original_file = unicode(os.path.join(storage.location, path))
if "bower.json" in path and not\
os.path.split(path)[1].startswith("."):
call(["bower",
"install",
original_file,
"--config.cwd={}".format(temp_dir),
"-p"])
bower_dir = os.path.join(temp_dir, "bower_components")
for directory in os.listdir(bower_dir):
if directory != "static":
bower = json.loads(
open(os.path.join(bower_dir, directory,
"bower.json")).read())
if not os.path.exists(
os.path.join(settings.STATIC_ROOT, directory)):
os.makedirs(os.path.join(settings.STATIC_ROOT, directory))
if not isinstance(bower["main"], list):
main = [bower["main"]]
else:
main = bower["main"]
for path in main:
shutil.copy(os.path.join(bower_dir,
directory,
path),
os.path.join(settings.STATIC_ROOT, directory))
| import os
import json
import tempfile
import shutil
from subprocess import call
from django.core.management.base import BaseCommand
from django.conf import settings
from djangobwr.finders import AppDirectoriesFinderBower
class Command(BaseCommand):
def handle(self, *args, **options):
temp_dir = tempfile.mkdtemp()
for path, storage in AppDirectoriesFinderBower().list([]):
original_file = unicode(os.path.join(storage.location, path))
if "bower.json" in path and not\
os.path.split(path)[1].startswith("."):
call(["bower",
"install",
original_file,
"--config.cwd={}".format(temp_dir),
"-p"])
bower_dir = os.path.join(temp_dir, "bower_components")
for directory in os.listdir(bower_dir):
if directory != "static":
bower = json.loads(
open(os.path.join(bower_dir, directory,
"bower.json")).read())
if not os.path.exists(settings.STATIC_ROOT):
os.mkdir(settings.STATIC_ROOT)
if not os.path.exists(
os.path.join(settings.STATIC_ROOT, directory)):
os.mkdir(os.path.join(settings.STATIC_ROOT, directory))
if not isinstance(bower["main"], list):
main = [bower["main"]]
else:
main = bower["main"]
for path in main:
shutil.copy(os.path.join(bower_dir,
directory,
path),
os.path.join(settings.STATIC_ROOT, directory))
| bsd-3-clause | Python |
500253425b2ea0dd79359378f8c6fc41e191d52c | Integrate LLVM at llvm/llvm-project@a617ff0ba001 | google/tsl,google/tsl,google/tsl | third_party/llvm/workspace.bzl | third_party/llvm/workspace.bzl | """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "a617ff0ba0017b1df010d480eceb13066ecd122e"
LLVM_SHA256 = "019068664d4e73d1d3b4bdd56237d1e19251858eafc88d108428f9df98b6d074"
tf_http_archive(
name = name,
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-{commit}".format(commit = LLVM_COMMIT),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
build_file = "//third_party/llvm:llvm.BUILD",
patch_file = [
"//third_party/llvm:build.patch",
"//third_party/llvm:mathextras.patch",
"//third_party/llvm:toolchains.patch",
"//third_party/llvm:temporary.patch", # Cherry-picks and temporary reverts. Do not remove even if temporary.patch is empty.
],
link_files = {"//third_party/llvm:run_lit.sh": "mlir/run_lit.sh"},
)
| """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "163bb6d64e5f1220777c3ec2a8b58c0666a74d91"
LLVM_SHA256 = "f36211d9e34dcdd364bdef02efeadacca7e21c3e1b2ee73a2e60286bcd67db7e"
tf_http_archive(
name = name,
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-{commit}".format(commit = LLVM_COMMIT),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
build_file = "//third_party/llvm:llvm.BUILD",
patch_file = [
"//third_party/llvm:build.patch",
"//third_party/llvm:mathextras.patch",
"//third_party/llvm:toolchains.patch",
"//third_party/llvm:temporary.patch", # Cherry-picks and temporary reverts. Do not remove even if temporary.patch is empty.
],
link_files = {"//third_party/llvm:run_lit.sh": "mlir/run_lit.sh"},
)
| apache-2.0 | Python |
3414a8320153c8ae850c5382c35bd2b6c1a0b9a6 | Integrate LLVM at llvm/llvm-project@f2b94bd7eaa8 | tensorflow/runtime,tensorflow/runtime,tensorflow/runtime,tensorflow/runtime | third_party/llvm/workspace.bzl | third_party/llvm/workspace.bzl | """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tfrt_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "f2b94bd7eaa83d853dc7568fac87b1f8bf4ddec6"
LLVM_SHA256 = "ac53a2a6516f84031d9a61d35700b0e6ba799292cfb54cfc17ed48958de8ff49"
tfrt_http_archive(
name = name,
build_file = "//third_party/llvm:BUILD",
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-" + LLVM_COMMIT,
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
)
| """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tfrt_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "bc432c96349c1b0a381b824f11b057ff0de0b571"
LLVM_SHA256 = "a49bb4d643f35f63d72ccea0d3fe09cf374908a2400197ce5391f60596be0701"
tfrt_http_archive(
name = name,
build_file = "//third_party/llvm:BUILD",
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-" + LLVM_COMMIT,
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
)
| apache-2.0 | Python |
7b92eaec3eefc4c839fee3323fb7a970a5b72f64 | Integrate LLVM at llvm/llvm-project@4be3fc35aa8b | tensorflow/runtime,tensorflow/runtime,tensorflow/runtime,tensorflow/runtime | third_party/llvm/workspace.bzl | third_party/llvm/workspace.bzl | """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tfrt_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "4be3fc35aa8b27494968e9a52eb0afa0672d98e7"
LLVM_SHA256 = "fdc350e798d8870496bd8aa4e6de6b76ae2423f1d0a4e88ff238ad9542683c00"
tfrt_http_archive(
name = name,
build_file = "//third_party/llvm:BUILD",
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-" + LLVM_COMMIT,
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
)
| """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tfrt_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "8e82bc840de5b2264875a5f0967845867833ccfb"
LLVM_SHA256 = "683e8d615013efe0abafd3098c05e7f348d829fd0adfcdcf441236780bdff074"
tfrt_http_archive(
name = name,
build_file = "//third_party/llvm:BUILD",
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-" + LLVM_COMMIT,
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
)
| apache-2.0 | Python |
2db334e452e2ee2d5f0cbc516dc6cb04b61e598d | Check for `GNdr` grammeme in `gender-match` label | bureaucratic-labs/yargy | yargy/labels.py | yargy/labels.py | GENDERS = ("masc", "femn", "neut", "Ms-f", "GNdr")
def gram_label(token, value, stack):
return value in token.grammemes
def gram_not_label(token, value, stack):
return not value in token.grammemes
def gender_match_label(token, index, stack, genders=GENDERS):
results = ((g in t.grammemes for g in genders) for t in (stack[index], token))
*case_token_genders, case_token_msf, case_token_gndr = next(results)
*candidate_token_genders, candidate_token_msf, candidate_token_gndr = next(results)
if not candidate_token_genders == case_token_genders:
if case_token_msf:
if any(candidate_token_genders[:2]):
return True
elif case_token_gndr or candidate_token_gndr:
return True
else:
return True
return False
def dictionary_label(token, values, stack):
return any((n in values) for n in token.forms)
LABELS_LOOKUP_MAP = {
"gram": gram_label,
"gram-not": gram_not_label,
"dictionary": dictionary_label,
"gender-match": gender_match_label,
}
| GENDERS = ("masc", "femn", "neut", "Ms-f")
def gram_label(token, value, stack):
return value in token.grammemes
def gram_not_label(token, value, stack):
return not value in token.grammemes
def gender_match_label(token, index, stack, genders=GENDERS):
results = ((g in t.grammemes for g in genders) for t in (stack[index], token))
*case_token_genders, case_token_msf = next(results)
*candidate_token_genders, candidate_token_msf = next(results)
if not candidate_token_genders == case_token_genders:
if case_token_msf:
if any(candidate_token_genders[:2]):
return True
else:
return True
return False
def dictionary_label(token, values, stack):
return any((n in values) for n in token.forms)
LABELS_LOOKUP_MAP = {
"gram": gram_label,
"gram-not": gram_not_label,
"dictionary": dictionary_label,
"gender-match": gender_match_label,
}
| mit | Python |
0a722c29be0c3bb5c84a3579717e6042e365fdad | Bump version to 3.1.2-dev | DirkHoffmann/indico,DirkHoffmann/indico,DirkHoffmann/indico,indico/indico,DirkHoffmann/indico,indico/indico,indico/indico,indico/indico | indico/__init__.py | indico/__init__.py | # This file is part of Indico.
# Copyright (C) 2002 - 2022 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from indico.util.mimetypes import register_custom_mimetypes
__version__ = '3.1.2-dev'
PREFERRED_PYTHON_VERSION_SPEC = '~=3.9.0'
register_custom_mimetypes()
| # This file is part of Indico.
# Copyright (C) 2002 - 2022 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from indico.util.mimetypes import register_custom_mimetypes
__version__ = '3.1.1'
PREFERRED_PYTHON_VERSION_SPEC = '~=3.9.0'
register_custom_mimetypes()
| mit | Python |
7d7cde3f6d77291ecb6ac4fa4a941b1fad60a03b | Fix setup.py | pymfony/pymfony | src/pymfony/component/system/setup.py | src/pymfony/component/system/setup.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of the pymfony package.
#
# (c) Alexandre Quercia <alquerci@email.com>
#
# For the full copyright and license information, please view the LICENSE
# file that was distributed with this source code.
from __future__ import absolute_import;
import os;
from distutils.core import setup;
"""
"""
realpathfile = os.path.realpath(os.path.dirname(__file__));
os.chdir(realpathfile);
f = open("README.md");
long_description = "\n"+f.read();
f.close();
def find_packages():
return [
'pymfony.component.system',
'pymfony.component.system.py2',
'pymfony.component.system.py2.minor6',
'pymfony.component.system.py3',
];
def find_package_data():
return {
};
setup(
name="pymfony.yaml",
version="2.2.0b1",
package_dir={'pymfony.component.system': ''},
packages=find_packages(),
package_data=find_package_data(),
author="Alexandre Quercia",
author_email="alquerci@email.com",
url="http://github.com/alquerci/pymfony-system",
description='Pymfony System Component',
long_description=long_description,
license="MIT",
classifiers=[
"Development Status :: 4 - Beta",
"Programming Language :: Python",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Operating System :: OS Independent",
"License :: OSI Approved :: MIT License",
"Intended Audience :: Developers",
"Topic :: Software Development :: Libraries :: Application Frameworks",
],
);
| #!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of the pymfony package.
#
# (c) Alexandre Quercia <alquerci@email.com>
#
# For the full copyright and license information, please view the LICENSE
# file that was distributed with this source code.
from __future__ import absolute_import;
import os;
from distutils.core import setup;
"""
"""
realpathfile = os.path.realpath(os.path.dirname(__file__));
os.chdir(realpathfile);
f = open("README.md");
long_description = "\n"+f.read();
f.close();
def find_packages():
return [
'pymfony.component.system',
];
def find_package_data():
return {
};
setup(
name="pymfony.yaml",
version="2.2.0b1",
package_dir={'pymfony.component.system': ''},
packages=find_packages(),
package_data=find_package_data(),
author="Alexandre Quercia",
author_email="alquerci@email.com",
url="http://github.com/alquerci/pymfony-system",
description='Pymfony System Component',
long_description=long_description,
license="MIT",
classifiers=[
"Development Status :: 4 - Beta",
"Programming Language :: Python",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Operating System :: OS Independent",
"License :: OSI Approved :: MIT License",
"Intended Audience :: Developers",
"Topic :: Software Development :: Libraries :: Application Frameworks",
],
);
| mit | Python |
dbcb69100795d8ac00dd3ceb91b06b94e2596c7f | upgrade spectrum simu | Koheron/lase | lase/drivers/spectrum_simu.py | lase/drivers/spectrum_simu.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
from .base_simu import BaseSimu
class SpectrumSimu(BaseSimu):
def __init__(self):
n = 4096
super(SpectrumSimu, self).__init__(n)
self.waveform_size = n
self.spectrum = np.zeros(self.sampling.n, dtype=np.float32)
self.demod = np.zeros((2, self.sampling.n, n))
self.demod[0, :] = 0.49 * (1-np.cos(2 * np.pi * np.arange(self.sampling.n) /self.sampling.n))
self.demod[1, :] = 0
self.velocity_signal = 1
self.reset()
def set_velocity(self, velocity, SNR):
""" Generates the velocity beat signal for velocimeter simulation
Args:
- velocity: Object velocity (m/s)
- SNR: Signal-to-noise
"""
time = np.linspace(0,
self.model.sampling.n *
self.model.sampling.dt,
self.model.sampling.n)
omega_doppler = 2 * np.pi * velocity / self.model._wavelength
# Doppler shift (rad/s)
self.velocity_signal = np.cos(omega_doppler*time) + np.random.randn(self.model.sampling.n) / SNR
def get_spectrum(self):
self.update()
self.set_velocity(velocity=4.2, SNR=0.25)
adc = self.model.adc_from_voltage(
self.model.photodetection_voltage(self.model._optical_attenuation *
self._live_laser_power *
self.velocity_signal),
n_avg=1)
self.spectrum = np.abs(np.fft.fft(adc))
def set_demod(self):
pass
def get_peak_values(self):
return 0
def set_address_range(self, address_low, address_high):
pass
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
from .base_simu import BaseSimu
class SpectrumSimu(BaseSimu):
def __init__(self):
n = 4096
super(SpectrumSimu, self).__init__(n)
self.waveform_size = n
self.spectrum = np.zeros(self.sampling.n, dtype=np.float32)
self.demod = np.zeros((2, self.sampling.n, n))
self.demod[0, :] = 0.49 * (1-np.cos(2 * np.pi * np.arange(self.sampling.n) /self.sampling.n))
self.demod[1, :] = 0
self.velocity_signal = 1
self.reset()
def set_velocity(self, velocity, SNR):
""" Generates the velocity beat signal for velocimeter simulation
Args:
- velocity: Object velocity (m/s)
- SNR: Signal-to-noise
"""
time = np.linspace(0,
self.model.sampling.n *
self.model.sampling.dt,
self.model.sampling.n)
omega_doppler = 2 * np.pi * velocity / self.model._wavelength
# Doppler shift (rad/s)
self.velocity_signal = np.cos(omega_doppler*time) + np.random.randn(self.model.sampling.n) / SNR
def get_spectrum(self):
self.update()
self.set_velocity(velocity=4.2, SNR=0.25)
adc = self.model.adc_from_voltage(
self.model.photodetection_voltage(self.model._optical_attenuation *
self._live_laser_power *
self.velocity_signal),
n_avg=1)
self.spectrum = np.abs(np.fft.fft(adc))
def set_demod(self):
pass
| mit | Python |
4db1bcf44bb2f3cc9621d9a58739029721a31f14 | add wildcards to bundled libzmq glob | caidongyun/pyzmq,dash-dash/pyzmq,ArvinPan/pyzmq,caidongyun/pyzmq,yyt030/pyzmq,swn1/pyzmq,yyt030/pyzmq,Mustard-Systems-Ltd/pyzmq,dash-dash/pyzmq,dash-dash/pyzmq,swn1/pyzmq,caidongyun/pyzmq,Mustard-Systems-Ltd/pyzmq,Mustard-Systems-Ltd/pyzmq,yyt030/pyzmq,ArvinPan/pyzmq,ArvinPan/pyzmq,swn1/pyzmq | zmq/__init__.py | zmq/__init__.py | """Python bindings for 0MQ."""
#-----------------------------------------------------------------------------
# Copyright (C) 2010-2012 Brian Granger, Min Ragan-Kelley
#
# This file is part of pyzmq
#
# Distributed under the terms of the New BSD License. The full license is in
# the file COPYING.BSD, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
import sys
import glob
here = os.path.dirname(__file__)
bundled = []
for ext in ('pyd', 'so', 'dll', 'dylib'):
bundled.extend(glob.glob(os.path.join(here, 'libzmq*.%s*' % ext)))
if bundled:
import ctypes
if bundled[0].endswith('.pyd'):
# a Windows Extension
_libzmq = ctypes.cdll.LoadLibrary(bundled[0])
else:
_libzmq = ctypes.CDLL(bundled[0], mode=ctypes.RTLD_GLOBAL)
del ctypes
del os, sys, glob, here, bundled, ext
from zmq.utils import initthreads # initialize threads
initthreads.init_threads()
from zmq import core, devices
from zmq.core import *
def get_includes():
"""Return a list of directories to include for linking against pyzmq with cython."""
from os.path import join, dirname, abspath, pardir
base = dirname(__file__)
parent = abspath(join(base, pardir))
return [ parent ] + [ join(parent, base, subdir) for subdir in ('utils',) ]
__all__ = ['get_includes'] + core.__all__
| """Python bindings for 0MQ."""
#-----------------------------------------------------------------------------
# Copyright (C) 2010-2012 Brian Granger, Min Ragan-Kelley
#
# This file is part of pyzmq
#
# Distributed under the terms of the New BSD License. The full license is in
# the file COPYING.BSD, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
import sys
import glob
here = os.path.dirname(__file__)
bundled = []
for ext in ('pyd', 'so', 'dll', 'dylib'):
bundled.extend(glob.glob(os.path.join(here, 'libzmq.%s' % ext)))
if bundled:
import ctypes
if bundled[0].endswith('.pyd'):
# a Windows Extension
_libzmq = ctypes.cdll.LoadLibrary(bundled[0])
else:
_libzmq = ctypes.CDLL(bundled[0], mode=ctypes.RTLD_GLOBAL)
del ctypes
del os, sys, glob, here, bundled, ext
from zmq.utils import initthreads # initialize threads
initthreads.init_threads()
from zmq import core, devices
from zmq.core import *
def get_includes():
"""Return a list of directories to include for linking against pyzmq with cython."""
from os.path import join, dirname, abspath, pardir
base = dirname(__file__)
parent = abspath(join(base, pardir))
return [ parent ] + [ join(parent, base, subdir) for subdir in ('utils',) ]
__all__ = ['get_includes'] + core.__all__
| bsd-3-clause | Python |
a44ec4543fc6951cd45ba3c1696e428e36a9c161 | Make sure the target of Say isn't in Unicode, otherwise Twisted complains | Didero/DideRobot | commands/say.py | commands/say.py | from CommandTemplate import CommandTemplate
from IrcMessage import IrcMessage
class Command(CommandTemplate):
triggers = ['say', 'do', 'notice']
helptext = "Makes the bot say the provided text in the provided channel (format 'say [channel/user] text')"
adminOnly = True
showInCommandList = False
def execute(self, message):
"""
:type message: IrcMessage
"""
if message.messagePartsLength < 2:
message.bot.say(message.source, u"Please provide both a channel or user name to say something to, and the text to say")
#Check if we're in the channel we have to say something to
elif not message.isPrivateMessage and message.messageParts[0] not in message.bot.channelsUserList:
message.bot.say(message.source, u"I'm not in that channel, so I can't say anything in there, sorry.")
#Nothing's stopping us now! Say it!
else:
messageToSay = u" ".join(message.messageParts[1:])
messageType = u'say'
if message.trigger == u'do':
messageType = u'action'
elif message.trigger == u'notice':
messageType = u'notice'
target = message.messageParts[0]
#Make absolutely sure the target isn't unicode, because Twisted doesn't like that
try:
target = target.encode('utf-8')
except (UnicodeEncodeError, UnicodeDecodeError):
print "[Say module] Unable to convert '{}' to a string".format(target)
message.bot.sendMessage(target, messageToSay, messageType)
| from CommandTemplate import CommandTemplate
from IrcMessage import IrcMessage
class Command(CommandTemplate):
triggers = ['say', 'do', 'notice']
helptext = "Makes the bot say the provided text in the provided channel (format 'say [channel/user] text')"
adminOnly = True
showInCommandList = False
def execute(self, message):
"""
:type message: IrcMessage
"""
if message.messagePartsLength < 2:
message.bot.say(message.source, u"Please provide both a channel or user name to say something to, and the text to say")
#Check if we're in the channel we have to say something to
elif not message.isPrivateMessage and message.messageParts[0] not in message.bot.channelsUserList:
message.bot.say(message.source, u"I'm not in that channel, so I can't say anything in there, sorry.")
#Nothing's stopping us now! Say it!
else:
messageToSay = u" ".join(message.messageParts[1:])
messageType = u'say'
if message.trigger == u'do':
messageType = u'action'
elif message.trigger == u'notice':
messageType = u'notice'
message.bot.sendMessage(message.messageParts[0], messageToSay, messageType)
| mit | Python |
42c2389c88fc52e186079df1c426af429537ed0e | Update blender plugin version to the next release number | ndevenish/Blender_ioEDM,ndevenish/Blender_ioEDM | io_EDM/__init__.py | io_EDM/__init__.py |
bl_info = {
'name': "Import: .EDM model files",
'description': "Importing of .EDM model files",
'author': "Nicholas Devenish",
'version': (0,3,0),
'blender': (2, 78, 0),
'location': "File > Import/Export > .EDM Files",
'category': 'Import-Export',
}
try:
import bpy
def register():
from .io_operators import register as importer_register
from .rna import register as rna_register
from .panels import register as panels_register
rna_register()
panels_register()
importer_register()
def unregister():
from .io_operators import unregister as importer_unregister
from .rna import unregister as rna_unregister
from .panels import unregister as panels_unregister
importer_unregister()
panels_unregister()
rna_unregister()
if __name__ == "__main__":
register()
except ImportError:
# Allow for now, as we might just want to import the sub-package
pass |
bl_info = {
'name': "Import: .EDM model files",
'description': "Importing of .EDM model files",
'author': "Nicholas Devenish",
'version': (0,0,1),
'blender': (2, 78, 0),
'location': "File > Import/Export > .EDM Files",
'category': 'Import-Export',
}
try:
import bpy
def register():
from .io_operators import register as importer_register
from .rna import register as rna_register
from .panels import register as panels_register
rna_register()
panels_register()
importer_register()
def unregister():
from .io_operators import unregister as importer_unregister
from .rna import unregister as rna_unregister
from .panels import unregister as panels_unregister
importer_unregister()
panels_unregister()
rna_unregister()
if __name__ == "__main__":
register()
except ImportError:
# Allow for now, as we might just want to import the sub-package
pass | mit | Python |
a2d9b2e9aa0e1b3cdfd3db20d3b62eab74be5408 | comment only | cropleyb/pentai,cropleyb/pentai,cropleyb/pentai | evaluator.py | evaluator.py | from utility_stats import *
from null_filter import *
class Evaluator():
"""
This is to help with debugging bad moves.
"""
def __init__(self, calculator, state):
self.state = state
self.calculator = calculator
calculator.set_rules(self.get_rules())
self.utility_stats = UtilityStats(parent=None, search_filter=NullFilter())
state.add_observer(self)
#self.rules = self.get_rules() # TODO
def board(self):
return self.state.board
def game(self):
return self.state.game
def get_rules(self):
return self.game().rules
def reset_state(self):
self.utility_stats.reset()
def before_set_occ(self, pos, colour):
self.utility_stats.set_or_reset_occs( \
self.board(), self.get_rules(), pos, -1)
def after_set_occ(self, pos, colour):
self.utility_stats.set_or_reset_occs( \
self.board(), self.get_rules(), pos, 1)
def search_player_colour(self):
""" The AI player who is performing the search.
For the evaluator, we will always show it from one
player's perspective
"""
return BLACK
'''
game = self.game()
return game.to_move_colour()
'''
def to_move_colour(self):
return self.state.to_move_colour()
def get_captured(self, colour):
return self.state.get_captured(colour)
def get_move_number(self):
return self.state.get_move_number()
def get_takes(self):
return self.utility_stats.takes
def set_won_by(self, colour):
pass
def utility(self):
# TODO: self.state is not an ABState instance
return "Utility for %s: %s (%s)" % ( self.get_move_number(),
self.calculator.utility(self, self.utility_stats),
self.utility_stats)
| from utility_stats import *
from null_filter import *
class Evaluator():
"""
This is for help with debugging bad moves.
"""
def __init__(self, calculator, state):
self.state = state
self.calculator = calculator
calculator.set_rules(self.get_rules())
self.utility_stats = UtilityStats(parent=None, search_filter=NullFilter())
state.add_observer(self)
#self.rules = self.get_rules() # TODO
def board(self):
return self.state.board
def game(self):
return self.state.game
def get_rules(self):
return self.game().rules
def reset_state(self):
self.utility_stats.reset()
def before_set_occ(self, pos, colour):
self.utility_stats.set_or_reset_occs( \
self.board(), self.get_rules(), pos, -1)
def after_set_occ(self, pos, colour):
self.utility_stats.set_or_reset_occs( \
self.board(), self.get_rules(), pos, 1)
def search_player_colour(self):
""" The AI player who is performing the search.
For the evaluator, we will always show it from one
player's perspective
"""
return BLACK
'''
game = self.game()
return game.to_move_colour()
'''
def to_move_colour(self):
return self.state.to_move_colour()
def get_captured(self, colour):
return self.state.get_captured(colour)
def get_move_number(self):
return self.state.get_move_number()
def get_takes(self):
return self.utility_stats.takes
def set_won_by(self, colour):
pass
def utility(self):
# TODO: self.state is not an ABState instance
return "Utility for %s: %s (%s)" % ( self.get_move_number(),
self.calculator.utility(self, self.utility_stats),
self.utility_stats)
| mit | Python |
fd6039e920643e09cbc60a8ddcfb7bf51a05dbbf | test commit | nchah/inf1340_2015_asst1 | exercise1.py | exercise1.py | #!/usr/bin/env python
""" Assignment 1, Exercise 1, INF1340, Fall, 2014. Grade to gpa conversion
This module prints the amount of money that Lakshmi has remaining
after the stock transactions
"""
__author__ = 'Susan Sim'
__email__ = "ses@drsusansim.org"
__copyright__ = "2015 Susan Sim"
__license__ = "MIT License"
# Test commit
money = 1000.00
print(money)
| #!/usr/bin/env python
""" Assignment 1, Exercise 1, INF1340, Fall, 2014. Grade to gpa conversion
This module prints the amount of money that Lakshmi has remaining
after the stock transactions
"""
__author__ = 'Susan Sim'
__email__ = "ses@drsusansim.org"
__copyright__ = "2015 Susan Sim"
__license__ = "MIT License"
money = 1000.00
print(money)
| mit | Python |
5882fbdba87a26d3cdb7ae3f29d8af2140d83d2d | Use new cab URLs. | django/djangosnippets.org,django/djangosnippets.org,django/djangosnippets.org,django-de/djangosnippets.org,django-de/djangosnippets.org,django/djangosnippets.org,django-de/djangosnippets.org,django/djangosnippets.org,django-de/djangosnippets.org | djangosnippets/urls.py | djangosnippets/urls.py | from django.conf.urls.defaults import url, patterns, include
from django.conf.urls.static import static
from django.conf import settings
from django.contrib import admin
from django.shortcuts import render
admin.autodiscover()
urlpatterns = patterns('',
url(r'^captcha/', include('captcha.urls')),
url(r'^accounts/', include('cab.urls.accounts')),
url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^bookmarks/', include('cab.urls.bookmarks')),
url(r'^comments/', include('django.contrib.comments.urls')),
url(r'^feeds/', include('cab.urls.feeds')),
url(r'^languages/', include('cab.urls.languages')),
url(r'^popular/', include('cab.urls.popular')),
url(r'^search/', include('cab.urls.search')),
url(r'^snippets/', include('cab.urls.snippets')),
url(r'^tags/', include('cab.urls.tags')),
url(r'^users/$', 'cab.views.popular.top_authors', name='cab_top_authors'),
url(r'^users/(?P<username>[-\w]+)/$', 'cab.views.snippets.author_snippets',
name='cab_author_snippets'),
url(r'^$', lambda request: render(request, 'homepage.html')),
)
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL,
document_root=settings.STATIC_ROOT)
| from django.conf.urls.defaults import url, patterns, include
from django.conf.urls.static import static
from django.conf import settings
from django.contrib import admin
from django.shortcuts import render
from haystack.views import SearchView, search_view_factory
from cab import feeds
from cab.forms import AdvancedSearchForm, RegisterForm
from registration.backends.default.views import RegistrationView
admin.autodiscover()
class CabRegistrationView(RegistrationView):
form_class = RegisterForm
urlpatterns = patterns('',
url(r'^captcha/', include('captcha.urls')),
url(r'^accounts/', include('django.contrib.auth.urls')),
url(r'^accounts/register/$', CabRegistrationView.as_view(),
name='registration_register'),
url(r'^accounts/', include('registration.backends.default.urls')),
url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^bookmarks/', include('cab.urls.bookmarks')),
url(r'^comments/', include('django.contrib.comments.urls')),
url(r'^feeds/author/(?P<username>[\w.@+-]+)/$',
feeds.SnippetsByAuthorFeed()),
url(r'^feeds/language/(?P<slug>[\w-]+)/$', feeds.SnippetsByLanguageFeed()),
url(r'^feeds/latest/$', feeds.LatestSnippetsFeed()),
url(r'^feeds/tag/(?P<slug>[\w-]+)/$', feeds.SnippetsByTagFeed()),
url(r'^languages/', include('cab.urls.languages')),
url(r'^popular/', include('cab.urls.popular')),
url(r'^search/$', 'haystack.views.basic_search', name='cab_search'),
url(r'^search/autocomplete/$', 'cab.views.snippets.autocomplete',
name='snippet_autocomplete'),
url(r'^search/advanced/$', search_view_factory(view_class=SearchView,
template='search/advanced_search.html', form_class=AdvancedSearchForm),
name='cab_search_advanced'),
url(r'^snippets/', include('cab.urls.snippets')),
url(r'^tags/', include('cab.urls.tags')),
url(r'^users/$', 'cab.views.popular.top_authors', name='cab_top_authors'),
url(r'^users/(?P<username>[-\w]+)/$', 'cab.views.snippets.author_snippets',
name='cab_author_snippets'),
url(r'^$', lambda request: render(request, 'homepage.html')),
)
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL,
document_root=settings.STATIC_ROOT)
| bsd-3-clause | Python |
0ad39c6a0415d997a433ddc828c180154e9c257d | Print 404 to chan | jasuka/pyBot,jasuka/pyBot | modules/syscmd.py | modules/syscmd.py | import urllib.request
import os
import re
## Get HTML for given url
def getHtml( self, url, useragent):
try:
if useragent == True:
user_agent = "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/6.0)"
headers = { 'User-Agent' : user_agent }
req = urllib.request.Request(url, None, headers)
else:
req = urllib.request.Request(url, None)
html = urllib.request.urlopen(req, timeout = 10).read()
return(html)
except HTTPError:
self.send_chan( "{0}".format(e) )
except Exception as e:
if self.config["debug"] == "true":
print(e)
## End
## Check if the city exists in Finland
def checkCity ( city ):
try:
city = city.title().strip()
with open("modules/data/cities.txt", "r", encoding="UTF-8") as file:
data = [x.strip() for x in file.readlines()]
if city in data:
return(True)
except IOError as e:
if self.config["debug"] == "true":
print(e)
## End
## Clears html tags from a string
def delHtml( html ):
try:
html = re.sub('<[^<]+?>', '', html)
return(html)
except Exception as e:
if self.config["debug"] == "true":
print(e)
## End | import urllib.request
import os
import re
## Get HTML for given url
def getHtml( self, url, useragent):
try:
if useragent == True:
user_agent = "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/6.0)"
headers = { 'User-Agent' : user_agent }
req = urllib.request.Request(url, None, headers)
else:
req = urllib.request.Request(url, None)
html = urllib.request.urlopen(req, timeout = 10).read()
return(html)
except Exception as e:
self.send_chan( "{0}".format(e) )
if self.config["debug"] == "true":
print(e)
## End
## Check if the city exists in Finland
def checkCity ( city ):
try:
city = city.title().strip()
with open("modules/data/cities.txt", "r", encoding="UTF-8") as file:
data = [x.strip() for x in file.readlines()]
if city in data:
return(True)
except IOError as e:
if self.config["debug"] == "true":
print(e)
## End
## Clears html tags from a string
def delHtml( html ):
try:
html = re.sub('<[^<]+?>', '', html)
return(html)
except Exception as e:
if self.config["debug"] == "true":
print(e)
## End | mit | Python |
d8cfd9218ce6b9b341020c934c9102d4c776ebc7 | Allow empty --exclude | jeff-allen-mongo/mut,jeff-allen-mongo/mut | mut/index/main.py | mut/index/main.py | '''
Usage:
mut-index <root> -o <output> -u <url> [-g -s --exclude <paths>]
mut-index upload [-b <bucket> -p <prefix> --no-backup] <root> -o <output> -u <url> [-g -s --exclude <paths>]
-h, --help List CLI prototype, arguments, and options.
<root> Path to the directory containing html files.
-o, --output <output> File name for the output manifest json. (e.g. manual-v3.2.json)
-u, --url <url> Base url of the property.
-g, --global Includes the manifest when searching all properties.
-s, --show-progress Shows a progress bar and other information via stdout.
-b, --bucket <bucket> Name of the s3 bucket to upload the index manifest to. [default: docs-mongodb-org-prod]
-p, --prefix <prefix> Name of the s3 prefix to attached to the manifest. [default: search-indexes]
--no-backup Disables automatic backup and restore of previous manifest versions.
--exclude <paths> A comma-separated list of directories to ignore. [default: ]
'''
from docopt import docopt
from mut.index.Manifest import generate_manifest
from mut.index.s3upload import upload_manifest_to_s3
from mut.index.MarianActions import refresh_marian, FailedRefreshError
from mut.index.utils.IntroMessage import print_intro_message
def main() -> None:
'''Generate index files.'''
options = docopt(__doc__)
root = options['<root>']
exclude = [path.strip() for path in options['--exclude'].split(',') if path]
output = options['--output']
url = options['--url']
globally = options['--global']
show_progress = options['--show-progress']
print_intro_message(root, exclude, output, url, globally)
manifest = generate_manifest(url, root, exclude, globally, show_progress)
if options['upload']:
bucket = options['--bucket']
prefix = options['--prefix']
do_backup = not options['--no-backup']
backup = upload_manifest_to_s3(bucket, prefix, output,
manifest, do_backup)
try:
refresh_marian()
print('\nAll according to plan!')
except FailedRefreshError:
if backup and do_backup:
backup.restore()
else:
with open('./' + output, 'w') as file:
file.write(manifest)
if __name__ == "__main__":
main()
| '''
Usage:
mut-index <root> -o <output> -u <url> [-g -s --exclude <paths>]
mut-index upload [-b <bucket> -p <prefix> --no-backup] <root> -o <output> -u <url> [-g -s --exclude <paths>]
-h, --help List CLI prototype, arguments, and options.
<root> Path to the directory containing html files.
-o, --output <output> File name for the output manifest json. (e.g. manual-v3.2.json)
-u, --url <url> Base url of the property.
-g, --global Includes the manifest when searching all properties.
-s, --show-progress Shows a progress bar and other information via stdout.
-b, --bucket <bucket> Name of the s3 bucket to upload the index manifest to. [default: docs-mongodb-org-prod]
-p, --prefix <prefix> Name of the s3 prefix to attached to the manifest. [default: search-indexes]
--no-backup Disables automatic backup and restore of previous manifest versions.
--exclude <paths> A comma-separated list of directories to ignore.
'''
from docopt import docopt
from mut.index.Manifest import generate_manifest
from mut.index.s3upload import upload_manifest_to_s3
from mut.index.MarianActions import refresh_marian, FailedRefreshError
from mut.index.utils.IntroMessage import print_intro_message
def main() -> None:
'''Generate index files.'''
options = docopt(__doc__)
root = options['<root>']
exclude = [path.strip() for path in options.get('--exclude', '').split(',')]
output = options['--output']
url = options['--url']
globally = options['--global']
show_progress = options['--show-progress']
print_intro_message(root, exclude, output, url, globally)
manifest = generate_manifest(url, root, exclude, globally, show_progress)
if options['upload']:
bucket = options['--bucket']
prefix = options['--prefix']
do_backup = not options['--no-backup']
backup = upload_manifest_to_s3(bucket, prefix, output,
manifest, do_backup)
try:
refresh_marian()
print('\nAll according to plan!')
except FailedRefreshError:
if backup and do_backup:
backup.restore()
else:
with open('./' + output, 'w') as file:
file.write(manifest)
if __name__ == "__main__":
main()
| apache-2.0 | Python |
10d7e64b33089597c1f3977e6611304ec783aaf0 | add matching pattern to pdb code in list | normcyr/fetch-pdb | fetch-pdb.py | fetch-pdb.py | #!/usr/bin/python3
from urllib.request import urlopen
from re import match, compile
def downloading(filename):
with open(filename) as pdb_list:
for structure in pdb_list:
if match(pdbPattern, structure):
pdb_url = base_url + structure[:4]
out_file_name = structure[:4] + '.pdb'
with urlopen(pdb_url) as response, open(out_file_name, 'wb') as out_file:
data = response.read()
out_file.write(data)
print('Downloading {} as {}.'.format(structure[:4], out_file_name))
return(data)
if __name__ == '__main__':
filename = 'pdblist'
pdbPattern = compile('\d.{3}')
base_url = 'http://www.pdb.org/pdb/download/downloadFile.do?fileFormat=pdb&compression=NO&structureId='
data = downloading(filename)
| #!/usr/bin/python3
import urllib.request
def downloading(filename):
with open(filename) as pdb_list:
for structure in pdb_list:
pdb_url = base_url + structure[:4]
out_file_name = structure[:4] + '.pdb'
with urllib.request.urlopen(pdb_url) as response, open(out_file_name, 'wb') as out_file:
data = response.read()
out_file.write(data)
print('Downloading {} as {}.'.format(structure[:4], out_file_name))
return(data)
if __name__ == '__main__':
filename = 'pdblist'
base_url = 'http://www.pdb.org/pdb/download/downloadFile.do?fileFormat=pdb&compression=NO&structureId='
data = downloading(filename)
| mit | Python |
2f617c6fb331f4e9247a1c04635460e55581d837 | add info to nodepy.runtime.scripts that it can also holds an 'args' member | nodepy/nodepy | nodepy/runtime.py | nodepy/runtime.py |
import os
import sys
#: This value is set automatically before the Node.py entry point is invoked
#: from scripts that are installed via the Node.py package manager. It will be
#: a dictionary with the following keys:
#:
#: * location: Either `system`, `global` or `local`
#: * original_path: The original value of `sys.path` before it was augmented
#: by the Node.py entry point.
#: * args: The original value of `sys.argv` when the script was invoked.
script = None
#: A list of command-line arguments to spawn a new Node.py child-process.
#: This is usually the Python interpreter and the path to the Node.py Python
#: module.
exec_args = [sys.executable, os.path.join(os.path.dirname(__file__), 'main.py')]
#: The name of the Python implementation that we're running, eg. cpython,
#: pypy, jython, ironpython, etc.
implementation = None
if hasattr(sys, 'implementation'):
implementation = sys.implementation.name.lower()
else:
implementation = sys.subversion[0].lower()
#: The value of the `NODEPY_ENV` environment variable, which must be either
#: `"production"` or `"development"`. If an invalid value is specified, a
#: warning is printed and it defaults to `"development"`.
env = os.getenv('NODEPY_ENV', 'development')
if env not in ('production', 'development'):
print('warning: invalid value of environment variable NODEPY_ENV="{}".'
.format(env))
print(' falling back to NODEPY_ENV="development".')
os.environ['NODEPY_ENV'] = env = 'development'
|
import os
import sys
#: This value is set automatically before the Node.py entry point is invoked
#: from scripts that are installed via the Node.py package manager. It will be
#: a dictionary with the following keys:
#:
#: * location: Either `system`, `global` or `local`
#: * original_path: The original value of `sys.path` before it was augmented
#: by the Node.py entry point.
script = None
#: A list of command-line arguments to spawn a new Node.py child-process.
#: This is usually the Python interpreter and the path to the Node.py Python
#: module.
exec_args = [sys.executable, os.path.join(os.path.dirname(__file__), 'main.py')]
#: The name of the Python implementation that we're running, eg. cpython,
#: pypy, jython, ironpython, etc.
implementation = None
if hasattr(sys, 'implementation'):
implementation = sys.implementation.name.lower()
else:
implementation = sys.subversion[0].lower()
#: The value of the `NODEPY_ENV` environment variable, which must be either
#: `"production"` or `"development"`. If an invalid value is specified, a
#: warning is printed and it defaults to `"development"`.
env = os.getenv('NODEPY_ENV', 'development')
if env not in ('production', 'development'):
print('warning: invalid value of environment variable NODEPY_ENV="{}".'
.format(env))
print(' falling back to NODEPY_ENV="development".')
os.environ['NODEPY_ENV'] = env = 'development'
| mit | Python |
b50cd7e7a06fe19b6bc68aa9815d33e27d8b9751 | Bump version for dev release. | openxc/openxc-python,openxc/openxc-python,openxc/openxc-python | openxc/version.py | openxc/version.py | """
Current OpenXC version constant.
This functionality is contained in its own module to prevent circular import
problems with ``__init__.py`` (which is loaded by setup.py during installation,
which in turn needs access to this version information.)
"""
VERSION = (0, 11, 2)
__version__ = '.'.join(map(str, VERSION))
def get_version():
return __version__
| """
Current OpenXC version constant.
This functionality is contained in its own module to prevent circular import
problems with ``__init__.py`` (which is loaded by setup.py during installation,
which in turn needs access to this version information.)
"""
VERSION = (0, 11, 1)
__version__ = '.'.join(map(str, VERSION))
def get_version():
return __version__
| bsd-3-clause | Python |
3c2b0e061721c592c68fa253bedc5da8ecaf806a | Add function to check old pipeline | alexandreleroux/mayavi,liulion/mayavi,dmsurti/mayavi,alexandreleroux/mayavi,liulion/mayavi,dmsurti/mayavi | tvtk/common.py | tvtk/common.py | """Common functions and classes that do not require any external
dependencies (apart from the standard library of course).
"""
# Author: Prabhu Ramachandran <prabhu_r@users.sf.net>
# Copyright (c) 2005-2007, Enthought, Inc.
# License: BSD Style.
import string
import re
import vtk
######################################################################
# Utility functions.
######################################################################
def get_tvtk_name(vtk_name):
"""Converts a VTK class name to a TVTK class name.
This function additionally converts any leading digits into a
suitable string.
For example:
>>> get_tvtk_name('vtk3DSImporter')
'ThreeDSImporter'
>>> get_tvtk_name('vtkXMLDataReader')
'XMLDataReader'
"""
if vtk_name[:3] == 'vtk':
name = vtk_name[3:]
dig2name = {'1':'One', '2':'Two', '3':'Three', '4':'Four',
'5':'Five', '6': 'Six', '7':'Seven', '8':'Eight',
'9': 'Nine', '0':'Zero'}
if name[0] in string.digits:
return dig2name[name[0]] + name[1:]
else:
return name
else:
return vtk_name
def is_old_pipeline():
vtk_major_version = vtk.vtkVersion.GetVTKMajorVersion()
if vtk_major_version < 6:
return True
else:
return False
class _Camel2Enthought:
"""Simple functor class to convert names from CamelCase to
Enthought compatible names.
For example::
>>> camel2enthought = _Camel2Enthought()
>>> camel2enthought('XMLActor2DToSGML')
'xml_actor2d_to_sgml'
"""
def __init__(self):
self.patn = re.compile(r'([A-Z0-9]+)([a-z0-9]*)')
self.nd_patn = re.compile(r'(\D[123])_D')
def __call__(self, name):
ret = self.patn.sub(self._repl, name)
ret = self.nd_patn.sub(r'\1d', ret)
if ret[0] == '_':
ret = ret[1:]
return ret.lower()
def _repl(self, m):
g1 = m.group(1)
g2 = m.group(2)
if len(g1) > 1:
if g2:
return '_' + g1[:-1] + '_' + g1[-1] + g2
else:
return '_' + g1
else:
return '_' + g1 + g2
# Instantiate a converter.
camel2enthought = _Camel2Enthought()
| """Common functions and classes that do not require any external
dependencies (apart from the standard library of course).
"""
# Author: Prabhu Ramachandran <prabhu_r@users.sf.net>
# Copyright (c) 2005-2007, Enthought, Inc.
# License: BSD Style.
import string
import re
######################################################################
# Utility functions.
######################################################################
def get_tvtk_name(vtk_name):
"""Converts a VTK class name to a TVTK class name.
This function additionally converts any leading digits into a
suitable string.
For example:
>>> get_tvtk_name('vtk3DSImporter')
'ThreeDSImporter'
>>> get_tvtk_name('vtkXMLDataReader')
'XMLDataReader'
"""
if vtk_name[:3] == 'vtk':
name = vtk_name[3:]
dig2name = {'1':'One', '2':'Two', '3':'Three', '4':'Four',
'5':'Five', '6': 'Six', '7':'Seven', '8':'Eight',
'9': 'Nine', '0':'Zero'}
if name[0] in string.digits:
return dig2name[name[0]] + name[1:]
else:
return name
else:
return vtk_name
class _Camel2Enthought:
"""Simple functor class to convert names from CamelCase to
Enthought compatible names.
For example::
>>> camel2enthought = _Camel2Enthought()
>>> camel2enthought('XMLActor2DToSGML')
'xml_actor2d_to_sgml'
"""
def __init__(self):
self.patn = re.compile(r'([A-Z0-9]+)([a-z0-9]*)')
self.nd_patn = re.compile(r'(\D[123])_D')
def __call__(self, name):
ret = self.patn.sub(self._repl, name)
ret = self.nd_patn.sub(r'\1d', ret)
if ret[0] == '_':
ret = ret[1:]
return ret.lower()
def _repl(self, m):
g1 = m.group(1)
g2 = m.group(2)
if len(g1) > 1:
if g2:
return '_' + g1[:-1] + '_' + g1[-1] + g2
else:
return '_' + g1
else:
return '_' + g1 + g2
# Instantiate a converter.
camel2enthought = _Camel2Enthought()
| bsd-3-clause | Python |
96465f47e82b6f57b34cd993fb6518771a836113 | bump version | brentp/combined-pvalues,brentp/combined-pvalues | cpv/__init__.py | cpv/__init__.py | #
__version__ = "0.50.6"
| #
__version__ = "0.50.5"
| mit | Python |
621337bd685a200a37bcbbd5fe3441d2090aab54 | Add PYTHON_ARGCOMPLETE_OK to enable completion for argcomplete users | mikethebeer/cr8,mfussenegger/cr8 | cr8/__main__.py | cr8/__main__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# PYTHON_ARGCOMPLETE_OK
import argh
import argparse
from cr8 import __version__
from cr8.timeit import timeit
from cr8.insert_json import insert_json
from cr8.insert_fake_data import insert_fake_data
from cr8.insert_blob import insert_blob
from cr8.run_spec import run_spec
from cr8.run_crate import run_crate
from cr8.run_track import run_track
def main():
p = argh.ArghParser(
prog='cr8', formatter_class=argparse.RawTextHelpFormatter)
p.add_argument(
'--version', action='version', version="%(prog)s " + __version__)
p.add_commands([timeit,
insert_json,
insert_fake_data,
insert_blob,
run_spec,
run_crate,
run_track])
p.dispatch()
if __name__ == '__main__':
main()
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import argh
import argparse
from cr8 import __version__
from cr8.timeit import timeit
from cr8.insert_json import insert_json
from cr8.insert_fake_data import insert_fake_data
from cr8.insert_blob import insert_blob
from cr8.run_spec import run_spec
from cr8.run_crate import run_crate
from cr8.run_track import run_track
def main():
p = argh.ArghParser(
prog='cr8', formatter_class=argparse.RawTextHelpFormatter)
p.add_argument(
'--version', action='version', version="%(prog)s " + __version__)
p.add_commands([timeit,
insert_json,
insert_fake_data,
insert_blob,
run_spec,
run_crate,
run_track])
p.dispatch()
if __name__ == '__main__':
main()
| mit | Python |
3c151b93f0c122f098642cabf97f755133e41a63 | Document --config and make it required. | bnkr/craftrun,bnkr/craftrun | craftrun/cli.py | craftrun/cli.py | import argparse, sys, yaml, os, logging
from craftrun import command
class Settings(object):
"""Cli and config file settings."""
def __init__(self, cli):
self.cli = cli
with open(cli.config, 'r') as io:
self.config = yaml.load(io.read())
@property
def base_dir(self):
return self._absolute_path(self.config['base_dir'])
@property
def backup_dir(self):
return self._absolute_path(self.config['backup_dir'])
@property
def server_name(self):
return self.config['server_name']
@property
def java_bin(self):
return self.config.get('java_bin', 'java')
@property
def server_jar(self):
return self.config['server_jar']
@property
def java_args(self):
default_args = ['-Xmx2G', '-XX:MaxPermSize=256M']
return self.config.get('java_args', default_args)
def _absolute_path(self, path):
config_dir = os.path.dirname(self.cli.config)
return os.path.realpath(os.path.join(config_dir, path))
class CraftRunCli(object):
"""
Creates commands and runs them with apropriate settings.
"""
def __init__(self):
self.commands = [
command.StartCommand,
command.StopCommand,
command.ConsoleCommand,
command.BackupCommand,
]
def run(self):
parser = self.get_parser()
parser.add_argument("-c", "--config", dest="config", required=True,
help="Server configuration.")
parsed = parser.parse_args()
logging.basicConfig(
level=logging.DEBUG,
format="%(asctime)s %(levelname)s %(message)s",
)
selected = next((command for command in self.commands
if command.name == parsed.command))
config = Settings(cli=parsed)
return selected(config).run()
def get_parser(self):
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(help='operation', dest="command")
for command in self.commands:
subparser = subparsers.add_parser(command.name, help=command.help)
command.configure_cli(subparser)
return parser
def main():
"""Cli entry point."""
return CraftRunCli().run()
| import argparse, sys, yaml, os, logging
from craftrun import command
class Settings(object):
"""Cli and config file settings."""
def __init__(self, cli):
self.cli = cli
with open(cli.config, 'r') as io:
self.config = yaml.load(io.read())
@property
def base_dir(self):
return self._absolute_path(self.config['base_dir'])
@property
def backup_dir(self):
return self._absolute_path(self.config['backup_dir'])
@property
def server_name(self):
return self.config['server_name']
@property
def java_bin(self):
return self.config.get('java_bin', 'java')
@property
def server_jar(self):
return self.config['server_jar']
@property
def java_args(self):
default_args = ['-Xmx2G', '-XX:MaxPermSize=256M']
return self.config.get('java_args', default_args)
def _absolute_path(self, path):
config_dir = os.path.dirname(self.cli.config)
return os.path.realpath(os.path.join(config_dir, path))
class CraftRunCli(object):
"""
Creates commands and runs them with apropriate settings.
"""
def __init__(self):
self.commands = [
command.StartCommand,
command.StopCommand,
command.ConsoleCommand,
command.BackupCommand,
]
def run(self):
parser = self.get_parser()
parser.add_argument("-c", "--config", dest="config")
parsed = parser.parse_args()
logging.basicConfig(
level=logging.DEBUG,
format="%(asctime)s %(levelname)s %(message)s",
)
selected = next((command for command in self.commands
if command.name == parsed.command))
config = Settings(cli=parsed)
return selected(config).run()
def get_parser(self):
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(help='operation', dest="command")
for command in self.commands:
subparser = subparsers.add_parser(command.name, help=command.help)
command.configure_cli(subparser)
return parser
def main():
"""Cli entry point."""
return CraftRunCli().run()
| mit | Python |
e09551ed8fef6c53acb5faab4a0165b61f98eab3 | Delete unneeded absolute_import | albertyw/albertyw.com,albertyw/albertyw.com,albertyw/albertyw.com,albertyw/albertyw.com,albertyw/albertyw.com | app/tests.py | app/tests.py | import tempfile
import unittest
import serve
import utils
class PageCase(unittest.TestCase):
def setUp(self):
serve.app.config['TESTING'] = True
self.app = serve.app.test_client()
def test_index_load(self):
self.page_test('/', b'Albert Wang')
def test_resume_load(self):
self.page_test('/resume', b'Resum')
def test_projects_load(self):
self.page_test('/projects', b'Projects')
def test_notes_load(self):
self.page_test('/notes', b'Notes')
def test_contact_load(self):
self.page_test('/contact', b'Contact')
def test_robots_load(self):
self.page_test('/robots.txt', b'')
def test_sitemap_load(self):
self.page_test('/sitemap.xml', b'xml')
def test_note_load(self):
self.page_test('/note/fibonaccoli', b'Romanesco')
def page_test(self, path, string):
response = self.app.get(path)
self.assertEqual(response.status_code, 200)
self.assertIn(string, response.get_data())
class UtilCase(unittest.TestCase):
def test_get_malformed_note(self):
note = b''
note_file = tempfile.NamedTemporaryFile()
note_file.write(note)
note = utils.get_note_file_data(note_file.name, None)
self.assertEqual(note, None)
note_file.close()
def test_prune_tilde_notes(self):
note_files = ['asdf', 'asdf~']
note_files = utils.prune_note_files(note_files)
self.assertEqual(note_files, ['asdf'])
def test_prune_dotfile_notes(self):
note_files = ['asdf', '.asdf']
note_files = utils.prune_note_files(note_files)
self.assertEqual(note_files, ['asdf'])
if __name__ == '__main__':
unittest.main()
| from __future__ import absolute_import
import tempfile
import unittest
import serve
import utils
class PageCase(unittest.TestCase):
def setUp(self):
serve.app.config['TESTING'] = True
self.app = serve.app.test_client()
def test_index_load(self):
self.page_test('/', b'Albert Wang')
def test_resume_load(self):
self.page_test('/resume', b'Resum')
def test_projects_load(self):
self.page_test('/projects', b'Projects')
def test_notes_load(self):
self.page_test('/notes', b'Notes')
def test_contact_load(self):
self.page_test('/contact', b'Contact')
def test_robots_load(self):
self.page_test('/robots.txt', b'')
def test_sitemap_load(self):
self.page_test('/sitemap.xml', b'xml')
def test_note_load(self):
self.page_test('/note/fibonaccoli', b'Romanesco')
def page_test(self, path, string):
response = self.app.get(path)
self.assertEqual(response.status_code, 200)
self.assertIn(string, response.get_data())
class UtilCase(unittest.TestCase):
def test_get_malformed_note(self):
note = b''
note_file = tempfile.NamedTemporaryFile()
note_file.write(note)
note = utils.get_note_file_data(note_file.name, None)
self.assertEqual(note, None)
note_file.close()
def test_prune_tilde_notes(self):
note_files = ['asdf', 'asdf~']
note_files = utils.prune_note_files(note_files)
self.assertEqual(note_files, ['asdf'])
def test_prune_dotfile_notes(self):
note_files = ['asdf', '.asdf']
note_files = utils.prune_note_files(note_files)
self.assertEqual(note_files, ['asdf'])
if __name__ == '__main__':
unittest.main()
| mit | Python |
1cb49fc209fb4aca854db732588003b704e3ef56 | Change resize button label | globocom/database-as-a-service,globocom/database-as-a-service,globocom/database-as-a-service,globocom/database-as-a-service | dbaas/logical/widgets/database_offering_field.py | dbaas/logical/widgets/database_offering_field.py | from django import forms
from django.utils.safestring import mark_safe
class DatabaseOfferingWidget(forms.widgets.TextInput):
def render(self, name, value, attrs=None):
html = super(DatabaseOfferingWidget, self).render(name, value,attrs)
resize_link = """
</br><a id="resizeDatabase" class="btn btn-primary" href=
""" + self.attrs['database'].get_resize_url()+""">Resize VM</a >"""
html_plus = """
<style type="text/css">
#resizeDatabase {
position: relative;
top: 5px
}
</style>
"""
html = """{}{}{}""".format(html, resize_link, html_plus)
return mark_safe(html);
| from django import forms
from django.utils.safestring import mark_safe
class DatabaseOfferingWidget(forms.widgets.TextInput):
def render(self, name, value, attrs=None):
html = super(DatabaseOfferingWidget, self).render(name, value,attrs)
resize_link = """
</br><a id="resizeDatabase" class="btn btn-primary" href=
""" + self.attrs['database'].get_resize_url()+""">Resize Database</a >"""
html_plus = """
<style type="text/css">
#resizeDatabase {
position: relative;
top: 5px
}
</style>
"""
html = """{}{}{}""".format(html, resize_link, html_plus)
return mark_safe(html);
| bsd-3-clause | Python |
398cacda0209459217271d025115776864fd2d6a | Fix a bug where type comparison is case-sensitive for TIF | CenterForOpenScience/modular-file-renderer,CenterForOpenScience/modular-file-renderer,felliott/modular-file-renderer,CenterForOpenScience/modular-file-renderer,felliott/modular-file-renderer,felliott/modular-file-renderer,felliott/modular-file-renderer,CenterForOpenScience/modular-file-renderer | mfr/extensions/pdf/render.py | mfr/extensions/pdf/render.py | import logging
import os
import furl
from mako.lookup import TemplateLookup
from mfr.core import extension
from mfr.extensions.pdf import settings
from mfr.extensions.utils import munge_url_for_localdev
logger = logging.getLogger(__name__)
class PdfRenderer(extension.BaseRenderer):
TEMPLATE = TemplateLookup(
directories=[
os.path.join(os.path.dirname(__file__), 'templates')
]).get_template('viewer.mako')
def render(self):
download_url = munge_url_for_localdev(self.metadata.download_url)
logger.debug('extension::{} supported-list::{}'.format(self.metadata.ext, settings.EXPORT_SUPPORTED))
if self.metadata.ext.lower() not in settings.EXPORT_SUPPORTED:
logger.debug('Extension not found in supported list!')
return self.TEMPLATE.render(
base=self.assets_url,
url=download_url.geturl(),
enable_hypothesis=settings.ENABLE_HYPOTHESIS
)
logger.debug('Extension found in supported list!')
exported_url = furl.furl(self.export_url)
if settings.EXPORT_TYPE:
if settings.EXPORT_MAXIMUM_SIZE:
exported_url.args['format'] = '{}.{}'.format(settings.EXPORT_MAXIMUM_SIZE,
settings.EXPORT_TYPE)
else:
exported_url.args['format'] = settings.EXPORT_TYPE
self.metrics.add('needs_export', True)
return self.TEMPLATE.render(
base=self.assets_url,
url=exported_url.url,
enable_hypothesis=settings.ENABLE_HYPOTHESIS
)
return self.TEMPLATE.render(
base=self.assets_url,
url=download_url.geturl(),
enable_hypothesis=settings.ENABLE_HYPOTHESIS
)
@property
def file_required(self):
return False
@property
def cache_result(self):
return False
| import logging
import os
import furl
from mako.lookup import TemplateLookup
from mfr.core import extension
from mfr.extensions.pdf import settings
from mfr.extensions.utils import munge_url_for_localdev
logger = logging.getLogger(__name__)
class PdfRenderer(extension.BaseRenderer):
TEMPLATE = TemplateLookup(
directories=[
os.path.join(os.path.dirname(__file__), 'templates')
]).get_template('viewer.mako')
def render(self):
download_url = munge_url_for_localdev(self.metadata.download_url)
logger.debug('extension::{} supported-list::{}'.format(self.metadata.ext, settings.EXPORT_SUPPORTED))
if self.metadata.ext not in settings.EXPORT_SUPPORTED:
logger.debug('Extension not found in supported list!')
return self.TEMPLATE.render(
base=self.assets_url,
url=download_url.geturl(),
enable_hypothesis=settings.ENABLE_HYPOTHESIS
)
logger.debug('Extension found in supported list!')
exported_url = furl.furl(self.export_url)
if settings.EXPORT_TYPE:
if settings.EXPORT_MAXIMUM_SIZE:
exported_url.args['format'] = '{}.{}'.format(settings.EXPORT_MAXIMUM_SIZE,
settings.EXPORT_TYPE)
else:
exported_url.args['format'] = settings.EXPORT_TYPE
self.metrics.add('needs_export', True)
return self.TEMPLATE.render(
base=self.assets_url,
url=exported_url.url,
enable_hypothesis=settings.ENABLE_HYPOTHESIS
)
return self.TEMPLATE.render(
base=self.assets_url,
url=download_url.geturl(),
enable_hypothesis=settings.ENABLE_HYPOTHESIS
)
@property
def file_required(self):
return False
@property
def cache_result(self):
return False
| apache-2.0 | Python |
c804b5753f4805cf3d129fa4e7febef5c032b6ca | Update version to 1.1.0 | matrix-org/python-unpaddedbase64 | unpaddedbase64.py | unpaddedbase64.py | # Copyright 2014, 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
__version__ = "1.1.0"
def encode_base64(input_bytes, urlsafe=False):
"""Encode bytes as a base64 string without any padding."""
encode = base64.urlsafe_b64encode if urlsafe else base64.b64encode
output_bytes = encode(input_bytes)
output_string = output_bytes.decode("ascii")
return output_string.rstrip(u"=")
def decode_base64(input_string):
"""Decode a base64 string to bytes inferring padding from the length of the
string."""
input_bytes = input_string.encode("ascii")
input_len = len(input_bytes)
padding = b"=" * (3 - ((input_len + 3) % 4))
decode = base64.b64decode
if u'-' in input_string or u'_' in input_string:
decode = base64.urlsafe_b64decode
output_bytes = decode(input_bytes + padding)
return output_bytes
| # Copyright 2014, 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
__version__ = "1.0.1"
def encode_base64(input_bytes, urlsafe=False):
"""Encode bytes as a base64 string without any padding."""
encode = base64.urlsafe_b64encode if urlsafe else base64.b64encode
output_bytes = encode(input_bytes)
output_string = output_bytes.decode("ascii")
return output_string.rstrip(u"=")
def decode_base64(input_string):
"""Decode a base64 string to bytes inferring padding from the length of the
string."""
input_bytes = input_string.encode("ascii")
input_len = len(input_bytes)
padding = b"=" * (3 - ((input_len + 3) % 4))
decode = base64.b64decode
if u'-' in input_string or u'_' in input_string:
decode = base64.urlsafe_b64decode
output_bytes = decode(input_bytes + padding)
return output_bytes
| apache-2.0 | Python |
4b3c24dfce6f430d42ce9f24b72de54d34c9d79e | Fix for Numeric Overflow of `softplus` implementation (#30) | rushter/MLAlgorithms | mla/neuralnet/activations.py | mla/neuralnet/activations.py | import autograd.numpy as np
"""
References:
https://en.wikipedia.org/wiki/Activation_function
"""
def sigmoid(z):
return 1.0 / (1.0 + np.exp(-z))
def softmax(z):
# Avoid numerical overflow by removing max
e = np.exp(z - np.amax(z, axis=1, keepdims=True))
return e / np.sum(e, axis=1, keepdims=True)
def linear(z):
return z
def softplus(z):
"""Smooth relu."""
# Avoid numerical overflow by putting possible inf into denominator position
return z + np.log(1 + 1 / np.exp(z))
def softsign(z):
return z / (1 + np.abs(z))
def tanh(z):
return np.tanh(z)
def relu(z):
return np.maximum(0, z)
def get_activation(name):
"""Return activation function by name"""
try:
return globals()[name]
except:
raise ValueError('Invalid activation function.')
| import autograd.numpy as np
"""
References:
https://en.wikipedia.org/wiki/Activation_function
"""
def sigmoid(z):
return 1.0 / (1.0 + np.exp(-z))
def softmax(z):
# Avoid numerical overflow by removing max
e = np.exp(z - np.amax(z, axis=1, keepdims=True))
return e / np.sum(e, axis=1, keepdims=True)
def linear(z):
return z
def softplus(z):
"""Smooth relu."""
return np.log(1 + np.exp(z))
def softsign(z):
return z / (1 + np.abs(z))
def tanh(z):
return np.tanh(z)
def relu(z):
return np.maximum(0, z)
def get_activation(name):
"""Return activation function by name"""
try:
return globals()[name]
except:
raise ValueError('Invalid activation function.')
| mit | Python |
93de55a62a88cba979c9d5e979b26f41546e2105 | fix bug. use f-string format (#2681) | open-mmlab/mmdetection,open-mmlab/mmdetection | mmdet/datasets/wider_face.py | mmdet/datasets/wider_face.py | import os.path as osp
import xml.etree.ElementTree as ET
import mmcv
from .builder import DATASETS
from .xml_style import XMLDataset
@DATASETS.register_module()
class WIDERFaceDataset(XMLDataset):
"""
Reader for the WIDER Face dataset in PASCAL VOC format.
Conversion scripts can be found in
https://github.com/sovrasov/wider-face-pascal-voc-annotations
"""
CLASSES = ('face', )
def __init__(self, **kwargs):
super(WIDERFaceDataset, self).__init__(**kwargs)
def load_annotations(self, ann_file):
data_infos = []
img_ids = mmcv.list_from_file(ann_file)
for img_id in img_ids:
filename = f'{img_id}.jpg'
xml_path = osp.join(self.img_prefix, 'Annotations',
f'{img_id}.xml')
tree = ET.parse(xml_path)
root = tree.getroot()
size = root.find('size')
width = int(size.find('width').text)
height = int(size.find('height').text)
folder = root.find('folder').text
data_infos.append(
dict(
id=img_id,
filename=osp.join(folder, filename),
width=width,
height=height))
return data_infos
| import os.path as osp
import xml.etree.ElementTree as ET
import mmcv
from .builder import DATASETS
from .xml_style import XMLDataset
@DATASETS.register_module()
class WIDERFaceDataset(XMLDataset):
"""
Reader for the WIDER Face dataset in PASCAL VOC format.
Conversion scripts can be found in
https://github.com/sovrasov/wider-face-pascal-voc-annotations
"""
CLASSES = ('face', )
def __init__(self, **kwargs):
super(WIDERFaceDataset, self).__init__(**kwargs)
def load_annotations(self, ann_file):
data_infos = []
img_ids = mmcv.list_from_file(ann_file)
for img_id in img_ids:
filename = '{img_id}.jpg'
xml_path = osp.join(self.img_prefix, 'Annotations',
f'{img_id}.xml')
tree = ET.parse(xml_path)
root = tree.getroot()
size = root.find('size')
width = int(size.find('width').text)
height = int(size.find('height').text)
folder = root.find('folder').text
data_infos.append(
dict(
id=img_id,
filename=osp.join(folder, filename),
width=width,
height=height))
return data_infos
| apache-2.0 | Python |
0a76529251c44a694c6a8f2d6fa3d6c2bf6e2de3 | Add TODO items inline | missaugustina/nova-api-docs-tracker,missaugustina/nova-api-docs-tracker,missaugustina/nova-api-docs-tracker,missaugustina/nova-api-docs-tracker | nova_api_docs_tracker/main.py | nova_api_docs_tracker/main.py | # -*- coding: utf-8 -*-
import glob
import os
import re
SPLIT_FILE_RE = re.compile(r'\n([\w\s]+)\n\=+\n', re.MULTILINE)
def main():
# TODO(auggy): args: inc files path
path = ''
output = {}
# get inc files
inc_files = get_inc_files(path)
for filename in inc_files:
contents = inc_files[filename]
# TODO(auggy): link to file on github??
# TODO(auggy): this should use docutils
extracted_contents = extract_stuff(split_inc_file(contents))
# verify body
output['body'] = extracted_contents['body']
# verify methods names
output['methods_list'] = '\n'.join(extracted_contents['methods'].keys())
methods = {}
for method_name in extracted_contents['methods']:
method_content = extracted_contents['methods'][method_name]
# verify parameters
# TODO(auggy): create parameters list
# verify examples
# TODO(auggy): I don't think we need anything special here...
methods[method_name] = method_content
output['methods'] = methods
# TODO(auggy): print to local file using Jinja templates
print "Body: %(body)s \n " + \
"Method List: %(methods_list)s \n" + \
"Methods: %(methods)s" \
% output
# TODO(auggy): post bugs to Launchpad using local files
# TODO(auggy): keep track of bugs...?
# TODO(auggy): option for retrieving and editing LP bugs...
def get_inc_files(path):
inc_files = {}
for filename in glob.glob(os.path.join(path, '*.inc')):
print "Processing %(filename)s..." % {'filename': filename}
f = open(filename, 'r')
# TODO(auggy): remove rest of path from filename
inc_files[filename] = f.read()
return inc_files
def split_inc_file(contents):
split_file = re.split(SPLIT_FILE_RE, contents)
return split_file
def extract_stuff(split_contents):
result = dict()
result['body'] = split_contents[0]
result['methods'] = dict(zip(map(lambda x: re.sub('\\n', '', x), split_contents[3::2]), split_contents[2::2]))
return result
# Allow for local debugging
if __name__ == '__main__':
main() | # -*- coding: utf-8 -*-
import glob
import os
import re
SPLIT_FILE_RE = re.compile(r'\n([\w\s]+)\n\=+\n', re.MULTILINE)
def main():
# TODO(auggy): args: inc files path
path = ''
output = {}
# get inc files
inc_files = get_inc_files(path)
for filename in inc_files:
contents = inc_files[filename]
# TODO(auggy): link to file on github??
# TODO(auggy): this should use docutils
extracted_contents = extract_stuff(split_inc_file(contents))
# verify body
output['body'] = extracted_contents['body']
# verify methods names
output['methods_list'] = '\n'.join(extracted_contents['methods'].keys())
methods = {}
for method_name in extracted_contents['methods']:
method_content = extracted_contents['methods'][method_name]
# verify parameters
# TODO(auggy): create parameters list
# verify examples
# TODO(auggy): I don't think we need anything special here...
methods[method_name] = method_content
output['methods'] = methods
# TODO(auggy): use Jinja templates
print "Body: %(body)s \n " + \
"Method List: %(methods_list)s \n" + \
"Methods: %(methods)s" \
% output
def get_inc_files(path):
inc_files = {}
for filename in glob.glob(os.path.join(path, '*.inc')):
print "Processing %(filename)s..." % {'filename': filename}
f = open(filename, 'r')
# TODO(auggy): remove rest of path from filename
inc_files[filename] = f.read()
return inc_files
def split_inc_file(contents):
split_file = re.split(SPLIT_FILE_RE, contents)
return split_file
def extract_stuff(split_contents):
result = dict()
result['body'] = split_contents[0]
result['methods'] = dict(zip(map(lambda x: re.sub('\\n', '', x), split_contents[3::2]), split_contents[2::2]))
return result
# Allow for local debugging
if __name__ == '__main__':
main() | apache-2.0 | Python |
60d923433aa34dcdd1ce7bafd2431d4778c4d394 | remove old config | timwaizenegger/swift-bluebox,timwaizenegger/swift-bluebox | appConfig.py | appConfig.py | """
Project Bluebox
2015, University of Stuttgart, IPVS/AS
"""
"""
Project Bluebox
Copyright (C) <2015> <University of Stuttgart>
This software may be modified and distributed under the terms
of the MIT license. See the LICENSE file for details.
"""
#swift_type = "BluemixV1Auth"
#swift_url = "https://swift.ng.bluemix.net/global_auth/631afd9d-89a3-4bc7-b1e4-7e1723c6195a/a898adc0-770a-4c6a-874b-3f9dc353dabb"
#swift_user = "1624de7f5cbaa7b897ec6abd03a80ac59c3d2547"
#swift_pw = "ef6fd13111aaab99d94a0c9cf8daa05d4ebd2b0fa007ff819637f96dc8c9"
"""
swift_type = "BluemixV1Auth"
swift_url ="https://swift.ng.bluemix.net/global_auth/f510262a-38f3-4489-b250-15fe8927066f/e9a28ae3-0c75-4b96-8f3d-238f63efe93c"
swift_user = "5eedc87ad5782a811406fc45a5cd26b01d07a492"
swift_pw = "dbc1a784e0269c38b7908c3d2a9d921a19fa85be80cd185d088b200b7812"
"""
swift_type = "regular"
swift_url = "http://192.168.209.204:8080/auth/v1.0"
swift_user = "test:tester"
swift_pw = "testing" | """
Project Bluebox
2015, University of Stuttgart, IPVS/AS
"""
"""
Project Bluebox
Copyright (C) <2015> <University of Stuttgart>
This software may be modified and distributed under the terms
of the MIT license. See the LICENSE file for details.
"""
"""
this is the current config file for bluebox. define the server connection below as either of the two types:
"""
"""
swift_type = "BluemixV1Auth"
swift_url ="https://swift.ng.bluemix.net/global_auth/?????"
swift_user = "?????"
swift_pw = "?????"
"""
swift_type = "regular"
swift_url = "http://?????/auth/v1.0"
swift_user = "<account>:<user>"
swift_pw = "<pw>"
| mit | Python |
c97d4cce296e9505633404790847183122638cd7 | Revert crippling of test_old_api | jaberg/nengo,jaberg/nengo | nengo/test/test_old_api.py | nengo/test/test_old_api.py | import numpy as np
from nengo.old_api import Network
from matplotlib import pyplot as plt
def rmse(a, b):
return np.sqrt(np.mean((a - b) ** 2))
def test_basic_1(show=False):
"""
Create a network with sin(t) being represented by
a population of spiking neurons. Assert that the
decoded value from the population is close to the
true value (which is input to the population).
Expected duration of test: about .7 seconds
"""
net = Network('Runtime Test', dt=0.001, seed=123)
print 'make_input'
net.make_input('in', value=np.sin)
print 'make A'
net.make('A', 1000, 1)
print 'connecting in -> A'
net.connect('in', 'A')
A_fast_probe = net.make_probe('A', dt_sample=0.01, pstc=0.001)
A_med_probe = net.make_probe('A', dt_sample=0.01, pstc=0.01)
A_slow_probe = net.make_probe('A', dt_sample=0.01, pstc=0.1)
in_probe = net.make_probe('in', dt_sample=0.01, pstc=0.01)
net.run(1.0)
target = np.sin(np.arange(0, 1000, 10) / 1000.)
target.shape = (100, 1)
assert np.allclose(target, in_probe.get_data())
assert rmse(target, A_fast_probe.get_data()) < .25
assert rmse(target, A_med_probe.get_data()) < .025
assert rmse(target, A_slow_probe.get_data()) < 0.1
for speed in 'fast', 'med', 'slow':
probe = locals()['A_%s_probe' % speed]
data = np.asarray(probe.get_data()).flatten()
plt.plot(data, label=speed)
in_data = np.asarray(in_probe.get_data()).flatten()
plt.plot(in_data, label='in')
plt.legend(loc='upper left')
if show:
plt.show()
| import numpy as np
from nengo.old_api import Network
from matplotlib import pyplot as plt
def rmse(a, b):
return np.sqrt(np.mean((a - b) ** 2))
def test_basic_1(show=False):
"""
Create a network with sin(t) being represented by
a population of spiking neurons. Assert that the
decoded value from the population is close to the
true value (which is input to the population).
Expected duration of test: about .7 seconds
"""
net = Network('Runtime Test', dt=0.001, seed=123)
print 'make_input'
net.make_input('in', value=np.sin)
print 'make A'
net.make('A', 1000, 1)
print 'connecting in -> A'
net.connect('in', 'A')
A_fast_probe = net.make_probe('A', dt_sample=0.01, pstc=0.001)
A_med_probe = net.make_probe('A', dt_sample=0.01, pstc=0.01)
A_slow_probe = net.make_probe('A', dt_sample=0.01, pstc=0.1)
in_probe = net.make_probe('in', dt_sample=0.01, pstc=0.01)
simtime = 1.0
net.run(simtime)
target = np.sin(np.arange(0, simtime / .001) / 1000.)
print rmse(target, A_fast_probe.get_data())
print rmse(target, A_med_probe.get_data())
print rmse(target, A_slow_probe.get_data())
#assert np.allclose(target[::10], in_probe.get_data())
#assert rmse(target, A_fast_probe.get_data()) < .25
#assert rmse(target, A_med_probe.get_data()) < .025
#assert rmse(target, A_slow_probe.get_data()) < 0.1
for speed in 'fast', 'med', 'slow':
probe = locals()['A_%s_probe' % speed]
data = np.asarray(probe.get_data()).flatten()
plt.plot(data, label=speed)
in_data = np.asarray(in_probe.get_data()).flatten()
plt.plot(in_data, label='in')
plt.legend(loc='upper left')
if show:
plt.show()
| mit | Python |
c2bca21718295b6400471395f5da3ca9d42e8a84 | Check if error available on output | modoboa/modoboa-dmarc,modoboa/modoboa-dmarc | modoboa_dmarc/tests/mixins.py | modoboa_dmarc/tests/mixins.py | """Test mixins."""
import os
import sys
import six
from django.core.management import call_command
from django.utils.six import StringIO
class CallCommandMixin(object):
"""A mixin to provide command execution shortcuts."""
def setUp(self):
"""Replace stdin"""
super(CallCommandMixin, self).setUp()
self.stdin = sys.stdin
def tearDown(self):
sys.stdin = self.stdin
def import_report(self, path):
"""Import test report from file."""
with open(path) as fp:
buf = six.StringIO(fp.read())
sys.stdin = buf
out = StringIO()
call_command("import_aggregated_report", "--pipe", stdout=out)
return out.getvalue()
def import_reports(self, folder="reports"):
"""Import reports from folder."""
path = os.path.join(os.path.dirname(__file__), folder)
for f in os.listdir(path):
fpath = os.path.join(path, f)
if f.startswith(".") or not os.path.isfile(fpath):
continue
self.import_report(fpath)
def import_fail_reports(self, folder="fail-reports"):
"""Import failed reports from folder."""
path = os.path.join(os.path.dirname(__file__), folder)
for f in os.listdir(path):
fpath = os.path.join(path, f)
if f.startswith(".") or not os.path.isfile(fpath):
continue
ret = self.import_report(fpath)
self.assertNotIn('ERROR-PARSING', ret)
| """Test mixins."""
import os
import sys
import six
from django.core.management import call_command
class CallCommandMixin(object):
"""A mixin to provide command execution shortcuts."""
def setUp(self):
"""Replace stdin"""
super(CallCommandMixin, self).setUp()
self.stdin = sys.stdin
def tearDown(self):
sys.stdin = self.stdin
def import_report(self, path):
"""Import test report from file."""
with open(path) as fp:
buf = six.StringIO(fp.read())
sys.stdin = buf
call_command("import_aggregated_report", "--pipe")
def import_reports(self, folder="reports"):
"""Import reports from folder."""
path = os.path.join(os.path.dirname(__file__), folder)
for f in os.listdir(path):
fpath = os.path.join(path, f)
if f.startswith(".") or not os.path.isfile(fpath):
continue
self.import_report(fpath)
def import_fail_reports(self, folder="fail-reports"):
"""Import failed reports from folder."""
path = os.path.join(os.path.dirname(__file__), folder)
for f in os.listdir(path):
fpath = os.path.join(path, f)
if f.startswith(".") or not os.path.isfile(fpath):
continue
self.import_report(fpath)
# TODO check return code different from 0
| mit | Python |
8c858df942af961ec475461814c32e2769ac310a | Remove unused class | controversial/ui2 | ui2/animate.py | ui2/animate.py | from objc_util import *
import time
def animate(animation, duration=0.25, delay=0.0, completion=None):
"""A drop-in replacement for ui.animate which supports easings."""
if completion is not None:
def c(cmd, success):
completion(success)
release_global(ObjCInstance(cmd))
oncomplete = ObjCBlock(c, argtypes=[c_void_p, c_void_p])
retain_global(oncomplete)
else:
oncomplete = None
UIView.animateWithDuration_delay_options_animations_completion_(
duration, delay, 0, ObjCBlock(animation), oncomplete
)
| from objc_util import *
import time
CATransaction = ObjCClass("CATransaction")
def animate(animation, duration=0.25, delay=0.0, completion=None):
"""A drop-in replacement for ui.animate which supports easings."""
if completion is not None:
def c(cmd, success):
completion(success)
release_global(ObjCInstance(cmd))
oncomplete = ObjCBlock(c, argtypes=[c_void_p, c_void_p])
retain_global(oncomplete)
else:
oncomplete = None
UIView.animateWithDuration_delay_options_animations_completion_(
duration, delay, 0, ObjCBlock(animation), oncomplete
)
| mit | Python |
c2822ddb0297600c1b70c543f97b719b03d9b202 | update routes : /api | buildbuild/buildbuild,buildbuild/buildbuild,buildbuild/buildbuild | buildbuild/buildbuild/urls.py | buildbuild/buildbuild/urls.py | from django.conf.urls import patterns, include, url
from django.contrib import admin
from users import views
admin.autodiscover()
from users.views import Login
from users.views import Logout
from buildbuild.views import Home
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'buildbuild.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^$', Home.as_view(), name='home'),
url(r'^api/', include('api.urls', namespace="api")),
url(r'^login/', Login.as_view(), name="login"),
url(r'^logout/', Logout.as_view(), name="logout"),
url(r'^admin/', include(admin.site.urls)),
url(r'^users/', include('users.urls',namespace='users')),
)
| from django.conf.urls import patterns, include, url
from django.contrib import admin
from users import views
admin.autodiscover()
from users.views import Login
from users.views import Logout
from buildbuild.views import Home
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'buildbuild.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^$', Home.as_view(), name='home'),
url(r'^login/', Login.as_view(), name="login"),
url(r'^logout/', Logout.as_view(), name="logout"),
url(r'^admin/', include(admin.site.urls)),
url(r'^users/', include('users.urls',namespace='users')),
)
| bsd-3-clause | Python |
bdef19ef909fe3d7b8e90fb4b470bbe56765e25d | Bump to 12 gunicorn workers for 8GB linode | jonafato/vim-awesome,vim-awesome/vim-awesome,vim-awesome/vim-awesome,vim-awesome/vim-awesome,divad12/vim-awesome,divad12/vim-awesome,shaialon/vim-awesome,starcraftman/vim-awesome,shaialon/vim-awesome,jonafato/vim-awesome,vim-awesome/vim-awesome,divad12/vim-awesome,shaialon/vim-awesome,jonafato/vim-awesome,starcraftman/vim-awesome,starcraftman/vim-awesome,vim-awesome/vim-awesome,jonafato/vim-awesome,divad12/vim-awesome,shaialon/vim-awesome,starcraftman/vim-awesome | conf/gunicorn.py | conf/gunicorn.py | # Paths are relative to $HOME
pythonpath = 'vim-awesome/web'
pidfile = '.gunicorn.pid'
daemon = True
accesslog = 'logs/gunicorn/access.log'
errorlog = 'logs/gunicorn/error.log'
# Recommendation is 2 * NUM_CORES + 1. See
# http://gunicorn-docs.readthedocs.org/en/latest/design.html#how-many-workers
workers = 12
| # Paths are relative to $HOME
pythonpath = 'vim-awesome/web'
pidfile = '.gunicorn.pid'
daemon = True
accesslog = 'logs/gunicorn/access.log'
errorlog = 'logs/gunicorn/error.log'
# Recommendation is 2 * NUM_CORES + 1. See
# http://gunicorn-docs.readthedocs.org/en/latest/design.html#how-many-workers
workers = 2
| mit | Python |
44f026d434aa4cfe08d1bf0871d28f303b63add8 | bump version because of PyPI rules | scikit-hep/uproot,scikit-hep/uproot,scikit-hep/uproot,scikit-hep/uproot | uproot/version.py | uproot/version.py | #!/usr/bin/env python
# Copyright (c) 2017, DIANA-HEP
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
__version__ = "2.5.21"
version = __version__
version_info = tuple(re.split(r"[-\.]", __version__))
del re
| #!/usr/bin/env python
# Copyright (c) 2017, DIANA-HEP
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
__version__ = "2.5.20"
version = __version__
version_info = tuple(re.split(r"[-\.]", __version__))
del re
| bsd-3-clause | Python |
c9ccb83f196f3b4e431fd5778233de7e3d6b7624 | Make has_hashes a prop | hanjae/upstream,Storj/upstream | upstream/chunk.py | upstream/chunk.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from upstream.exc import ChunkError
class Chunk(object):
def __init__(self, filehash=None, decryptkey=None, filename=None,
filepath=None):
""" Stores information about an encryted chunk. Allows for
format conversions.
:param filehash: The hash for a file.
:param decryptkey: The decryption key for a file.
:param filename: Name of the file(destroyed on encryption).
:param filepath: Location of the file.
"""
self.filehash = filehash
self.decryptkey = decryptkey
self.filename = filename
self.filepath = filepath
def from_uri(self, uri):
"""
:param uri: URI as a string
:return:
"""
try:
self.filehash, self.decryptkey = str(uri).split("?key=")
except:
raise ChunkError("%s not format of <hash>?key=<key>")
def from_json(self, json_str):
self.json_str = json_str
data = json.loads(json_str)
self.filehash = data['filehash']
self.decryptkey = data['key']
@property
def uri(self):
if not self.has_hashes:
raise ChunkError("Missing filehash or decryptkey")
return self.filehash + "?key=" + self.decryptkey
def get_hashes(self):
if not self.has_hashes:
raise ChunkError("Missing filehash or decryptkey")
return self.filehash, self.decryptkey
def get_json(self):
if not self.has_hashes:
raise ChunkError("Missing filehash or decryptkey")
return json.dumps(
{
"key": self.decryptkey,
"filehash": self.filehash,
}
)
@property
def has_hashes(self):
return self.filehash and self.decryptkey
# Extra metadata
def set_filename(self, filename):
self.filename = filename
def set_filepath(self, filepath):
self.filepath = filepath
def get_filename(self):
return self.filename
def get_filepath(self):
return self.filepath
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from upstream.exc import ChunkError
class Chunk(object):
def __init__(self, filehash=None, decryptkey=None, filename=None,
filepath=None):
""" Stores information about an encryted chunk. Allows for
format conversions.
:param filehash: The hash for a file.
:param decryptkey: The decryption key for a file.
:param filename: Name of the file(destroyed on encryption).
:param filepath: Location of the file.
"""
self.filehash = filehash
self.decryptkey = decryptkey
self.filename = filename
self.filepath = filepath
def from_uri(self, uri):
"""
:param uri: URI as a string
:return:
"""
try:
self.filehash, self.decryptkey = str(uri).split("?key=")
except:
raise ChunkError("%s not format of <hash>?key=<key>")
def from_json(self, json_str):
self.json_str = json_str
data = json.loads(json_str)
self.filehash = data['filehash']
self.decryptkey = data['key']
@property
def uri(self):
if not self.has_hashes:
raise ChunkError("Missing filehash or decryptkey")
return self.filehash + "?key=" + self.decryptkey
def get_hashes(self):
if not self.has_hashes:
raise ChunkError("Missing filehash or decryptkey")
return self.filehash, self.decryptkey
def get_json(self):
if not self.has_hashes:
raise ChunkError("Missing filehash or decryptkey")
return json.dumps(
{
"key": self.decryptkey,
"filehash": self.filehash,
}
)
def has_hashes(self):
return self.filehash and self.decryptkey
# Extra metadata
def set_filename(self, filename):
self.filename = filename
def set_filepath(self, filepath):
self.filepath = filepath
def get_filename(self):
return self.filename
def get_filepath(self):
return self.filepath
| mit | Python |
ee28ede60ad70c0e23edd743fc9b4ff83c13dfaa | use parameterized test | cupy/cupy,cupy/cupy,cupy/cupy,cupy/cupy | tests/cupy_tests/math_tests/test_window.py | tests/cupy_tests/math_tests/test_window.py | import unittest
from cupy import testing
@testing.parameterize(
*testing.product({
'm': [0, 1, -1, 1024],
'name': ['blackman', 'hamming', 'hanning'],
})
)
class TestWindow(unittest.TestCase):
_multiprocess_can_split_ = True
@testing.numpy_cupy_allclose(atol=1e-5)
def test_window(self, xp):
return getattr(xp, self.name)(self.m)
| import unittest
from cupy import testing
@testing.gpu
class TestWindow(unittest.TestCase):
_multiprocess_can_split_ = True
@testing.numpy_cupy_allclose(atol=1e-5)
def check_0(self, name, xp):
a = 0
return getattr(xp, name)(a)
@testing.numpy_cupy_allclose(atol=1e-5)
def check_1(self, name, xp):
a = 1
return getattr(xp, name)(a)
@testing.numpy_cupy_allclose(atol=1e-5)
def check_negative(self, name, xp):
a = -1
return getattr(xp, name)(a)
@testing.numpy_cupy_allclose(atol=1e-5)
def check_large(self, name, xp):
a = 1024
return getattr(xp, name)(a)
def check_all(self, name):
self.check_0(name)
self.check_1(name)
self.check_negative(name)
self.check_large(name)
def test_blackman(self):
self.check_all('blackman')
def test_hamming(self):
self.check_all('hamming')
def test_hanning(self):
self.check_all('hanning')
| mit | Python |
864e567cc36d6c303eeac546738fb6c7e2619ecc | Update the file_inject command so that it automatically creates directories. | coreos/openstack-guest-agents-unix,prometheanfire/openstack-guest-agents-unix,joejulian/openstack-guest-agents-unix,rackerlabs/openstack-guest-agents-unix,joejulian/openstack-guest-agents-unix,rackerlabs/openstack-guest-agents-unix,coreos/openstack-guest-agents-unix,rackerlabs/openstack-guest-agents-unix,prometheanfire/openstack-guest-agents-unix,coreos/openstack-guest-agents-unix,prometheanfire/openstack-guest-agents-unix,joejulian/openstack-guest-agents-unix,coreos/openstack-guest-agents-unix | unix/commands/file_inject.py | unix/commands/file_inject.py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2011 Openstack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
JSON File injection plugin
"""
import base64
import commands
import os
import os.path
class FileInject(commands.CommandBase):
def __init__(self, *args, **kwargs):
pass
@commands.command_add('injectfile')
def injectfile_cmd(self, data):
try:
b64_decoded = base64.b64decode(data)
except:
return (500, "Error doing base64 decoding of data")
(filename, data) = b64_decoded.split(',', 1)
dirname = os.path.dirname(filename)
if not os.path.exists(dirname):
os.makedirs(dirname)
f = open(filename, 'w')
f.write(data)
f.close()
return (0, "")
| # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2011 Openstack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
JSON File injection plugin
"""
import base64
import commands
class FileInject(commands.CommandBase):
def __init__(self, *args, **kwargs):
pass
@commands.command_add('injectfile')
def injectfile_cmd(self, data):
try:
b64_decoded = base64.b64decode(data)
except:
return (500, "Error doing base64 decoding of data")
(filename, data) = b64_decoded.split(',', 1)
f = open(filename, 'w')
f.write(data)
f.close()
return (0, "")
| apache-2.0 | Python |
3cafa831a5c13a119943dbbf03df5b6820d07abb | Update ada_boost_all_features.py | archonren/project | algorithms/ada_boost_all_features.py | algorithms/ada_boost_all_features.py | import os
from scipy.io import loadmat
from numpy import array, vstack, reshape, delete
from sklearn.metrics import precision_recall_fscore_support
from sklearn.ensemble import AdaBoostClassifier
from sklearn import preprocessing
from sklearn.feature_selection import VarianceThreshold
from sklearn.svm import SVC
def normalize_data(x):
scaler = preprocessing.StandardScaler().fit(x)
return scaler
def adaboost(D, num_of_files):
feature = D['feature_vector']
target = D['target']
print feature.shape
#print feature[50]
um_targets = vstack((((target[:,i])[0]) for i in range(num_of_files))) #[0:385]
print um_targets.shape
#print "####################################################"
#print ((target[:,0])[0])[0:385].shape
train_size = int (0.6 * len(feature))
validation_size = int (0.1 * len(feature))
train_d = feature[0:train_size]
validation_d = feature[train_size:(train_size+validation_size)]
test_d = feature[(train_size+validation_size):]
train_t = um_targets[0:train_size]
validation_t = um_targets[train_size:(train_size+validation_size)]
test_t = um_targets[(train_size+validation_size):]
#print train_t.shape
#print train_d.shape
#print train_d[50]
#print validation_t[:,0].shape
scaler = normalize_data(train_d)
#scale the training data so that each feature has zero mean and unit variance
scaled_tr_data = scaler.transform(train_d)
#print (scaled_tr_data)
scaled_validn_data = scaler.transform(validation_d)
#scale the test data in a similar fashion
scaled_test_data = scaler.transform(test_d)
clf = AdaBoostClassifier(n_estimators=1000)
clf.fit(scaled_tr_data, train_t[:,0])
pred = clf.predict(scaled_test_data)
print("Number of mislabeled points out of a total %d points : %d" % (test_d.shape[0], (test_t[:,0] != pred).sum()))
print precision_recall_fscore_support(test_t[:,0], pred)
UM = loadmat("C:\Users\Joms\Desktop\um\um_all_data.mat")
UMM = loadmat("C:\Users\Joms\Desktop\umm\umm_all_data.mat")
UU = loadmat("C:\Users\Joms\Desktop\uu\uu_all_data.mat")
print "UM"
adaboost(UM, 95)
print "UMM"
adaboost(UMM, 96)
print "UU"
adaboost(UU, 98)
| import os
from scipy.io import loadmat
from numpy import array, vstack, reshape, delete
from sklearn.metrics import precision_recall_fscore_support
from sklearn.ensemble import AdaBoostClassifier
from sklearn import preprocessing
from sklearn.feature_selection import VarianceThreshold
from sklearn.svm import SVC
def normalize_data(x):
scaler = preprocessing.StandardScaler().fit(x)
return scaler
def adaboost(D):
feature = D['feature_vector']
target = D['target']
print feature.shape
#print feature[50]
um_targets = vstack((((target[:,i])[0]) for i in range(95))) #[0:385]
print um_targets.shape
#print "####################################################"
#print ((target[:,0])[0])[0:385].shape
train_size = int (0.6 * len(feature))
validation_size = int (0.1 * len(feature))
train_d = feature[0:train_size]
validation_d = feature[train_size:(train_size+validation_size)]
test_d = feature[(train_size+validation_size):]
train_t = um_targets[0:train_size]
validation_t = um_targets[train_size:(train_size+validation_size)]
test_t = um_targets[(train_size+validation_size):]
#print train_t.shape
#print train_d.shape
#print train_d[50]
#print validation_t[:,0].shape
scaler = normalize_data(train_d)
#scale the training data so that each feature has zero mean and unit variance
scaled_tr_data = scaler.transform(train_d)
#print (scaled_tr_data)
scaled_validn_data = scaler.transform(validation_d)
#scale the test data in a similar fashion
scaled_test_data = scaler.transform(test_d)
clf = AdaBoostClassifier(n_estimators=1000)
clf.fit(scaled_tr_data, train_t[:,0])
pred = clf.predict(scaled_test_data)
print("Number of mislabeled points out of a total %d points : %d" % (test_d.shape[0], (test_t[:,0] != pred).sum()))
print precision_recall_fscore_support(test_t[:,0], pred)
UM = loadmat("C:\Users\Joms\Desktop\um\um_all_data.mat")
UMM = loadmat("C:\Users\Joms\Desktop\umm\umm_all_data.mat")
UU = loadmat("C:\Users\Joms\Desktop\uu\uu_all_data.mat")
print "UM"
adaboost(UM)
print "UMM"
adaboost(UMM)
print "UU"
adaboost(UU)
| mit | Python |
59cca2c867998a936405a87b704f674a271a13d8 | Add required dependencies (#9427) | mociepka/saleor,mociepka/saleor,mociepka/saleor | saleor/checkout/migrations/0040_add_handle_checkouts_permission.py | saleor/checkout/migrations/0040_add_handle_checkouts_permission.py | # Generated by Django 3.2.12 on 2022-03-08 10:35
from django.core.management.sql import emit_post_migrate_signal
from django.db import migrations
def assing_permissions(apps, schema_editor):
# force post signal as permissions are created in post migrate signals
# related Django issue https://code.djangoproject.com/ticket/23422
emit_post_migrate_signal(2, False, "default")
Permission = apps.get_model("auth", "Permission")
App = apps.get_model("app", "App")
Group = apps.get_model("auth", "Group")
handle_checkouts = Permission.objects.filter(
codename="handle_checkouts", content_type__app_label="checkout"
).first()
manage_checkouts = Permission.objects.filter(
codename="manage_checkouts", content_type__app_label="checkout"
).first()
apps = App.objects.filter(
permissions=manage_checkouts,
)
for app in apps.iterator():
app.permissions.add(handle_checkouts)
groups = Group.objects.filter(
permissions=manage_checkouts,
)
for group in groups.iterator():
group.permissions.add(handle_checkouts)
class Migration(migrations.Migration):
dependencies = [
("product", "0159_auto_20220209_1501"),
("order", "0131_rename_order_token_id"),
("checkout", "0039_alter_checkout_email"),
]
operations = [
migrations.AlterModelOptions(
name="checkout",
options={
"ordering": ("-last_change", "pk"),
"permissions": (
("manage_checkouts", "Manage checkouts"),
("handle_checkouts", "Handle checkouts"),
),
},
),
migrations.RunPython(assing_permissions, migrations.RunPython.noop),
]
| # Generated by Django 3.2.12 on 2022-03-08 10:35
from django.core.management.sql import emit_post_migrate_signal
from django.db import migrations
def assing_permissions(apps, schema_editor):
# force post signal as permissions are created in post migrate signals
# related Django issue https://code.djangoproject.com/ticket/23422
emit_post_migrate_signal(2, False, "default")
Permission = apps.get_model("auth", "Permission")
App = apps.get_model("app", "App")
Group = apps.get_model("auth", "Group")
handle_checkouts = Permission.objects.filter(
codename="handle_checkouts", content_type__app_label="checkout"
).first()
manage_checkouts = Permission.objects.filter(
codename="manage_checkouts", content_type__app_label="checkout"
).first()
apps = App.objects.filter(
permissions=manage_checkouts,
)
for app in apps.iterator():
app.permissions.add(handle_checkouts)
groups = Group.objects.filter(
permissions=manage_checkouts,
)
for group in groups.iterator():
group.permissions.add(handle_checkouts)
class Migration(migrations.Migration):
dependencies = [
("checkout", "0039_alter_checkout_email"),
]
operations = [
migrations.AlterModelOptions(
name="checkout",
options={
"ordering": ("-last_change", "pk"),
"permissions": (
("manage_checkouts", "Manage checkouts"),
("handle_checkouts", "Handle checkouts"),
),
},
),
migrations.RunPython(assing_permissions, migrations.RunPython.noop),
]
| bsd-3-clause | Python |
48f8f1b1e322f8e3fc8b738312e780645211d3e8 | Revert "piratebay: New URL" | laurent-george/weboob,frankrousseau/weboob,Boussadia/weboob,Boussadia/weboob,willprice/weboob,RouxRC/weboob,laurent-george/weboob,Boussadia/weboob,frankrousseau/weboob,Boussadia/weboob,nojhan/weboob-devel,Konubinix/weboob,sputnick-dev/weboob,RouxRC/weboob,yannrouillard/weboob,willprice/weboob,Konubinix/weboob,RouxRC/weboob,nojhan/weboob-devel,Konubinix/weboob,laurent-george/weboob,willprice/weboob,frankrousseau/weboob,yannrouillard/weboob,sputnick-dev/weboob,yannrouillard/weboob,sputnick-dev/weboob,nojhan/weboob-devel | modules/piratebay/browser.py | modules/piratebay/browser.py | # -*- coding: utf-8 -*-
# Copyright(C) 2010-2011 Julien Veyssier
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
import urllib
from weboob.tools.browser import BaseBrowser, BrowserHTTPNotFound
from .pages.index import IndexPage
from .pages.torrents import TorrentsPage, TorrentPage
__all__ = ['PiratebayBrowser']
class PiratebayBrowser(BaseBrowser):
DOMAIN = 'thepiratebay.se'
PROTOCOL = 'https'
ENCODING = 'utf-8'
USER_AGENT = BaseBrowser.USER_AGENTS['wget']
PAGES = {'https://thepiratebay.se/': IndexPage,
'https://thepiratebay.se/search/.*/0/7/0': TorrentsPage,
'https://thepiratebay.se/torrent/.*': TorrentPage
}
def iter_torrents(self, pattern):
self.location('https://thepiratebay.se/search/%s/0/7/0' % urllib.quote_plus(pattern.encode('utf-8')))
assert self.is_on_page(TorrentsPage)
return self.page.iter_torrents()
def get_torrent(self, id):
try:
self.location('https://thepiratebay.se/torrent/%s/' % id)
except BrowserHTTPNotFound:
return
if self.is_on_page(TorrentPage):
return self.page.get_torrent(id)
| # -*- coding: utf-8 -*-
# Copyright(C) 2010-2011 Julien Veyssier
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
import urllib
from weboob.tools.browser import BaseBrowser, BrowserHTTPNotFound
from .pages.index import IndexPage
from .pages.torrents import TorrentsPage, TorrentPage
__all__ = ['PiratebayBrowser']
class PiratebayBrowser(BaseBrowser):
DOMAIN = 'thepiratebay.gl'
PROTOCOL = 'https'
ENCODING = 'utf-8'
USER_AGENT = BaseBrowser.USER_AGENTS['wget']
PAGES = {'https://thepiratebay.gl/': IndexPage,
'https://thepiratebay.gl/search/.*/0/7/0': TorrentsPage,
'https://thepiratebay.gl/torrent/.*': TorrentPage
}
def iter_torrents(self, pattern):
self.location('https://thepiratebay.gl/search/%s/0/7/0' % urllib.quote_plus(pattern.encode('utf-8')))
assert self.is_on_page(TorrentsPage)
return self.page.iter_torrents()
def get_torrent(self, id):
try:
self.location('https://thepiratebay.gl/torrent/%s/' % id)
except BrowserHTTPNotFound:
return
if self.is_on_page(TorrentPage):
return self.page.get_torrent(id)
| agpl-3.0 | Python |
781addf1e2ee5eb5cff71ee7b4da14671e208c3a | Fix for #57 | nccgroup/featherduster,nccgroup/featherduster | feathermodules/classical/columnar_transposition.py | feathermodules/classical/columnar_transposition.py | import cryptanalib as ca
import feathermodules
def break_columnar_transposition(ciphertexts):
arguments=feathermodules.current_options
results = []
for ciphertext in ciphertexts:
result = ca.break_columnar_transposition(ciphertext, num_answers=int(arguments['num_answers']))
result = '\n'.join(result)
results.append(result)
print 'Best results of columnar transposition solve(s):'
print '-'*80
print '\n'.join(results)
return results
feathermodules.module_list['column_trans'] = {
'attack_function':break_columnar_transposition,
'type':'classical',
'keywords':['transposition'],
'description':'A brute force attack against columnar transposition ciphers.',
'options':{'num_answers':'3'}
}
| import cryptanalib as ca
import feathermodules
def break_columnar_transposition(ciphertexts):
arguments=feathermodules.current_options
results = []
for ciphertext in ciphertexts:
results.append(ca.break_columnar_transposition(ciphertext, num_answers=int(arguments['num_answers'])))
print 'Best results of columnar transposition solve:'
print '-'*80
print '\n'.join(results)
return results
feathermodules.module_list['column_trans'] = {
'attack_function':break_columnar_transposition,
'type':'classical',
'keywords':['transposition'],
'description':'A brute force attack against columnar transposition ciphers.',
'options':{'num_answers':'3'}
}
| bsd-3-clause | Python |
53a2a514500aa19a8fbdcaa5701774d6983ff76d | add some initial email templates | Edmonton-Public-Library/centennial,Edmonton-Public-Library/centennial,Edmonton-Public-Library/centennial | util/email/email_template.py | util/email/email_template.py | REGISTRATION_NOTIFICATION = """Thank you {%username} for registering with EPL. To activate your account, you will need to verify your account by navigating to the following URL: {%registration_url}"""
PASSWORD_RESET_EMAIL = """To reset your password, please click on the following link: {%forgot_password_url}"""
| mit | Python | |
98552a4cb683e25ec9af53024e58644c04b55872 | Handle missing external files gracefully | mollyproject/mollyproject,mollyproject/mollyproject,mollyproject/mollyproject | molly/external_media/views.py | molly/external_media/views.py | from email.utils import formatdate
from datetime import datetime, timedelta
from time import mktime
from django.shortcuts import get_object_or_404
from django.http import HttpResponse, Http404
from molly.utils.views import BaseView
from molly.utils.breadcrumbs import NullBreadcrumb
from models import ExternalImageSized
class IndexView(BaseView):
breadcrumb = NullBreadcrumb
def handle_GET(self, request, context):
raise Http404
class ExternalImageView(BaseView):
breadcrumb = NullBreadcrumb
def handle_GET(self, request, context, slug):
eis = get_object_or_404(ExternalImageSized, slug=slug)
try:
response = HttpResponse(open(eis.get_filename(), 'rb').read(),
mimetype=eis.content_type.encode('ascii'))
except IOError:
eis.delete()
raise Http404()
response['ETag'] = slug
response['Expires'] = formatdate(mktime((datetime.now() + timedelta(days=7)).timetuple()))
response['Last-Modified'] = formatdate(mktime(eis.external_image.last_updated.timetuple()))
return response
| from email.utils import formatdate
from datetime import datetime, timedelta
from time import mktime
from django.shortcuts import get_object_or_404
from django.http import HttpResponse, Http404
from molly.utils.views import BaseView
from molly.utils.breadcrumbs import NullBreadcrumb
from models import ExternalImageSized
class IndexView(BaseView):
breadcrumb = NullBreadcrumb
def handle_GET(self, request, context):
raise Http404
class ExternalImageView(BaseView):
breadcrumb = NullBreadcrumb
def handle_GET(self, request, context, slug):
eis = get_object_or_404(ExternalImageSized, slug=slug)
response = HttpResponse(open(eis.get_filename(), 'rb').read(), mimetype=eis.content_type.encode('ascii'))
response['ETag'] = slug
response['Expires'] = formatdate(mktime((datetime.now() + timedelta(days=7)).timetuple()))
response['Last-Modified'] = formatdate(mktime(eis.external_image.last_updated.timetuple()))
return response
| apache-2.0 | Python |
630908ea370ee863f70bbe60a425c825ee5f9d62 | Test fix | nick-bulleid/mopidy-frontpanel | mopidy_frontpanel/frontend.py | mopidy_frontpanel/frontend.py | from __future__ import unicode_literals
import logging
from mopidy.core import CoreListener # pylint: disable=import-error
import pykka # pylint: disable=import-error
from .menu import MenuModel
from .painter import Painter
from .input import Input
logger = logging.getLogger(__name__)
class FrontPanel(pykka.ThreadingActor, CoreListener):
def __init__(self, config, core):
super(FrontPanel, self).__init__()
self.core = core
self.menu = MenuModel(core)
self.painter = Painter(core, self.menu)
self.input = Input(self)
def on_start(self):
self.painter.update()
def handle_input(self, input_key):
#self.painter.print_text(input_key)
if input_key == "play":
self.core.playback.resume()
elif input_key == "pause":
self.core.playback.pause()
elif input_key == "stop":
self.core.playback.stop()
elif input_key == "vol_up":
pass
elif input_key == "vol_down":
pass
elif input_key == "next":
self.menu.next()
elif input_key == "prev":
self.menu.prev()
elif input_key == "select":
self.menu.select()
elif input_key == "back":
self.menu.up()
elif input_key == "exit":
self.menu.clear()
self.painter.update()
def track_playback_started(self, tl_track):
self.painter.update()
def track_playback_ended(self, tl_track, time_position):
self.painter.update()
| from __future__ import unicode_literals
import logging
from mopidy.core import CoreListener # pylint: disable=import-error
import pykka # pylint: disable=import-error
from .menu import BrowseMenu
from .painter import Painter
from .input import Input
logger = logging.getLogger(__name__)
class FrontPanel(pykka.ThreadingActor, CoreListener):
def __init__(self, config, core):
super(FrontPanel, self).__init__()
self.core = core
self.menu = MenuModel(core)
self.painter = Painter(core, self.menu)
self.input = Input(self)
def on_start(self):
self.painter.update()
def handle_input(self, input_key):
#self.painter.print_text(input_key)
if input_key == "play":
self.core.playback.resume()
elif input_key == "pause":
self.core.playback.pause()
elif input_key == "stop":
self.core.playback.stop()
elif input_key == "vol_up":
pass
elif input_key == "vol_down":
pass
elif input_key == "next":
self.menu.next()
elif input_key == "prev":
self.menu.prev()
elif input_key == "select":
self.menu.select()
elif input_key == "back":
self.menu.up()
elif input_key == "exit":
self.menu.clear()
self.painter.update()
def track_playback_started(self, tl_track):
self.painter.update()
def track_playback_ended(self, tl_track, time_position):
self.painter.update()
| apache-2.0 | Python |
e41c947bd4ce3fdee3793467373fbeba693ca5bd | bump version number | kapteyn-astro/kapteyn,kapteyn-astro/kapteyn,kapteyn-astro/kapteyn,kapteyn-astro/kapteyn,kapteyn-astro/kapteyn,kapteyn-astro/kapteyn | DISTRIBUTION/kapteyn/__init__.py | DISTRIBUTION/kapteyn/__init__.py | """Kapteyn package.
"""
from os import path
package_dir = path.abspath(path.dirname(__file__))
__all__=['celestial', 'wcs', 'wcsgrat', 'tabarray', 'maputils',
'mplutil', 'positions', 'shapes', 'rulers', 'filters',
'interpolation']
__version__='2.0.3b13'
| """Kapteyn package.
"""
from os import path
package_dir = path.abspath(path.dirname(__file__))
__all__=['celestial', 'wcs', 'wcsgrat', 'tabarray', 'maputils',
'mplutil', 'positions', 'shapes', 'rulers', 'filters',
'interpolation']
__version__='2.0.3b11'
| bsd-3-clause | Python |
ff8f63b755664d4970577d7ae1ee96316767d52c | remove redundant signal | theonion/django-bulbs,theonion/django-bulbs,theonion/django-bulbs,theonion/django-bulbs,theonion/django-bulbs | bulbs/contributions/signals.py | bulbs/contributions/signals.py | from django.dispatch import receiver
from django.db.models.signals import m2m_changed, post_save
from bulbs.content.models import Content, FeatureType
from .models import ContributorRole, FeatureTypeRate
from .tasks import update_role_rates
from .utils import update_content_contributions
@receiver(post_save, sender=FeatureType)
def update_feature_type_rates(sender, instance, created, *args, **kwargs):
"""
Creates a default FeatureTypeRate for each role after the creation of a FeatureTypeRate.
"""
if created:
for role in ContributorRole.objects.all():
FeatureTypeRate.objects.create(role=role, feature_type=instance, rate=0)
@receiver(post_save, sender=ContributorRole)
def call_update_role_rates(sender, instance, * args, **kwargs):
update_role_rates.delay(instance.pk)
@receiver(m2m_changed, sender=Content.authors.through)
def update_contributions(sender, instance, action, model, pk_set, **kwargs):
"""Creates a contribution for each author added to an article.
"""
if action != 'pre_add':
return
else:
for author in model.objects.filter(pk__in=pk_set):
update_content_contributions(instance, author)
| from django.core.exceptions import ObjectDoesNotExist
from django.dispatch import receiver
from django.db.models.signals import m2m_changed, post_save
from bulbs.content.models import Content, FeatureType
from .models import Contribution, ContributorRole, FeatureTypeRate, ReportContent
from .tasks import update_role_rates
from .utils import update_content_contributions
@receiver(post_save, sender=FeatureType)
def update_feature_type_rates(sender, instance, created, *args, **kwargs):
"""
Creates a default FeatureTypeRate for each role after the creation of a FeatureTypeRate.
"""
if created:
for role in ContributorRole.objects.all():
FeatureTypeRate.objects.create(role=role, feature_type=instance, rate=0)
@receiver(post_save, sender=ContributorRole)
def call_update_role_rates(sender, instance, * args, **kwargs):
update_role_rates.delay(instance.pk)
@receiver(m2m_changed, sender=Content.authors.through)
def update_contributions(sender, instance, action, model, pk_set, **kwargs):
"""Creates a contribution for each author added to an article.
"""
if action != 'pre_add':
return
else:
for author in model.objects.filter(pk__in=pk_set):
update_content_contributions(instance, author)
@receiver(post_save, sender=Contribution)
def index_relations(sender, instance, **kwargs):
"""
We need to update the ReportContent object whenver a contribution is added.
"""
try:
proxy = ReportContent.reference.get(id=instance.content_id)
proxy.index()
except ObjectDoesNotExist:
pass
| mit | Python |
6af0cedc4a060d6015f44d90c2f0552957d38a14 | read Niancat Slack config from environment | dandeliondeathray/niancat-micro,dandeliondeathray/niancat-micro | niancat-slack/niancatslack.py | niancat-slack/niancatslack.py | from slackrest.app import SlackrestApp
from slackrest.command import Visibility, Method
import json
import os
class GetPuzzle:
pattern = '!nian'
url_format = '/puzzle'
visibility = Visibility.Any
body = None
method = Method.GET
class SetPuzzle:
pattern = '!sättnian {nian}'
url_format = '/puzzle'
visibility = Visibility.Any
method = Method.POST
@classmethod
def body(cls, nian, **kwargs):
return json.dumps({'puzzle': nian})
class ListUnsolution:
pattern = '!olösningar'
url_format = '/unsolution/{user_name}'
visibility = Visibility.Private
method = Method.GET
body = None
class AddUnsolution:
pattern = '!olösning {unsolution}'
url_format = '/unsolution/{user_name}'
visibility = Visibility.Private
method = Method.POST
@classmethod
def body(cls, unsolution, **kwargs):
return json.dumps({'unsolution': unsolution})
class CheckSolution:
pattern = '{solution}'
url_format = '/solution/{solution}'
visibility = Visibility.Private
method = Method.POST
@classmethod
def body(cls, user_name, **kwargs):
return json.dumps({'user': user_name})
class NiancatSlack(SlackrestApp):
def __init__(self, base_url, notification_channel_id):
commands = [GetPuzzle, SetPuzzle, ListUnsolution, AddUnsolution, CheckSolution]
SlackrestApp.__init__(self, base_url, commands, notification_channel_id)
def read_environment_var(name):
try:
os.environ[name]
except KeyError:
raise OSError("Missing required environment variable {}".format(name))
if __name__ == "__main__":
base_url = read_environment_var("NIANCAT_CHAT_BASE_URL")
notification_channel = read_environment_var("NOTIFICATION_CHANNEL")
app = NiancatSlack(base_url, notification_channel)
app.run_forever()
| from slackrest.app import SlackrestApp
from slackrest.command import Visibility, Method
import json
class GetPuzzle:
pattern = '!nian'
url_format = '/puzzle'
visibility = Visibility.Any
body = None
method = Method.GET
class SetPuzzle:
pattern = '!sättnian {nian}'
url_format = '/puzzle'
visibility = Visibility.Any
method = Method.POST
@classmethod
def body(cls, nian, **kwargs):
return json.dumps({'puzzle': nian})
class ListUnsolution:
pattern = '!olösningar'
url_format = '/unsolution/{user_name}'
visibility = Visibility.Private
method = Method.GET
body = None
class AddUnsolution:
pattern = '!olösning {unsolution}'
url_format = '/unsolution/{user_name}'
visibility = Visibility.Private
method = Method.POST
@classmethod
def body(cls, unsolution, **kwargs):
return json.dumps({'unsolution': unsolution})
class CheckSolution:
pattern = '{solution}'
url_format = '/solution/{solution}'
visibility = Visibility.Private
method = Method.POST
@classmethod
def body(cls, user_name, **kwargs):
return json.dumps({'user': user_name})
class NiancatSlack(SlackrestApp):
def __init__(self, base_url, notification_channel_id):
commands = [GetPuzzle, SetPuzzle, ListUnsolution, AddUnsolution, CheckSolution]
SlackrestApp.__init__(self, base_url, commands, notification_channel_id)
if __name__ == "__main__":
app = NiancatSlack('http://niancat-chat/v1', '#konsulatet')
app.run_forever()
| apache-2.0 | Python |
083c555dd73431ce8ff2b2479193807742836c1a | Remove explicit unnecessary variable init | alexpilotti/cloudbase-init,stefan-caraiman/cloudbase-init,cmin764/cloudbase-init,ader1990/cloudbase-init,chialiang-8/cloudbase-init,openstack/cloudbase-init,stackforge/cloudbase-init | cloudbaseinit/plugins/common/fileexecutils.py | cloudbaseinit/plugins/common/fileexecutils.py | # Copyright 2014 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from cloudbaseinit.openstack.common import log as logging
from cloudbaseinit.plugins.common import execcmd
LOG = logging.getLogger(__name__)
FORMATS = {
"cmd": execcmd.Shell,
"exe": execcmd.Shell,
"sh": execcmd.Bash,
"py": execcmd.Python,
"ps1": execcmd.PowershellSysnative,
}
def exec_file(file_path):
ret_val = 0
ext = os.path.splitext(file_path)[1][1:].lower()
command = FORMATS.get(ext)
if not command:
# Unsupported
LOG.warning('Unsupported script file type: %s', ext)
return ret_val
try:
out, err, ret_val = command(file_path).execute()
except Exception as ex:
LOG.warning('An error occurred during file execution: \'%s\'', ex)
else:
LOG.debug('User_data stdout:\n%s', out)
LOG.debug('User_data stderr:\n%s', err)
LOG.info('Script "%(file_path)s" ended with exit code: %(ret_val)d',
{"file_path": file_path, "ret_val": ret_val})
return ret_val
| # Copyright 2014 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from cloudbaseinit.openstack.common import log as logging
from cloudbaseinit.plugins.common import execcmd
LOG = logging.getLogger(__name__)
FORMATS = {
"cmd": execcmd.Shell,
"exe": execcmd.Shell,
"sh": execcmd.Bash,
"py": execcmd.Python,
"ps1": execcmd.PowershellSysnative,
}
def exec_file(file_path):
ret_val = 0
out = err = None
ext = os.path.splitext(file_path)[1][1:].lower()
command = FORMATS.get(ext)
if not command:
# Unsupported
LOG.warning('Unsupported script file type: %s', ext)
return ret_val
try:
out, err, ret_val = command(file_path).execute()
except Exception as ex:
LOG.warning('An error occurred during file execution: \'%s\'', ex)
else:
LOG.debug('User_data stdout:\n%s', out)
LOG.debug('User_data stderr:\n%s', err)
LOG.info('Script "%(file_path)s" ended with exit code: %(ret_val)d',
{"file_path": file_path, "ret_val": ret_val})
return ret_val
| apache-2.0 | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.