commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
9f1a4977e34dc01a0489655df826b63b84f7d3be | Use SunPy sample data for Solar Cycle example. | examples/solar_cycle_example.py | examples/solar_cycle_example.py | """
===============
The Solar Cycle
===============
This example shows the current and possible next solar cycle.
"""
import datetime
import matplotlib.pyplot as plt
import sunpy.lightcurve as lc
from sunpy.data.sample import NOAAINDICES_LIGHTCURVE, NOAAPREDICT_LIGHTCURVE
###############################################################################
# For this example we will use the SunPy sample data, if you want the current
# data, delete the argument to the ``create`` function. i.e.
# ``noaa = lc.NOAAIndicesLightCurve.create()``
noaa = lc.NOAAIndicesLightCurve.create(NOAAINDICES_LIGHTCURVE)
noaa_predict = lc.NOAAPredictIndicesLightCurve.create(NOAAPREDICT_LIGHTCURVE)
###############################################################################
# Next lets grab the data again to create a new data structure that we will
# shift by 12 years to simulate the next solar cycle. We will truncate the
# data to only plot what is necessary.
noaa2 = lc.NOAAIndicesLightCurve.create(NOAAINDICES_LIGHTCURVE)
noaa2.data = noaa2.data.shift(2, freq=datetime.timedelta(days=365*12))
noaa2 = noaa2.truncate('2021/04/01', '2030/01/01')
###############################################################################
# Finally lets plot everything together with some arbitrary range for the
# strength of the next solar cycle.
plt.plot(noaa.data.index, noaa.data['sunspot RI'], label='Sunspot Number')
plt.plot(noaa_predict.data.index, noaa_predict.data['sunspot'],
color='grey', label='Near-term Prediction')
plt.fill_between(noaa_predict.data.index, noaa_predict.data['sunspot low'],
noaa_predict.data['sunspot high'], alpha=0.3, color='grey')
plt.fill_between(noaa2.data.index, noaa2.data['sunspot RI smooth']*0.4,
noaa2.data['sunspot RI smooth']*1.3, alpha=0.3, color='grey',
label='Next Cycle Predict')
plt.ylim(0)
plt.text('2011-01-01', 120, 'Cycle 24', fontsize=16)
plt.text('2024-01-01', 120, 'Cycle 25', fontsize=16)
plt.ylabel('Sunspot Number')
plt.xlabel('Year')
plt.legend(loc=2, framealpha=0.5)
plt.show()
| """
===============
The Solar Cycle
===============
This example shows the current and possible next solar cycle.
"""
import datetime
import matplotlib.pyplot as plt
import sunpy.lightcurve as lc
###############################################################################
# Let's download the latest data from NOAA.
noaa = lc.NOAAIndicesLightCurve.create()
noaa_predict = lc.NOAAPredictIndicesLightCurve.create()
###############################################################################
# Next lets grab the data again to create a new data structure that we will
# shift by 12 years to simulate the next solar cycle. We will truncate the
# data to only plot what is necessary.
noaa2 = lc.NOAAIndicesLightCurve.create()
noaa2.data = noaa2.data.shift(2, freq=datetime.timedelta(days = 365*12))
noaa2 = noaa2.truncate('2021/04/01', '2030/01/01')
###############################################################################
# Finally lets plot everything together with some arbitrary range for the strength
# of the next solar cycle.
plt.plot(noaa.data.index, noaa.data['sunspot RI'], label='Sunspot Number')
plt.plot(noaa_predict.data.index,noaa_predict.data['sunspot'],color='grey', label='Near-term Prediction')
plt.fill_between(noaa_predict.data.index, noaa_predict.data['sunspot low'], noaa_predict.data['sunspot high'],
alpha = 0.3, color='grey')
plt.fill_between(noaa2.data.index, noaa2.data['sunspot RI smooth']*0.4, noaa2.data['sunspot RI smooth']*1.3,
alpha = 0.3, color='grey', label='Next Cycle Predict')
plt.ylim(0)
plt.text('2011-01-01', 120,'Cycle 24',fontsize=16)
plt.text('2024-01-01', 120,'Cycle 25',fontsize=16)
plt.ylabel('Sunspot Number')
plt.xlabel('Year')
plt.legend(loc=2, framealpha=0.5)
plt.show()
| Python | 0 |
0ac3750c2b8d0fc978c076604db3bfee1a47708f | allow name param to name tab widgets | examples/tabpanelwidget/Tabs.py | examples/tabpanelwidget/Tabs.py | import pyjd # dummy in pyjs
from pyjamas.ui.TabBar import TabBar
from pyjamas.ui.TabPanel import TabPanel
from pyjamas.ui import HasAlignment
from pyjamas.ui.Image import Image
from pyjamas.ui.VerticalPanel import VerticalPanel
from pyjamas.ui.RootPanel import RootPanel
from pyjamas.ui.HorizontalPanel import HorizontalPanel
from pyjamas.ui.HTML import HTML
from pyjamas.ui.Composite import Composite
#from pyjamas.ui import DecoratorPanel
from pyjamas.ui import MouseListener
from pyjamas.ui import Event
from pyjamas import Window
from pyjamas.ui.DecoratorPanel import DecoratedTabPanel, DecoratorPanel
from pyjamas.ui.DecoratorPanel import DecoratorTitledPanel
#class PrettyTab(DecoratorPanel):
class PrettyTab(Composite):
def __init__(self, text, imageUrl):
DecoratorPanel.__init__(self, DecoratorPanel.DECORATE_ALL)
p = HorizontalPanel()
p.setSpacing(3)
self.img = Image(imageUrl)
self.txt = HTML(text)
p.add(self.img)
p.add(self.txt)
self.add(p)
def addClickListener(self, listener):
self.img.addClickListener(listener)
self.txt.addClickListener(listener)
class Tabs:
def onModuleLoad(self):
#red = PrettyTab("1638", "images/user_red.png")
#red.setStyleName('gwt-TabBarItem')
#green = PrettyTab("1640", "images/user_green.png")
#red.setStyleName('gwt-TabBarItem')
red = "1638"
green = "1640"
self.fTabs = DecoratedTabPanel(Size=("600px", "100%"))
self.fTabs.add(self.createImage("rembrandt/JohannesElison.jpg"),
red, True, name="johannes")
self.fTabs.add(self.createImage("rembrandt/SelfPortrait1640.jpg"),
green, True, name="self")
self.fTabs.add(self.createImage("rembrandt/LaMarcheNocturne.jpg"),
"1642", name="lamarche")
self.fTabs.add(self.createImage(
"rembrandt/TheReturnOfTheProdigalSon.jpg"),"1662",
"prodigal")
self.fTabs.add(HTML("shouldn't be here!"), None) # None means separator
self.fTabs.add(HTML("This is a Test.<br />Tab should be on right"),
"Test", "test")
self.fTabs.selectTab(0)
dp = DecoratorTitledPanel("Tabs", "bluetitle", "bluetitleicon",
["bluetop", "bluetop2", "bluemiddle", "bluebottom"])
dp.add(self.fTabs)
RootPanel().add(dp)
self.fTabs.addTabListener(self)
def createImage(self, imageUrl):
image = Image(imageUrl)
image.setStyleName("ks-images-Image")
p = VerticalPanel()
p.setHorizontalAlignment(HasAlignment.ALIGN_CENTER)
p.setVerticalAlignment(HasAlignment.ALIGN_MIDDLE)
p.add(image)
return p
def onTabSelected(self, sender, tabIndex):
pass
def onBeforeTabSelected(self, sender, tabIndex):
# 6 because one of them is the separator.
if self.fTabs.getWidgetCount() == 6:
self.fTabs.add(HTML("2nd Test.<br />Tab should be on right"),
"2nd Test", name="test2")
return True
self.fTabs.remove("test2")
return tabIndex != 6 # don't allow change to tab 6 - we're removing it!
if __name__ == '__main__':
pyjd.setup("./public/Tabs.html")
app = Tabs()
app.onModuleLoad()
pyjd.run()
| import pyjd # dummy in pyjs
from pyjamas.ui.TabBar import TabBar
from pyjamas.ui.TabPanel import TabPanel
from pyjamas.ui import HasAlignment
from pyjamas.ui.Image import Image
from pyjamas.ui.VerticalPanel import VerticalPanel
from pyjamas.ui.RootPanel import RootPanel
from pyjamas.ui.HorizontalPanel import HorizontalPanel
from pyjamas.ui.HTML import HTML
from pyjamas.ui.Composite import Composite
#from pyjamas.ui import DecoratorPanel
from pyjamas.ui import MouseListener
from pyjamas.ui import Event
from pyjamas import Window
from pyjamas.ui.DecoratorPanel import DecoratedTabPanel, DecoratorPanel
from pyjamas.ui.DecoratorPanel import DecoratorTitledPanel
#class PrettyTab(DecoratorPanel):
class PrettyTab(Composite):
def __init__(self, text, imageUrl):
DecoratorPanel.__init__(self, DecoratorPanel.DECORATE_ALL)
p = HorizontalPanel()
p.setSpacing(3)
self.img = Image(imageUrl)
self.txt = HTML(text)
p.add(self.img)
p.add(self.txt)
self.add(p)
def addClickListener(self, listener):
self.img.addClickListener(listener)
self.txt.addClickListener(listener)
class Tabs:
def onModuleLoad(self):
#red = PrettyTab("1638", "images/user_red.png")
#red.setStyleName('gwt-TabBarItem')
#green = PrettyTab("1640", "images/user_green.png")
#red.setStyleName('gwt-TabBarItem')
red = "1638"
green = "1640"
self.fTabs = DecoratedTabPanel(Size=("600px", "100%"))
self.fTabs.add(self.createImage("rembrandt/JohannesElison.jpg"), red, True)
self.fTabs.add(self.createImage("rembrandt/SelfPortrait1640.jpg"), green, True)
self.fTabs.add(self.createImage("rembrandt/LaMarcheNocturne.jpg"), "1642")
self.fTabs.add(self.createImage("rembrandt/TheReturnOfTheProdigalSon.jpg"), "1662")
self.fTabs.add(HTML("shouldn't be here!"), None)
self.fTabs.add(HTML("This is a Test.<br />Tab should be on right"),
"Test")
self.fTabs.selectTab(0)
dp = DecoratorTitledPanel("Tabs", "bluetitle", "bluetitleicon",
["bluetop", "bluetop2", "bluemiddle", "bluebottom"])
dp.add(self.fTabs)
RootPanel().add(dp)
def createImage(self, imageUrl):
image = Image(imageUrl)
image.setStyleName("ks-images-Image")
p = VerticalPanel()
p.setHorizontalAlignment(HasAlignment.ALIGN_CENTER)
p.setVerticalAlignment(HasAlignment.ALIGN_MIDDLE)
p.add(image)
return p
if __name__ == '__main__':
pyjd.setup("./public/Tabs.html")
app = Tabs()
app.onModuleLoad()
pyjd.run()
| Python | 0 |
23cc84147f52cd4036398200916e68bd0f078050 | Fix print statemenet | stationspinner/evecentral/tasks.py | stationspinner/evecentral/tasks.py | from stationspinner.celery import app
from celery import chord
from stationspinner.evecentral.models import Market, MarketItem
from stationspinner.libs.pragma import get_location_name
from stationspinner.sde.models import InvType
from stationspinner.settings import STATIC_ROOT
from evelink.thirdparty.eve_central import EVECentral
from urllib2 import urlopen
from datetime import datetime
from pytz import UTC
from django.db.models import Q
from celery.utils.log import get_task_logger
from traceback import format_exc
from os.path import join
import csv
log = get_task_logger(__name__)
def _market_items():
market_items = InvType.objects.filter(published=True,
marketGroupID__lt=35000)
typeIDs = [i.pk for i in market_items.order_by('id')]
for i in xrange(0, len(typeIDs), 100):
yield typeIDs[i:i+100]
@app.task(name='evecentral.write_static_prices')
def write_static_prices():
for market in Market.objects.all():
market_items = MarketItem.objects.filter(locationID=market.locationID).order_by('typeName')
with open(join(STATIC_ROOT, '{0}.csv'.format(market.locationID)), 'wb') as output:
csvprices = csv.writer(output, delimiter=';')
for item in market_items:
try:
csvprices.writerow((item.typeID,
item.typeName,
item.buy_max,
item.buy_min,
item.buy_percentile,
item.buy_volume,
item.sell_max,
item.sell_min,
item.sell_percentile,
item.sell_volume))
except:
log.debug('Failed to render csv row for {0} at {1}.'.format(item, market.locationID))
@app.task(name='evecentral.update_all_markets')
def update_all_markets():
market_updates = []
for market in Market.objects.filter(
Q(cached_until__lte=datetime.now(tz=UTC)) | Q(cached_until=None)):
market_updates.extend(update_market(market.locationID))
market.updated()
log.info('Updating "{0}" market'.format(get_location_name(market.locationID)))
chord(market_updates, write_static_prices.s()).apply_async()
def update_market(locationID):
tasks = []
for typeIDs in _market_items():
tasks.append(parse_market_data.s(typeIDs, locationID))
return tasks
@app.task(name='evecentral.parse_market_data')
def parse_market_data(typeIDs, locationID):
ec = EVECentral(url_fetch_func=lambda url: urlopen(url).read())
try:
if locationID > 30000000:
data = ec.market_stats(type_ids=typeIDs, system=locationID)
else:
data = ec.market_stats(type_ids=typeIDs, regions=locationID)
except Exception, ex:
log.error('Could not update locationID {0}: {1}'.format(locationID,
format_exc(ex)))
return
for typeID, price_data in data.items():
prices = {}
for price_type in ('buy', 'sell'):
type_data = price_data[price_type]
for statistic, value in type_data.items():
prices['{0}_{1}'.format(price_type, statistic)] = value
MarketItem.objects.update_or_create(typeID=typeID,
locationID=locationID,
typeName=InvType.objects.get(pk=typeID).typeName,
defaults=prices)
| from stationspinner.celery import app
from celery import chord
from stationspinner.evecentral.models import Market, MarketItem
from stationspinner.libs.pragma import get_location_name
from stationspinner.sde.models import InvType
from stationspinner.settings import STATIC_ROOT
from evelink.thirdparty.eve_central import EVECentral
from urllib2 import urlopen
from datetime import datetime
from pytz import UTC
from django.db.models import Q
from celery.utils.log import get_task_logger
from traceback import format_exc
from os.path import join
import csv
log = get_task_logger(__name__)
def _market_items():
market_items = InvType.objects.filter(published=True,
marketGroupID__lt=35000)
typeIDs = [i.pk for i in market_items.order_by('id')]
for i in xrange(0, len(typeIDs), 100):
yield typeIDs[i:i+100]
@app.task(name='evecentral.write_static_prices')
def write_static_prices():
for market in Market.objects.all():
market_items = MarketItem.objects.filter(locationID=market.locationID).order_by('typeName')
with open(join(STATIC_ROOT, '{0}.csv'.format(market.locationID)), 'wb') as output:
csvprices = csv.writer(output, delimiter=';')
for item in market_items:
try:
csvprices.writerow((item.typeID,
item.typeName,
item.buy_max,
item.buy_min,
item.buy_percentile,
item.buy_volume,
item.sell_max,
item.sell_min,
item.sell_percentile,
item.sell_volume))
except:
print item
@app.task(name='evecentral.update_all_markets')
def update_all_markets():
market_updates = []
for market in Market.objects.filter(
Q(cached_until__lte=datetime.now(tz=UTC)) | Q(cached_until=None)):
market_updates.extend(update_market(market.locationID))
market.updated()
log.info('Updating "{0}" market'.format(get_location_name(market.locationID)))
chord(market_updates, write_static_prices.s()).apply_async()
def update_market(locationID):
tasks = []
for typeIDs in _market_items():
tasks.append(parse_market_data.s(typeIDs, locationID))
return tasks
@app.task(name='evecentral.parse_market_data')
def parse_market_data(typeIDs, locationID):
ec = EVECentral(url_fetch_func=lambda url: urlopen(url).read())
try:
if locationID > 30000000:
data = ec.market_stats(type_ids=typeIDs, system=locationID)
else:
data = ec.market_stats(type_ids=typeIDs, regions=locationID)
except Exception, ex:
log.error('Could not update locationID {0}: {1}'.format(locationID,
format_exc(ex)))
return
for typeID, price_data in data.items():
prices = {}
for price_type in ('buy', 'sell'):
type_data = price_data[price_type]
for statistic, value in type_data.items():
prices['{0}_{1}'.format(price_type, statistic)] = value
MarketItem.objects.update_or_create(typeID=typeID,
locationID=locationID,
typeName=InvType.objects.get(pk=typeID).typeName,
defaults=prices)
| Python | 0.99991 |
ca57e29c15ad02dee3cdad0d2159cbe33c15d6e0 | fix expire cache | corehq/apps/app_manager/signals.py | corehq/apps/app_manager/signals.py | from __future__ import absolute_import
from __future__ import unicode_literals
from django.dispatch.dispatcher import Signal
from corehq.apps.callcenter.app_parser import get_call_center_config_from_app
from corehq.apps.domain.models import Domain
from dimagi.utils.logging import notify_exception
def create_app_structure_repeat_records(sender, application, **kwargs):
from corehq.motech.repeaters.models import AppStructureRepeater
domain = application.domain
if domain:
repeaters = AppStructureRepeater.by_domain(domain)
for repeater in repeaters:
repeater.register(application)
def update_callcenter_config(sender, application, **kwargs):
if not application.copy_of:
return
try:
domain = Domain.get_by_name(application.domain)
cc_config = domain.call_center_config
if not cc_config or not (cc_config.fixtures_are_active() and cc_config.config_is_valid()):
return
app_config = get_call_center_config_from_app(application)
save = cc_config.update_from_app_config(app_config)
if save:
cc_config.save()
except Exception:
notify_exception(None, "Error updating CallCenter config for app build")
def expire_latest_enabled_build_profiles(sender, application, **kwargs):
from corehq.apps.app_manager.util import get_latest_enabled_build_for_profile
from corehq.apps.app_manager.util import get_enabled_build_profiles_for_version
if application.copy_of:
for build_profile_id in application.build_profiles:
get_latest_enabled_build_for_profile.clear(application.domain, build_profile_id)
get_enabled_build_profiles_for_version.clear(application.get_id, application.version)
app_post_save = Signal(providing_args=['application'])
app_post_save.connect(create_app_structure_repeat_records)
app_post_save.connect(update_callcenter_config)
app_post_save.connect(expire_latest_enabled_build_profiles)
app_post_release = Signal(providing_args=['application'])
| from __future__ import absolute_import
from __future__ import unicode_literals
from django.dispatch.dispatcher import Signal
from corehq.apps.callcenter.app_parser import get_call_center_config_from_app
from corehq.apps.domain.models import Domain
from dimagi.utils.logging import notify_exception
def create_app_structure_repeat_records(sender, application, **kwargs):
from corehq.motech.repeaters.models import AppStructureRepeater
domain = application.domain
if domain:
repeaters = AppStructureRepeater.by_domain(domain)
for repeater in repeaters:
repeater.register(application)
def update_callcenter_config(sender, application, **kwargs):
if not application.copy_of:
return
try:
domain = Domain.get_by_name(application.domain)
cc_config = domain.call_center_config
if not cc_config or not (cc_config.fixtures_are_active() and cc_config.config_is_valid()):
return
app_config = get_call_center_config_from_app(application)
save = cc_config.update_from_app_config(app_config)
if save:
cc_config.save()
except Exception:
notify_exception(None, "Error updating CallCenter config for app build")
def expire_latest_enabled_build_profiles(sender, application, **kwargs):
from corehq.apps.app_manager.util import get_latest_enabled_build_for_profile
from corehq.apps.app_manager.util import get_enabled_build_profiles_for_version
if application.copy_of:
for build_profile_id in application.build_profiles:
get_latest_enabled_build_for_profile.clear(application.domain, build_profile_id)
get_enabled_build_profiles_for_version(application.get_id, application.version)
app_post_save = Signal(providing_args=['application'])
app_post_save.connect(create_app_structure_repeat_records)
app_post_save.connect(update_callcenter_config)
app_post_save.connect(expire_latest_enabled_build_profiles)
app_post_release = Signal(providing_args=['application'])
| Python | 0.000001 |
55c0d8912750ad8ddc702213c340c02d10638640 | Test function | corehq/apps/sms/tests/test_util.py | corehq/apps/sms/tests/test_util.py | #!/usr/bin/env python
from django.test import TestCase
from nose.tools import assert_false, assert_true
from corehq.apps.hqcase.utils import update_case
from corehq.apps.sms.mixin import apply_leniency
from corehq.apps.sms.util import (
ContactNotFoundException,
clean_phone_number,
get_contact,
is_contact_active,
is_superuser_or_contractor,
)
from corehq.apps.users.models import CommCareUser, CouchUser
from corehq.form_processor.tests.utils import run_with_all_backends
from corehq.form_processor.utils import is_commcarecase
from corehq.util.test_utils import create_test_case, flag_enabled
class UtilTestCase(TestCase):
def setUp(self):
self.domain = 'test-domain'
self.user = CommCareUser.create(self.domain, 'test-user', '123')
def tearDown(self):
self.user.delete()
def testCleanPhoneNumber(self):
phone_number = " 324 23-23421241"
cleaned = clean_phone_number(phone_number)
self.assertEqual(cleaned, "+3242323421241")
@run_with_all_backends
def test_get_contact_for_case(self):
with create_test_case(self.domain, 'contact', 'test-case') as case:
contact = get_contact(self.domain, case.case_id)
self.assertEqual(contact.case_id, case.case_id)
self.assertTrue(is_commcarecase(contact))
with self.assertRaises(ContactNotFoundException):
get_contact(self.domain + 'x', case.case_id)
def test_get_contact_for_user(self):
contact = get_contact(self.domain, self.user.get_id)
self.assertEqual(contact.get_id, self.user.get_id)
self.assertTrue(isinstance(contact, CommCareUser))
with self.assertRaises(ContactNotFoundException):
get_contact(self.domain + 'x', self.user.get_id)
def test_contact_not_found(self):
with self.assertRaises(ContactNotFoundException):
get_contact(self.domain, 'this-id-should-not-be-found')
@run_with_all_backends
def test_is_contact_active_for_case(self):
with create_test_case(self.domain, 'contact', 'test-case') as case:
self.assertTrue(is_contact_active(self.domain, 'CommCareCase', case.case_id))
update_case(self.domain, case.case_id, close=True)
self.assertFalse(is_contact_active(self.domain, 'CommCareCase', case.case_id))
def test_is_contact_active_for_user(self):
self.assertTrue(is_contact_active(self.domain, 'CommCareUser', self.user.get_id))
self.user.is_active = False
self.user.save()
self.assertFalse(is_contact_active(self.domain, 'CommCareUser', self.user.get_id))
self.user.is_active = True
self.user.save()
self.assertTrue(is_contact_active(self.domain, 'CommCareUser', self.user.get_id))
def test_apply_leniency(self):
self.assertEqual('16175551234', apply_leniency(' 1 (617) 555-1234 '))
self.assertEqual('16175551234', apply_leniency(' 1.617.555.1234 '))
self.assertEqual('16175551234', apply_leniency(' +1 617 555 1234 '))
def test_contractor():
user = CouchUser(username="eric")
with flag_enabled('IS_CONTRACTOR'):
assert_true(is_superuser_or_contractor(user))
def test_superuser():
user = CouchUser(username="john", is_superuser=True)
assert_true(is_superuser_or_contractor(user))
def test_normal_user():
user = CouchUser(username="michael")
assert_false(is_superuser_or_contractor(user))
| #!/usr/bin/env python
from django.test import TestCase
from corehq.apps.hqcase.utils import update_case
from corehq.apps.sms.mixin import apply_leniency
from corehq.apps.sms.util import (
ContactNotFoundException,
clean_phone_number,
get_contact,
is_contact_active,
)
from corehq.apps.users.models import CommCareUser
from corehq.form_processor.tests.utils import run_with_all_backends
from corehq.form_processor.utils import is_commcarecase
from corehq.util.test_utils import create_test_case
class UtilTestCase(TestCase):
def setUp(self):
self.domain = 'test-domain'
self.user = CommCareUser.create(self.domain, 'test-user', '123')
def tearDown(self):
self.user.delete()
def testCleanPhoneNumber(self):
phone_number = " 324 23-23421241"
cleaned = clean_phone_number(phone_number)
self.assertEqual(cleaned, "+3242323421241")
@run_with_all_backends
def test_get_contact_for_case(self):
with create_test_case(self.domain, 'contact', 'test-case') as case:
contact = get_contact(self.domain, case.case_id)
self.assertEqual(contact.case_id, case.case_id)
self.assertTrue(is_commcarecase(contact))
with self.assertRaises(ContactNotFoundException):
get_contact(self.domain + 'x', case.case_id)
def test_get_contact_for_user(self):
contact = get_contact(self.domain, self.user.get_id)
self.assertEqual(contact.get_id, self.user.get_id)
self.assertTrue(isinstance(contact, CommCareUser))
with self.assertRaises(ContactNotFoundException):
get_contact(self.domain + 'x', self.user.get_id)
def test_contact_not_found(self):
with self.assertRaises(ContactNotFoundException):
get_contact(self.domain, 'this-id-should-not-be-found')
@run_with_all_backends
def test_is_contact_active_for_case(self):
with create_test_case(self.domain, 'contact', 'test-case') as case:
self.assertTrue(is_contact_active(self.domain, 'CommCareCase', case.case_id))
update_case(self.domain, case.case_id, close=True)
self.assertFalse(is_contact_active(self.domain, 'CommCareCase', case.case_id))
def test_is_contact_active_for_user(self):
self.assertTrue(is_contact_active(self.domain, 'CommCareUser', self.user.get_id))
self.user.is_active = False
self.user.save()
self.assertFalse(is_contact_active(self.domain, 'CommCareUser', self.user.get_id))
self.user.is_active = True
self.user.save()
self.assertTrue(is_contact_active(self.domain, 'CommCareUser', self.user.get_id))
def test_apply_leniency(self):
self.assertEqual('16175551234', apply_leniency(' 1 (617) 555-1234 '))
self.assertEqual('16175551234', apply_leniency(' 1.617.555.1234 '))
self.assertEqual('16175551234', apply_leniency(' +1 617 555 1234 '))
| Python | 0.000006 |
4f0d43f3c451a4059a2931ec771a8d796396250e | fasta2imgt converts to upper | bin/fasta2imgt.py | bin/fasta2imgt.py | #! /usr/bin/env python
import sys
import optparse
from Bio import SeqIO
from Bio.Alphabet import generic_dna
import vdj
parser = optparse.OptionParser()
(options, args) = parser.parse_args()
if len(args) == 2:
inhandle = open(args[0],'r')
outhandle = open(args[1],'w')
elif len(args) == 1:
inhandle = open(args[0],'r')
outhandle = sys.stdout
elif len(args) == 0:
inhandle = sys.stdin
outhandle = sys.stdout
else:
raise Exception, "Wrong number of arguments."
for record in SeqIO.parse(inhandle,'fasta',generic_dna):
chain = vdj.ImmuneChain(record.upper())
print >>outhandle, chain
| #! /usr/bin/env python
import sys
import optparse
from Bio import SeqIO
from Bio.Alphabet import generic_dna
import vdj
parser = optparse.OptionParser()
(options, args) = parser.parse_args()
if len(args) == 2:
inhandle = open(args[0],'r')
outhandle = open(args[1],'w')
elif len(args) == 1:
inhandle = open(args[0],'r')
outhandle = sys.stdout
elif len(args) == 0:
inhandle = sys.stdin
outhandle = sys.stdout
else:
raise Exception, "Wrong number of arguments."
for record in SeqIO.parse(inhandle,'fasta',generic_dna):
chain = vdj.ImmuneChain(record)
print >>outhandle, chain
| Python | 0.999999 |
9ebf03ddcba26054824547f6d1094ba9fb37a030 | Restructure the create_permission signal handler to perform fewer SQL queries, this speeds up the test suite dramatically. | django/contrib/auth/management/__init__.py | django/contrib/auth/management/__init__.py | """
Creates permissions for all installed apps that need permissions.
"""
from django.contrib.auth import models as auth_app
from django.db.models import get_models, signals
def _get_permission_codename(action, opts):
return u'%s_%s' % (action, opts.object_name.lower())
def _get_all_permissions(opts):
"Returns (codename, name) for all permissions in the given opts."
perms = []
for action in ('add', 'change', 'delete'):
perms.append((_get_permission_codename(action, opts), u'Can %s %s' % (action, opts.verbose_name_raw)))
return perms + list(opts.permissions)
def create_permissions(app, created_models, verbosity, **kwargs):
from django.contrib.contenttypes.models import ContentType
app_models = get_models(app)
# This will hold the permissions we're looking for as
# (content_type, (codename, name))
searched_perms = set()
# The codenames and ctypes that should exist.
ctypes = set()
codenames = set()
for klass in app_models:
ctype = ContentType.objects.get_for_model(klass)
ctypes.add(ctype)
for perm in _get_all_permissions(klass._meta):
codenames.add(perm[0])
searched_perms.add((ctype, perm))
# Find all the Permissions that a) have a content_type for a model we're
# looking for, and b) have a codename we're looking for. It doesn't need to
# have both, we have a list of exactly what we want, and it's faster to
# write the query with fewer conditions.
all_perms = set(auth_app.Permission.objects.filter(
content_type__in=ctypes,
codename__in=codenames
).values_list(
"content_type", "codename"
))
for ctype, (codename, name) in searched_perms:
# If the permissions exists, move on.
if (ctype.pk, codename) in all_perms:
continue
p = auth_app.Permission.objects.create(
codename=codename,
name=name,
content_type=ctype
)
if verbosity >= 2:
print "Adding permission '%s'" % p
def create_superuser(app, created_models, verbosity, **kwargs):
from django.core.management import call_command
if auth_app.User in created_models and kwargs.get('interactive', True):
msg = ("\nYou just installed Django's auth system, which means you "
"don't have any superusers defined.\nWould you like to create one "
"now? (yes/no): ")
confirm = raw_input(msg)
while 1:
if confirm not in ('yes', 'no'):
confirm = raw_input('Please enter either "yes" or "no": ')
continue
if confirm == 'yes':
call_command("createsuperuser", interactive=True)
break
signals.post_syncdb.connect(create_permissions,
dispatch_uid = "django.contrib.auth.management.create_permissions")
signals.post_syncdb.connect(create_superuser,
sender=auth_app, dispatch_uid = "django.contrib.auth.management.create_superuser")
| """
Creates permissions for all installed apps that need permissions.
"""
from django.db.models import get_models, signals
from django.contrib.auth import models as auth_app
def _get_permission_codename(action, opts):
return u'%s_%s' % (action, opts.object_name.lower())
def _get_all_permissions(opts):
"Returns (codename, name) for all permissions in the given opts."
perms = []
for action in ('add', 'change', 'delete'):
perms.append((_get_permission_codename(action, opts), u'Can %s %s' % (action, opts.verbose_name_raw)))
return perms + list(opts.permissions)
def create_permissions(app, created_models, verbosity, **kwargs):
from django.contrib.contenttypes.models import ContentType
app_models = get_models(app)
for klass in app_models:
ctype = ContentType.objects.get_for_model(klass)
for codename, name in _get_all_permissions(klass._meta):
p, created = auth_app.Permission.objects.get_or_create(
codename=codename,
content_type__pk=ctype.id,
defaults={
'name': name,
'content_type': ctype
}
)
if created and verbosity >= 2:
print "Adding permission '%s'" % p
def create_superuser(app, created_models, verbosity, **kwargs):
from django.core.management import call_command
if auth_app.User in created_models and kwargs.get('interactive', True):
msg = ("\nYou just installed Django's auth system, which means you "
"don't have any superusers defined.\nWould you like to create one "
"now? (yes/no): ")
confirm = raw_input(msg)
while 1:
if confirm not in ('yes', 'no'):
confirm = raw_input('Please enter either "yes" or "no": ')
continue
if confirm == 'yes':
call_command("createsuperuser", interactive=True)
break
signals.post_syncdb.connect(create_permissions,
dispatch_uid = "django.contrib.auth.management.create_permissions")
signals.post_syncdb.connect(create_superuser,
sender=auth_app, dispatch_uid = "django.contrib.auth.management.create_superuser")
| Python | 0.000002 |
8431458f7f18ec0dde86d46ec18dbdb61412f8ef | bump version | blaze/__init__.py | blaze/__init__.py | from __future__ import absolute_import, division, print_function
import logging
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.WARNING)
inf = float('inf')
nan = float('nan')
__version__ = '0.6.0-dev'
# If IPython is already loaded, register the Blaze catalog magic
# from . import catalog
# import sys
# if 'IPython' in sys.modules:
# catalog.register_ipy_magic()
# del sys
def print_versions():
"""Print all the versions of software that Blaze relies on."""
import sys, platform
import numpy as np
import dynd
import datashape
import blz
print("-=" * 38)
print("Blaze version: %s" % __version__)
print("Datashape version: %s" % datashape.__version__)
print("NumPy version: %s" % np.__version__)
print("DyND version: %s / LibDyND %s" %
(dynd.__version__, dynd.__libdynd_version__))
print("BLZ version: %s" % blz.__version__)
print("Blosc version: %s (%s)" % blz.blosc_version())
print("Python version: %s" % sys.version)
(sysname, nodename, release, version, machine, processor) = \
platform.uname()
print("Platform: %s-%s-%s (%s)" % (sysname, release, machine, version))
if sysname == "Linux":
print("Linux dist: %s" % " ".join(platform.linux_distribution()[:-1]))
if not processor:
processor = "not recognized"
print("Processor: %s" % processor)
print("Byte-ordering: %s" % sys.byteorder)
print("Detected cores: %s" % blz.detect_number_of_cores())
print("-=" * 38)
def test(verbosity=1, xunitfile=None, exit=False):
"""
Runs the full Blaze test suite, outputting
the results of the tests to sys.stdout.
This uses nose tests to discover which tests to
run, and runs tests in any 'tests' subdirectory
within the Blaze module.
Parameters
----------
verbosity : int, optional
Value 0 prints very little, 1 prints a little bit,
and 2 prints the test names while testing.
xunitfile : string, optional
If provided, writes the test results to an xunit
style xml file. This is useful for running the tests
in a CI server such as Jenkins.
exit : bool, optional
If True, the function will call sys.exit with an
error code after the tests are finished.
"""
import nose
import os
import sys
argv = ['nosetests', '--verbosity=%d' % verbosity]
# Output an xunit file if requested
if xunitfile:
argv.extend(['--with-xunit', '--xunit-file=%s' % xunitfile])
# Set the logging level to warn
argv.extend(['--logging-level=WARN'])
# Add all 'tests' subdirectories to the options
rootdir = os.path.dirname(__file__)
for root, dirs, files in os.walk(rootdir):
if 'tests' in dirs:
testsdir = os.path.join(root, 'tests')
argv.append(testsdir)
print('Test dir: %s' % testsdir[len(rootdir)+1:])
# print versions (handy when reporting problems)
print_versions()
sys.stdout.flush()
# Ask nose to do its thing
return nose.main(argv=argv, exit=exit)
| from __future__ import absolute_import, division, print_function
import logging
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.WARNING)
inf = float('inf')
nan = float('nan')
__version__ = '0.4.2-dev'
# If IPython is already loaded, register the Blaze catalog magic
# from . import catalog
# import sys
# if 'IPython' in sys.modules:
# catalog.register_ipy_magic()
# del sys
def print_versions():
"""Print all the versions of software that Blaze relies on."""
import sys, platform
import numpy as np
import dynd
import datashape
import blz
print("-=" * 38)
print("Blaze version: %s" % __version__)
print("Datashape version: %s" % datashape.__version__)
print("NumPy version: %s" % np.__version__)
print("DyND version: %s / LibDyND %s" %
(dynd.__version__, dynd.__libdynd_version__))
print("BLZ version: %s" % blz.__version__)
print("Blosc version: %s (%s)" % blz.blosc_version())
print("Python version: %s" % sys.version)
(sysname, nodename, release, version, machine, processor) = \
platform.uname()
print("Platform: %s-%s-%s (%s)" % (sysname, release, machine, version))
if sysname == "Linux":
print("Linux dist: %s" % " ".join(platform.linux_distribution()[:-1]))
if not processor:
processor = "not recognized"
print("Processor: %s" % processor)
print("Byte-ordering: %s" % sys.byteorder)
print("Detected cores: %s" % blz.detect_number_of_cores())
print("-=" * 38)
def test(verbosity=1, xunitfile=None, exit=False):
"""
Runs the full Blaze test suite, outputting
the results of the tests to sys.stdout.
This uses nose tests to discover which tests to
run, and runs tests in any 'tests' subdirectory
within the Blaze module.
Parameters
----------
verbosity : int, optional
Value 0 prints very little, 1 prints a little bit,
and 2 prints the test names while testing.
xunitfile : string, optional
If provided, writes the test results to an xunit
style xml file. This is useful for running the tests
in a CI server such as Jenkins.
exit : bool, optional
If True, the function will call sys.exit with an
error code after the tests are finished.
"""
import nose
import os
import sys
argv = ['nosetests', '--verbosity=%d' % verbosity]
# Output an xunit file if requested
if xunitfile:
argv.extend(['--with-xunit', '--xunit-file=%s' % xunitfile])
# Set the logging level to warn
argv.extend(['--logging-level=WARN'])
# Add all 'tests' subdirectories to the options
rootdir = os.path.dirname(__file__)
for root, dirs, files in os.walk(rootdir):
if 'tests' in dirs:
testsdir = os.path.join(root, 'tests')
argv.append(testsdir)
print('Test dir: %s' % testsdir[len(rootdir)+1:])
# print versions (handy when reporting problems)
print_versions()
sys.stdout.flush()
# Ask nose to do its thing
return nose.main(argv=argv, exit=exit)
| Python | 0 |
f794817bf62c8f92a6d7d9e55e13866dc63df7ba | Fix issue #7 | botbot/checker.py | botbot/checker.py | import stat, os
from . import problems
class Checker:
"""
Holds a set of checks that can be run on a file to make sure that
it's suitable for the shared directory. Runs checks recursively on a
given path.
"""
# checks is a set of all the checking functions this checker knows of. All
# checkers return a number signifying a specific problem with the
# file specified in the path.
def __init__(self):
self.checks = set()
self.all_problems = list()
def register(self, fn):
"""Add a new checking function to the set, or a list/tuple of functions."""
if isinstance(fn, list) or isinstance(fn, tuple):
for f in fn:
self.checks.add(f)
else:
self.checks.add(fn)
def check_tree(self, path):
"""
Run all the checks on every file in the specified path,
recursively. Returns a list of tuples. Each tuple contains 2
elements: the first is the path of the file, and the second is a
list of issues with the file at that path.
"""
mode = os.stat(path).st_mode
for f in os.listdir(path):
newpath = os.path.join(path, f)
np_mode = os.stat(newpath).st_mode
if stat.S_ISDIR(np_mode):
self.check_tree(newpath)
else:
current_problems = set()
for check in self.checks:
p = check(newpath)
current_problems.add(p)
self.all_problems.append((newpath, current_problems))
# Note: this section removes the residual dummy errors
# from files that have other errors. It adds another O(n)
# loop where we could have done it in that previous loop,
# so we should probably optimize it at some point.
for prob in self.all_problems:
prob_set = prob[1]
n = len(prob_set)
if problems.PROB_NO_PROBLEM in prob[1] and n > 1:
prob[1].remove(problems.PROB_NO_PROBLEM)
def pretty_print_issues(self, verbose):
"""Print a list of issues with their fixes."""
for p in self.all_problems:
for m in p[1]:
if (verbose):
print(p[0] + ": " + m.message + " " + m.fix)
else:
if m != problems.PROB_NO_PROBLEM:
print(p[0] + ": " + m.message + " " + m.fix)
def has_permission_issues(path):
"""Check whether a given path has bad permissons."""
mode = os.stat(path).st_mode
if stat.S_ISDIR(mode) and not stat.S_IXGRP(mode):
return problems.PROB_DIR_NOT_EXEC
else:
if not bool(stat.S_IRGRP & mode):
return problems.PROB_FILE_NOT_GRPRD
else:
return problems.PROB_NO_PROBLEM
def is_fastq(path):
"""Check whether a given file is a fastq file."""
if os.path.splitext(path)[1] == ".fastq":
if not os.path.islink(path):
return problems.PROB_FILE_IS_FASTQ
return problems.PROB_NO_PROBLEM
| import stat, os
from . import problems
class Checker:
"""
Holds a set of checks that can be run on a file to make sure that
it's suitable for the shared directory. Runs checks recursively on a
given path.
"""
# checks is a set of all the checking functions this checker knows of. All
# checkers return a number signifying a specific problem with the
# file specified in the path.
def __init__(self):
self.checks = set()
self.all_problems = list()
def register(self, fn):
"""Add a new checking function to the set, or a list/tuple of functions."""
if isinstance(fn, list) or isinstance(fn, tuple):
for f in fn:
self.checks.add(f)
else:
self.checks.add(fn)
def check_tree(self, path):
"""
Run all the checks on every file in the specified path,
recursively. Returns a list of tuples. Each tuple contains 2
elements: the first is the path of the file, and the second is a
list of issues with the file at that path.
"""
mode = os.stat(path).st_mode
for f in os.listdir(path):
newpath = os.path.join(path, f)
np_mode = os.stat(newpath).st_mode
if stat.S_ISDIR(np_mode):
self.check_tree(newpath)
else:
current_problems = set()
for check in self.checks:
p = check(newpath)
current_problems.add(p)
self.all_problems.append((newpath, current_problems))
# Note: this section removes the residual dummy errors
# from files that have other errors. It adds another O(n)
# loop where we could have done it in that previous loop,
# so we should probably optimize it at some point.
for prob in self.all_problems:
prob_set = prob[1]
n = len(prob_set)
if problems.PROB_NO_PROBLEM in prob[1] and n > 1:
prob[1].remove(problems.PROB_NO_PROBLEM)
def pretty_print_issues(self, verbose):
"""Print a list of issues with their fixes."""
for p in self.all_problems:
for m in p[1]:
if (verbose):
print(p[0] + ": " + m.message + " " + m.fix)
else:
if m != problems.PROB_NO_PROBLEM:
print(p[0] + ": " + m.message + " " + m.fix)
def has_permission_issues(path):
"""Check whether a given path has bad permissons."""
mode = os.stat(path).st_mode
if stat.S_ISDIR(mode) and not stat.S_IXGRP(mode):
return problems.PROB_DIR_NOT_EXEC
else:
if not bool(stat.S_IRGRP & mode):
return problems.PROB_FILE_NOT_GRPRD
else:
return problems.PROB_NO_PROBLEM
def is_fastq(path):
"""Check whether a given file is a fastq file."""
if os.path.splitext(path)[1] == ".fastq":
return problems.PROB_FILE_IS_FASTQ
else:
return problems.PROB_NO_PROBLEM
| Python | 0 |
a7b9c9a120aebe270ea200f3be0b2d3468f911cf | Bump version | modelqueryform/__init__.py | modelqueryform/__init__.py | __version__ = "2.2"
| __version__ = "2.1"
| Python | 0 |
ed360f5d896593f2646037c1b2028d8a5552a2d2 | fix test import data | tests/case_manager/test_case_data_manager.py | tests/case_manager/test_case_data_manager.py | # @Time : 2016/9/1 21:04
# @Author : lixintong
import datetime
import os
import unittest
from uitester.case_manager.case_data_manager import CaseDataManager
class TestCaseDataManager(unittest.TestCase):
def setUp(self):
self.case_data_manager = CaseDataManager()
self.package_name = ''
def test_export_and_import_data(self):
# notice export and import must has common CaseDataManager
# export test
print(" export start :", datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
case_list = self.case_data_manager.db_helper.query_case_all()
cases_id_list = []
for case in case_list:
cases_id_list.append(str(case.id)) # 类型转成str
path = os.path.join(os.getcwd(),'data.dpk')
# self.package_name = self.case_data_manager.export_data(path, cases_id_list)
self.case_data_manager.export_data(path, cases_id_list)#导入数据
print(" export finish :", datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
# import test
print(" import start :", datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
conflict_datas = self.case_data_manager.import_data(path)#有冲突
conflict_datas = self.case_data_manager.import_data(path) # 无冲突
if conflict_datas:
updata_tag_message_list = []
for key in conflict_datas:
data = conflict_datas[key]
updata_tag_message_list.append(data)
self.case_data_manager.merge_conflict_data(updata_tag_message_list) # result validation unfinished
print(self.case_data_manager.case_file_data["name"][0])
case = self.case_data_manager.db_helper.query_case_by_name(True,
self.case_data_manager.case_file_data[
"name"][0])
self.assertTrue(case is not None)
print("import finish :", datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
| # @Time : 2016/9/1 21:04
# @Author : lixintong
import datetime
import os
import unittest
from uitester.case_manager.case_data_manager import CaseDataManager
class TestCaseDataManager(unittest.TestCase):
def setUp(self):
self.case_data_manager = CaseDataManager()
self.package_name = ''
def test_export_and_import_data(self):
# notice export and import must has common CaseDataManager
# export test
print(" export start :", datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
case_list = self.case_data_manager.db_helper.query_case_all()
cases_id_list = []
for case in case_list:
cases_id_list.append(str(case.id)) # 类型转成str
path = os.path.join(os.getcwd(),'data.dpk')
# self.package_name = self.case_data_manager.export_data(path, cases_id_list)
self.case_data_manager.export_data(path, cases_id_list)#导入数据
print(" export finish :", datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
# import test
print(" import start :", datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
conflict_datas = self.case_data_manager.import_data(path)#有冲突
conflict_datas = self.case_data_manager.import_data(path) # 无冲突
if conflict_datas:
updata_tag_message_list = []
for key in conflict_datas:
data = conflict_datas[key]
updata_tag_message_list.append(data)
self.case_data_manager.merge_conflict_data(updata_tag_message_list) # result validation unfinished
print(self.case_data_manager.case_file_data["name"][0])
case = self.case_data_manager.db_helper.query_case_by_name(True,
self.case_data_manager.case_file_data[
"name"][0])
self.assertTrue(case is not None)
print("import finish :", datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
| Python | 0 |
4d40e9db4bd6b58787557e8d5547f69eb67c9b96 | Add additional coverage to author build list | tests/changes/api/test_author_build_index.py | tests/changes/api/test_author_build_index.py | from uuid import uuid4
from changes.config import db
from changes.models import Author
from changes.testutils import APITestCase
class AuthorBuildListTest(APITestCase):
def test_simple(self):
fake_author_id = uuid4()
self.create_build(self.project)
path = '/api/0/authors/{0}/builds/'.format(fake_author_id.hex)
resp = self.client.get(path)
assert resp.status_code == 200
data = self.unserialize(resp)
assert len(data) == 0
author = Author(email=self.default_user.email, name='Foo Bar')
db.session.add(author)
build = self.create_build(self.project, author=author)
path = '/api/0/authors/{0}/builds/'.format(author.id.hex)
resp = self.client.get(path)
assert resp.status_code == 200
data = self.unserialize(resp)
assert len(data) == 1
assert data[0]['id'] == build.id.hex
self.login(self.default_user)
path = '/api/0/authors/me/builds/'
resp = self.client.get(path)
assert resp.status_code == 200
data = self.unserialize(resp)
assert len(data) == 1
assert data[0]['id'] == build.id.hex
| from uuid import uuid4
from changes.config import db
from changes.models import Author
from changes.testutils import APITestCase
class AuthorBuildListTest(APITestCase):
def test_simple(self):
fake_author_id = uuid4()
self.create_build(self.project)
path = '/api/0/authors/{0}/builds/'.format(fake_author_id.hex)
resp = self.client.get(path)
assert resp.status_code == 200
data = self.unserialize(resp)
assert len(data) == 0
author = Author(email='foo@example.com', name='Foo Bar')
db.session.add(author)
build = self.create_build(self.project, author=author)
path = '/api/0/authors/{0}/builds/'.format(author.id.hex)
resp = self.client.get(path)
assert resp.status_code == 200
data = self.unserialize(resp)
assert len(data) == 1
assert data[0]['id'] == build.id.hex
| Python | 0 |
03aebd7eff51be1847866d9920b8520cee72348f | fix failure in test_global_pinger_memo | tests/python/pants_test/cache/test_pinger.py | tests/python/pants_test/cache/test_pinger.py | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import threading
import time
from six.moves import SimpleHTTPServer, socketserver
from pants.cache.pinger import Pinger
from pants_test.base_test import BaseTest
def get_delayed_handler(delay):
class DelayResponseHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def do_HEAD(self):
time.sleep(delay)
self.send_response(200)
self.end_headers()
return DelayResponseHandler
class TestPinger(BaseTest):
timeout_seconds = .6
slow_seconds = .05
fast_seconds = 0
def setup_delayed_server(self, delay):
server = socketserver.TCPServer(("", 0), get_delayed_handler(delay))
thread = threading.Thread(target=server.serve_forever)
thread.daemon = True
thread.start()
return server
def setUp(self):
timeout = self.setup_delayed_server(self.timeout_seconds)
slow = self.setup_delayed_server(self.slow_seconds)
fast = self.setup_delayed_server(self.fast_seconds)
self.servers = [timeout, slow, fast]
self.fast_netloc = 'localhost:{}'.format(fast.socket.getsockname()[1])
self.slow_netloc = 'localhost:{}'.format(slow.socket.getsockname()[1])
self.timeout_netloc = 'localhost:{}'.format(timeout.socket.getsockname()[1])
def test_pinger_times_correct(self):
test = Pinger(timeout=.5, tries=2)
netlocs = [self.fast_netloc, self.slow_netloc, self.timeout_netloc]
ping_results = dict(test.pings(netlocs))
self.assertLess(ping_results[self.fast_netloc], ping_results[self.slow_netloc])
self.assertEqual(ping_results[self.timeout_netloc], Pinger.UNREACHABLE)
def test_pinger_timeout_config(self):
test = Pinger(timeout=self.slow_seconds - .01, tries=2)
netlocs = [self.fast_netloc, self.slow_netloc]
ping_results = dict(test.pings(netlocs))
self.assertLess(ping_results[self.fast_netloc], 1)
self.assertEqual(ping_results[self.slow_netloc], Pinger.UNREACHABLE)
def test_global_pinger_memo(self):
fast_pinger = Pinger(timeout=self.slow_seconds - .01, tries=2)
slow_pinger = Pinger(timeout=self.timeout_seconds, tries=2)
self.assertEqual(fast_pinger.pings([self.slow_netloc])[0][1], Pinger.UNREACHABLE)
self.assertNotEqual(slow_pinger.pings([self.slow_netloc])[0][1], Pinger.UNREACHABLE)
def tearDown(self):
for server in self.servers:
server.shutdown()
| # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import threading
import time
from six.moves import SimpleHTTPServer, socketserver
from pants.cache.pinger import Pinger
from pants_test.base_test import BaseTest
def get_delayed_handler(delay):
class DelayResponseHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def do_HEAD(self):
time.sleep(delay)
self.send_response(200)
self.end_headers()
return DelayResponseHandler
class TestPinger(BaseTest):
timeout_seconds = .6
slow_seconds = .05
fast_seconds = 0
def setup_delayed_server(self, delay):
server = socketserver.TCPServer(("", 0), get_delayed_handler(delay))
thread = threading.Thread(target=server.serve_forever)
thread.daemon = True
thread.start()
return server
def setUp(self):
timeout = self.setup_delayed_server(self.timeout_seconds)
slow = self.setup_delayed_server(self.slow_seconds)
fast = self.setup_delayed_server(self.fast_seconds)
self.servers = [timeout, slow, fast]
self.fast_netloc = 'localhost:{}'.format(fast.socket.getsockname()[1])
self.slow_netloc = 'localhost:{}'.format(slow.socket.getsockname()[1])
self.timeout_netloc = 'localhost:{}'.format(timeout.socket.getsockname()[1])
def test_pinger_times_correct(self):
test = Pinger(timeout=.5, tries=2)
netlocs = [self.fast_netloc, self.slow_netloc, self.timeout_netloc]
ping_results = dict(test.pings(netlocs))
self.assertLess(ping_results[self.fast_netloc], ping_results[self.slow_netloc])
self.assertEqual(ping_results[self.timeout_netloc], Pinger.UNREACHABLE)
def test_pinger_timeout_config(self):
test = Pinger(timeout=self.slow_seconds - .01, tries=2)
netlocs = [self.fast_netloc, self.slow_netloc]
ping_results = dict(test.pings(netlocs))
self.assertLess(ping_results[self.fast_netloc], 1)
self.assertEqual(ping_results[self.slow_netloc], Pinger.UNREACHABLE)
def test_global_pinger_memo(self):
fast_pinger = Pinger(timeout=self.slow_seconds, tries=2)
slow_pinger = Pinger(timeout=self.timeout_seconds, tries=2)
self.assertEqual(fast_pinger.pings([self.slow_netloc])[0][1], Pinger.UNREACHABLE)
self.assertNotEqual(slow_pinger.pings([self.slow_netloc])[0][1], Pinger.UNREACHABLE)
def tearDown(self):
for server in self.servers:
server.shutdown()
| Python | 0.000017 |
932fccc77fb10ece61c3feeb47a28225216c7c0d | add two more authors for gemeinfrei_2021.py | service/ws_re/scanner/tasks/gemeinfrei_2021.py | service/ws_re/scanner/tasks/gemeinfrei_2021.py | import pywikibot
from service.ws_re.register.authors import Authors
from service.ws_re.scanner.tasks.base_task import ReScannerTask
from service.ws_re.template.article import Article
from tools.bots.pi import WikiLogger
class GF21Task(ReScannerTask):
def __init__(self, wiki: pywikibot.Site, logger: WikiLogger, debug: bool = True):
super().__init__(wiki, logger, debug)
self.authors = Authors()
def task(self):
for re_article in self.re_page:
if isinstance(re_article, Article):
authors = self.authors.get_author_by_mapping(re_article.author[0], re_article["BAND"].value)
for author in authors:
author_string = f"{author.first_name} {author.last_name}"
if author_string in ("Arthur Stein", "Hugo Willrich", "Edward Capps", "Kurt Witte",
"August Hug", "Max Radin", "Werner Schur", "Percy Neville Ure",
"Herbert Bannert", "Adolf Wilhelm", "Wilhelm Schmid"):
if re_article["KEINE_SCHÖPFUNGSHÖHE"].value:
re_article["TODESJAHR"].value = ""
re_article["KEINE_SCHÖPFUNGSHÖHE"].value = False
| import pywikibot
from service.ws_re.register.authors import Authors
from service.ws_re.scanner.tasks.base_task import ReScannerTask
from service.ws_re.template.article import Article
from tools.bots.pi import WikiLogger
class GF21Task(ReScannerTask):
def __init__(self, wiki: pywikibot.Site, logger: WikiLogger, debug: bool = True):
super().__init__(wiki, logger, debug)
self.authors = Authors()
def task(self):
for re_article in self.re_page:
if isinstance(re_article, Article):
authors = self.authors.get_author_by_mapping(re_article.author[0], re_article["BAND"].value)
for author in authors:
author_string = f"{author.first_name} {author.last_name}"
if author_string in ("Arthur Stein", "Hugo Willrich", "Edward Capps", "Kurt Witte",
"August Hug", "Max Radin", "Werner Schur", "Percy Neville Ure",
"Herbert Bannert"):
if re_article["KEINE_SCHÖPFUNGSHÖHE"].value:
re_article["TODESJAHR"].value = ""
re_article["KEINE_SCHÖPFUNGSHÖHE"].value = False
| Python | 0 |
6a7d7393d90c1a10071b392d24431af1111a0824 | clean up | streamteam/dynamics/plot.py | streamteam/dynamics/plot.py | # coding: utf-8
""" ...explain... """
from __future__ import division, print_function
__author__ = "adrn <adrn@astro.columbia.edu>"
# Standard library
import os, sys
# Third-party
import matplotlib.pyplot as plt
import numpy as np
__all__ = ['plot_orbits']
def plot_orbits(x, ix=None, axes=None, triangle=False, **kwargs):
"""
Given time series of positions, `x`, make nice plots of the orbit in
cartesian projections.
Parameters
----------
x : array_like
Array of positions. The last axis (`axis=-1`) is assumed
to be the dimensionality, e.g., `x.shape[-1]`. The first axis
(`axis=0`) is assumed to be the time axis.
ix : int, array_like (optional)
Index or array of indices of orbits to plot. For example, if `x` is an
array of shape (1024,32,6) -- 1024 timesteps for 32 orbits in 6D
phase-space -- `ix` would specify which of the 32 orbits to plot.
axes : array_like (optional)
Array of matplotlib Axes objects.
triangle : bool (optional)
Make a triangle plot instead of plotting all projections in a single row.
Other Parameters
----------------
kwargs
All other keyword arguments are passed to the matplotlib `plot()` call.
You can pass in any of the usual style kwargs like `color=...`,
`marker=...`, etc.
"""
if triangle and axes is None:
fig,axes = plt.subplots(2,2,figsize=(12,12),sharex='col',sharey='row')
axes[0,1].set_visible(False)
axes = axes.flat
axes = [axes[0],axes[2],axes[3]]
elif triangle and axes is not None:
try:
axes = axes.flat
except:
pass
if len(axes) == 4:
axes = [axes[0],axes[2],axes[3]]
elif not triangle and axes is None:
fig,axes = plt.subplots(1,3,figsize=(12,5),sharex=True,sharey=True)
if ix is not None:
ixs = np.atleast_1d(ix)
else:
ixs = range(x.shape[1])
for ii in ixs:
axes[0].plot(x[:,ii,0], x[:,ii,1], **kwargs)
axes[1].plot(x[:,ii,0], x[:,ii,2], **kwargs)
axes[2].plot(x[:,ii,1], x[:,ii,2], **kwargs)
if triangle:
# HACK: until matplotlib 1.4 comes out, need this
axes[0].set_ylim(axes[0].get_xlim())
axes[2].set_xlim(axes[0].get_ylim())
axes[0].set_ylabel("Y")
axes[1].set_xlabel("X")
axes[1].set_ylabel("Z")
axes[2].set_xlabel("Y")
else:
axes[0].set_xlabel("X")
axes[0].set_ylabel("Y")
axes[1].set_xlabel("X")
axes[1].set_ylabel("Z")
axes[2].set_xlabel("Y")
axes[2].set_ylabel("Z")
if not triangle:
axes[0].figure.tight_layout()
return axes[0].figure | # coding: utf-8
""" ...explain... """
from __future__ import division, print_function
__author__ = "adrn <adrn@astro.columbia.edu>"
# Standard library
import os, sys
# Third-party
import matplotlib.pyplot as plt
import numpy as np
__all__ = ['plot_orbits']
def plot_orbits(w, ix=None, axes=None, triangle=False, **kwargs):
"""
TODO:
"""
if triangle and axes is None:
fig,axes = plt.subplots(2,2,figsize=(12,12),sharex='col',sharey='row')
axes[0,1].set_visible(False)
axes = axes.flat
axes = [axes[0],axes[2],axes[3]]
elif triangle and axes is not None:
try:
axes = axes.flat
except:
pass
if len(axes) == 4:
axes = [axes[0],axes[2],axes[3]]
elif not triangle and axes is None:
fig,axes = plt.subplots(1,3,figsize=(12,5),sharex=True,sharey=True)
if ix is not None:
ixs = [ix]
else:
ixs = range(w.shape[1])
for ii in ixs:
axes[0].plot(w[:,ii,0], w[:,ii,1], **kwargs)
axes[1].plot(w[:,ii,0], w[:,ii,2], **kwargs)
axes[2].plot(w[:,ii,1], w[:,ii,2], **kwargs)
if triangle:
# HACK: until matplotlib 1.4 comes out, need this
axes[0].set_ylim(axes[0].get_xlim())
axes[2].set_xlim(axes[0].get_ylim())
axes[0].set_ylabel("Y")
axes[1].set_xlabel("X")
axes[1].set_ylabel("Z")
axes[2].set_xlabel("Y")
else:
axes[0].set_xlabel("X")
axes[0].set_ylabel("Y")
axes[1].set_xlabel("X")
axes[1].set_ylabel("Z")
axes[2].set_xlabel("Y")
axes[2].set_ylabel("Z")
if not triangle:
axes[0].figure.tight_layout()
return axes[0].figure | Python | 0.000001 |
d56382a87068e7d43b3333b6ea3dc2fd0a80d929 | Use dict instead of list | 10-disambiguate.py | 10-disambiguate.py | #!/usr/bin/env python
from signal import signal, SIGPIPE, SIG_DFL
signal(SIGPIPE, SIG_DFL)
import csv
import gc
import sys
from collections import defaultdict
from sklearn.feature_extraction import DictVectorizer
from sklearn.metrics.pairwise import cosine_similarity as sim
from operator import itemgetter
from multiprocessing import Pool, cpu_count
wsi = defaultdict(lambda: dict())
v = DictVectorizer()
D = []
with open('03-cw-wsi.txt') as f:
reader = csv.reader(f, delimiter='\t', quoting=csv.QUOTE_NONE)
for row in reader:
word, sid, _, words = row
try:
words = {k: float(v) for record in words.split(' ') for k, v in (record.rsplit(':', 1),)}
except ValueError:
print('Skipping misformatted string: %s.' % words, file=sys.stderr)
continue
wsi[word][int(sid)] = words
D.append(words)
X = v.fit_transform(D)
def emit(word):
sneighbours = {}
for sid, words in wsi[word].items():
sense = '%s#%d' % (word, sid)
features = words.copy()
features.update({word: 1.})
vector = v.transform(features)
sneighbours[sense] = {}
for neighbour, weight in words.items():
neighbours = wsi[neighbour]
candidates = {nsid: sim(vector, v.transform(neighbours[nsid])).item(0) for nsid in neighbours}
_, cosine = max(candidates.items(), key=itemgetter(1))
if cosine > 0:
nsense = '%s#%d' % (neighbour, nsid)
sneighbours[sense][nsense] = weight
return sneighbours
with Pool(cpu_count()) as pool:
for sneighbours in pool.imap_unordered(emit, wsi):
for sense, neighbours in sneighbours.items():
for nsense, weight in neighbours.items():
print('%s\t%s\t%f' % (sense, nsense, weight))
| #!/usr/bin/env python
from signal import signal, SIGPIPE, SIG_DFL
signal(SIGPIPE, SIG_DFL)
import csv
import gc
import sys
from collections import defaultdict
from sklearn.feature_extraction import DictVectorizer
from sklearn.metrics.pairwise import cosine_similarity as sim
from operator import itemgetter
from multiprocessing import Pool, cpu_count
wsi = defaultdict(lambda: dict())
v = DictVectorizer()
D = []
with open('03-cw-wsi.txt') as f:
reader = csv.reader(f, delimiter='\t', quoting=csv.QUOTE_NONE)
for row in reader:
word, sid, _, words = row
try:
words = {k: float(v) for record in words.split(' ') for k, v in (record.rsplit(':', 1),)}
except ValueError:
print('Skipping misformatted string: %s.' % words, file=sys.stderr)
continue
wsi[word][int(sid)] = words
D.append(words)
X = v.fit_transform(D)
def emit(word):
sneighbours = {}
for sid, words in wsi[word].items():
sense = '%s#%d' % (word, sid)
features = words.copy()
features.update({word: 1.})
vector = v.transform(features)
sneighbours[sense] = {}
for neighbour, weight in words.items():
neighbours = wsi[neighbour]
candidates = [(nsid, sim(vector, v.transform(neighbours[nsid])).item(0)) for nsid in neighbours]
nsid, cosine = max(candidates, key=itemgetter(1))
if cosine > 0:
nsense = '%s#%d' % (neighbour, nsid)
sneighbours[sense][nsense] = weight
return sneighbours
with Pool(cpu_count()) as pool:
for sneighbours in pool.imap_unordered(emit, wsi):
for sense, neighbours in sneighbours.items():
for nsense, weight in neighbours.items():
print('%s\t%s\t%f' % (sense, nsense, weight))
| Python | 0.000001 |
a2753124d89689dcfd3f90e050417d38a17bdd60 | Fix redis caching when multiple GitDox instances share a Redis instance | modules/redis_cache.py | modules/redis_cache.py | import os
import platform
import redis
from modules.configobj import ConfigObj
r = redis.Redis()
GITDOX_PREFIX = "__gitdox"
SEP = "|"
REPORT = "report"
TIMESTAMP = "timestamp"
if platform.system() == "Windows":
prefix = "transc\\"
else:
prefix = ""
rootpath = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) + os.sep
userdir = rootpath + "users" + os.sep
config = ConfigObj(userdir + 'config.ini')
PROJECT_NAME = "_" + (config['project'].lower().replace(" ", "_") if 'project' in config else 'default_project')
def make_key_base(doc_id, validation_type):
"""Keys for this cache have the form, e.g., __gitdox|123|ether|report
This function formats the first three pieces of this string."""
if validation_type not in ["xml", "ether", "meta", "export"]:
raise Exception("Unknown validation type: " + validation_type)
return SEP.join([GITDOX_PREFIX, PROJECT_NAME str(doc_id), validation_type])
# common ------------------------------------------------------------------------
def get_report(doc_id, validation_type):
"""Returns the report for the given validation type if present in the cache,
False otherwise"""
key_base = make_key_base(doc_id, validation_type)
if key_base + SEP + REPORT in r:
return r.get(key_base + SEP + REPORT)
return False
def get_timestamp(doc_id, validation_type):
"""For ether and export validation types, returns the associated timestamp
obtained from roomtimes at the time of validation."""
key_base = make_key_base(doc_id, validation_type)
if key_base + SEP + TIMESTAMP in r:
return r.get(key_base + SEP + TIMESTAMP)
return False
def invalidate_by_doc(doc_id, validation_type):
"""Invalidates the report for a given validation type for a given doc."""
key_base = make_key_base(doc_id, validation_type)
r.delete(key_base + SEP + REPORT)
if key_base + SEP + TIMESTAMP in r:
r.delete(key_base + SEP + TIMESTAMP)
def invalidate_by_type(validation_type):
"""Invalidates the reports for a given validation type for all docs."""
pattern = GITDOX_PREFIX + "*" + SEP + validation_type + SEP + "*"
for key in r.keys(pattern=pattern):
r.delete(key)
def reset_cache():
"""Invalidates all reports."""
pattern = GITDOX_PREFIX + "*"
for key in r.keys(pattern=pattern):
r.delete(key)
# Functions for xml and meta ----------------------------------------------------
def cache_validation_result(doc_id, validation_type, report):
"""Caching for non-ethercalc-based validation types, currently xml and meta."""
if validation_type not in ["xml", "meta"]:
raise Exception("Mode must be one of 'xml', 'meta'.")
key_base = make_key_base(doc_id, validation_type)
r.set(key_base + SEP + REPORT, report)
# Functions for ether and export ------------------------------------------------
def cache_timestamped_validation_result(doc_id, validation_type, report, timestamp):
    """Caching for ethercalc-based validation types, currently ether and export.
    For xml and meta we are able to maintain the cache because Gitdox knows when
    xml or meta has changed, but with ethercalc, Gitdox is not informed of
    changes, so we must compare timestamps."""
    # Validate the type before writing anything.
    if validation_type not in ["ether", "export"]:
        raise Exception("Mode must be one of 'ether', 'export'.")
    key_base = make_key_base(doc_id, validation_type)
    # Report and its generation timestamp are stored as two sibling keys.
    r.set(key_base + SEP + REPORT, report)
    r.set(key_base + SEP + TIMESTAMP, timestamp)
| import redis
r = redis.Redis()
GITDOX_PREFIX = "__gitdox"
SEP = "|"
REPORT = "report"
TIMESTAMP = "timestamp"
def make_key_base(doc_id, validation_type):
    """Build the shared key prefix (e.g. __gitdox|123|ether) that every cache
    entry for this doc/validation-type pair starts with."""
    valid_types = ("xml", "ether", "meta", "export")
    if validation_type not in valid_types:
        raise Exception("Unknown validation type: " + validation_type)
    pieces = [GITDOX_PREFIX, str(doc_id), validation_type]
    return SEP.join(pieces)
# common ------------------------------------------------------------------------
def get_report(doc_id, validation_type):
    """Returns the report for the given validation type if present in the cache,
    False otherwise"""
    key_base = make_key_base(doc_id, validation_type)
    # Report keys look like __gitdox|<doc_id>|<type>|report.
    if key_base + SEP + REPORT in r:
        return r.get(key_base + SEP + REPORT)
    return False
def get_timestamp(doc_id, validation_type):
    """For ether and export validation types, returns the associated timestamp
    obtained from roomtimes at the time of validation."""
    key_base = make_key_base(doc_id, validation_type)
    # Timestamp keys are only written by cache_timestamped_validation_result.
    if key_base + SEP + TIMESTAMP in r:
        return r.get(key_base + SEP + TIMESTAMP)
    return False
def invalidate_by_doc(doc_id, validation_type):
    """Invalidates the report for a given validation type for a given doc."""
    key_base = make_key_base(doc_id, validation_type)
    r.delete(key_base + SEP + REPORT)
    # Ether/export entries also carry a timestamp key; remove it when present.
    if key_base + SEP + TIMESTAMP in r:
        r.delete(key_base + SEP + TIMESTAMP)
def invalidate_by_type(validation_type):
    """Invalidates the reports for a given validation type for all docs."""
    # Glob pattern matching every doc's keys for this validation type.
    pattern = GITDOX_PREFIX + "*" + SEP + validation_type + SEP + "*"
    for key in r.keys(pattern=pattern):
        r.delete(key)
def reset_cache():
    """Invalidates all reports."""
    # Every cache key carries the gitdox prefix, so this clears them all.
    pattern = GITDOX_PREFIX + "*"
    for key in r.keys(pattern=pattern):
        r.delete(key)
# Functions for xml and meta ----------------------------------------------------
def cache_validation_result(doc_id, validation_type, report):
    """Caching for non-ethercalc-based validation types, currently xml and meta."""
    if validation_type not in ["xml", "meta"]:
        raise Exception("Mode must be one of 'xml', 'meta'.")
    key_base = make_key_base(doc_id, validation_type)
    # No timestamp key here: Gitdox is notified of xml/meta changes directly.
    r.set(key_base + SEP + REPORT, report)
# Functions for ether and export ------------------------------------------------
def cache_timestamped_validation_result(doc_id, validation_type, report, timestamp):
    """Caching for ethercalc-based validation types, currently ether and export.
    For xml and meta we are able to maintain the cache because Gitdox knows when
    xml or meta has changed, but with ethercalc, Gitdox is not informed of
    changes, so we must compare timestamps."""
    if validation_type not in ["ether", "export"]:
        raise Exception("Mode must be one of 'ether', 'export'.")
    key_base = make_key_base(doc_id, validation_type)
    # Persist both the report and the timestamp it was computed against.
    r.set(key_base + SEP + REPORT, report)
    r.set(key_base + SEP + TIMESTAMP, timestamp)
| Python | 0 |
72df22e62806e64e05b3bbb6eca0efd958c7c8bb | make btcnet_wrapper fail in a more instructive manner | btcnet_wrapper.py | btcnet_wrapper.py | from git import Repo
# Ensure a local clone of the btcnet_info repository exists and is current.
try:
    repo = Repo("btcnet_info")
except:
    # NOTE(review): bare except -- any failure (not only a missing repo)
    # triggers a fresh init/clone; consider narrowing the exception type.
    repo = Repo.init("btcnet_info")
    repo = repo.clone("git://github.com/c00w/btcnet_info.git")
    origin = repo.create_remote('origin', 'git://github.com/c00w/btcnet_info.git')
origin = repo.remotes.origin
origin.fetch()
origin.pull('master')
# Import the freshly updated module; on failure, print an actionable hint
# instead of a raw traceback and exit with a non-zero status.
try:
    import btcnet_info
except:
    print 'Install pythongit! See the readme for detailed instructions'
    import os
    os._exit(2)
| from git import Repo
# Ensure a local clone of the btcnet_info repository exists and is current.
try:
    repo = Repo("btcnet_info")
except:
    # NOTE(review): bare except -- any failure falls through to init/clone.
    repo = Repo.init("btcnet_info")
    repo = repo.clone("git://github.com/c00w/btcnet_info.git")
    origin = repo.create_remote('origin', 'git://github.com/c00w/btcnet_info.git')
origin = repo.remotes.origin
origin.fetch()
origin.pull('master')
# Unguarded import: if the module is unavailable this raises a plain
# ImportError with no guidance for the user.
import btcnet_info
| Python | 0.000001 |
1ac423e9127631eeb78868c47cf6fee12bf36a12 | Fix bug in handling get/post, should work now | test_utils/middleware/testmaker.py | test_utils/middleware/testmaker.py | from django.conf import settings
from django.test import Client
from django.test.utils import setup_test_environment
import logging, re
from django.utils.encoding import force_unicode
log = logging.getLogger('testmaker')
print "Loaded Testmaker Middleware"
#Remove at your own peril
debug = getattr(settings, 'DEBUG', False)
if not debug:
print "THIS CODE IS NOT MEANT FOR USE IN PRODUCTION"
#return
class TestMakerMiddleware(object):
    """Records live traffic as generated unit-test code.

    Requests carrying 'test_client_true' are the middleware's own replays
    and are ignored to avoid infinite recursion.
    """
    def process_request(self, request):
        if 'test_client_true' not in request.REQUEST:
            log_request(request)
            if request.method.lower() == "get":
                setup_test_environment()
                c = Client()
                getdict = request.GET.copy()
                getdict['test_client_true'] = 'yes' #avoid recursion
                r = c.get(request.path, getdict)
                log_status(request.path, r)
                # Only dump context assertions for real pages: a 404's context
                # belongs to the error page, not the view under test.
                if r.context and r.status_code != 404:
                    con = get_user_context(r.context)
                    output_user_context(con)
def log_request(request):
    """Emit the opening lines of a generated test case reproducing this
    request's path and parameters through the Django test client."""
    log.info('\n\tdef %s(self): ' % 'test_path')
    pairs = []
    for query_dict in request.REQUEST.dicts:
        for name in query_dict:
            pairs.append("'%s': '%s'" % (name, request.REQUEST[name]))
    args_repr = "'%s', {%s}" % (request.path, "".join(pairs))
    log.info("\t\tr = c.%s(%s)" % (request.method.lower(), args_repr))
def log_status(path, request):
    """Emit an assertion pinning the response status code.

    ``path`` is unused but kept for call-site compatibility.
    """
    status_line = "\t\tself.assertEqual(r.status_code, %s)" % request.status_code
    log.info(status_line)
def get_user_context(context_list):
    """Extract the user-supplied part of a rendered template context.

    If ``context_list`` is a list, the last rendered context is inspected and
    its final dict returned, falling back to the first dict when the final
    one is empty. Anything else is returned unchanged.
    """
    # Ugly hack -- mirrors how the contexts are stacked during rendering.
    if not isinstance(context_list, list):
        return context_list
    last_context = context_list[-1]  # last context rendered
    user_dict = last_context.dicts[-1]
    return user_dict if user_dict != {} else last_context.dicts[0]
def output_user_context(context):
    """Log an assertEqual line for every context variable whose value is
    stable enough to assert on."""
    for var in context:
        try:
            # Values embedding a memory address (0x...) change between runs
            # and would make the generated test flaky.
            if not re.search("0x\w+", force_unicode(context[var])): #Avoid memory addy's which will change.
                log.info(u'\t\tself.assertEqual(unicode(r.context[-1]["%s"]), u"%s")' % (var, unicode(context[var])))
        except UnicodeDecodeError, e:
            #FIXME: This might blow up on odd encoding
            pass
| from django.conf import settings
from django.test import Client
from django.test.utils import setup_test_environment
import logging, re
from django.utils.encoding import force_unicode
log = logging.getLogger('testmaker')
print "Loaded Testmaker Middleware"
#Remove at your own peril
debug = getattr(settings, 'DEBUG', False)
if not debug:
print "THIS CODE IS NOT MEANT FOR USE IN PRODUCTION"
#return
class TestMakerMiddleware(object):
    """Records live traffic as generated unit-test code.

    Requests carrying 'test_client_true' are the middleware's own replays
    and are ignored to avoid infinite recursion.
    """
    def process_request(self, request):
        if 'test_client_true' not in request.REQUEST:
            log_request(request)
            if request.method.lower() == "get":
                setup_test_environment()
                c = Client()
                getdict = request.GET.copy()
                getdict['test_client_true'] = 'yes' #avoid recursion
                r = c.get(request.path, getdict)
                log_status(request.path, r)
                # Skip context assertions on 404s: their context belongs to
                # the error page, not the view under test, so dumping it
                # produced bogus generated assertions for missing pages.
                if r.context and r.status_code != 404:
                    con = get_user_context(r.context)
                    output_user_context(con)
def log_request(request):
    """Log a generated test-method header plus the test-client call that
    reproduces this request's path and parameters."""
    log.info('\n\tdef %s(self): ' % 'test_path')
    method = request.method.lower()
    request_str = "'%s', {" % request.path
    # Fixed: the loop variable used to shadow the builtin `dict`, and the
    # %-format was missing its parentheses, so the right-hand side became a
    # tuple and `str += tuple` raised TypeError on every parameterized request.
    for qdict in request.REQUEST.dicts:
        for arg in qdict:
            request_str += "'%s': '%s', " % (arg, request.REQUEST[arg])
    request_str += "}"
    log.info("\t\tr = c.%s(%s)" % (method, request_str))
def log_status(path, request):
    """Log an assertEqual pinning the response's status code.

    ``path`` is accepted for call-site symmetry but not used here.
    """
    log.info("\t\tself.assertEqual(r.status_code, %s)" % request.status_code)
def get_user_context(context_list):
    """Return the user-supplied portion of a rendered template context.

    Lists of contexts are reduced to the last rendered context; its final
    dict is used unless empty, in which case the first dict is returned.
    Non-list inputs are passed through unchanged.
    """
    #Ugly Hack. Needs to be a better way
    if isinstance(context_list, list):
        context_list = context_list[-1] #Last context rendered
        ret = context_list.dicts[-1]
        if ret == {}:
            ret = context_list.dicts[0]
        return ret
    else:
        return context_list
def output_user_context(context):
    """Log an assertEqual line for every stable context variable (values
    containing memory addresses are skipped as they change between runs)."""
    for var in context:
        try:
            if not re.search("0x\w+", force_unicode(context[var])): #Avoid memory addy's which will change.
                log.info(u'\t\tself.assertEqual(unicode(r.context[-1]["%s"]), u"%s")' % (var, unicode(context[var])))
        except Exception, e:
            # NOTE(review): this broad except hides far more than encoding
            # errors; narrowing to UnicodeDecodeError would surface real bugs.
            #FIXME: This might blow up on odd encoding or 404s.
            pass
| Python | 0 |
46268cb2cf5e4570ef3e08440291e802d9e16b05 | Fix variable name conflict | modules/networking/page.py | modules/networking/page.py | import http.client
import socket
import subprocess
import tempfile
import urllib
from nemubot import __version__
from nemubot.exception import IRCException
from nemubot.tools import web
def load(CONF, add_hook):
    """Nemubot module entry point; intentionally does nothing yet.

    TODO: verify that the w3m binary is available, since render() shells
    out to it.
    """
def headers(url):
    """Retrieve HTTP header for the given URL

    Argument:
    url -- the page URL to get header

    Returns a (version, status, reason, headers) tuple from the HEAD
    response. Raises IRCException on invalid URLs, timeouts and DNS errors.
    """
    # Default to http:// when the URL carries no explicit scheme.
    o = urllib.parse.urlparse(url, "http")
    if o.netloc == "":
        raise IRCException("invalid URL")
    if o.scheme == "http":
        conn = http.client.HTTPConnection(o.hostname, port=o.port, timeout=5)
    else:
        conn = http.client.HTTPSConnection(o.hostname, port=o.port, timeout=5)
    try:
        # HEAD keeps the exchange small: only status and headers are needed.
        conn.request("HEAD", o.path, None, {"User-agent":
                                            "Nemubot v%s" % __version__})
    except socket.timeout:
        raise IRCException("request timeout")
    except socket.gaierror:
        print ("<tools.web> Unable to receive page %s from %s on %d."
               % (o.path, o.hostname, o.port))
        raise IRCException("an unexpected error occurs")
    try:
        res = conn.getresponse()
    except http.client.BadStatusLine:
        raise IRCException("An error occurs")
    finally:
        # Always release the connection, even if reading the response fails.
        conn.close()
    return (res.version, res.status, res.reason, res.getheaders())
def _onNoneDefault():
    # Default onNone callback for fetch()/render(): turn a missing page into
    # a user-facing IRC error instead of silently returning None.
    raise IRCException("An error occurs when trying to access the page")
def fetch(url, onNone=_onNoneDefault):
    """Retrieve the content of the given URL

    Argument:
    url -- the URL to fetch

    When the page yields no content the onNone callback decides the result
    (the default raises IRCException); pass onNone=None to get None back.
    """
    try:
        req = web.getURLContent(url)
        if req is not None:
            return req
        else:
            if onNone is not None:
                return onNone()
            else:
                return None
    except socket.timeout:
        raise IRCException("The request timeout when trying to access the page")
    except socket.error as e:
        raise IRCException(e.strerror)
def render(url, onNone=_onNoneDefault):
    """Use w3m to render the given url

    Argument:
    url -- the URL to render

    Returns w3m's plain-text dump of the page, or None when fetch()
    produced no content and onNone returned None.
    """
    with tempfile.NamedTemporaryFile() as fp:
        cnt = fetch(url, onNone)
        if cnt is None:
            return None
        # NOTE(review): the buffer is not flushed before w3m reads the file;
        # confirm w3m actually sees the full content.
        fp.write(cnt.encode())
        args = ["w3m", "-T", "text/html", "-dump"]
        args.append(fp.name)
        with subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) as proc:
            return proc.stdout.read().decode()
def traceURL(url, stack=None):
    """Follow redirections and return the redirections stack

    Argument:
    url -- the URL to trace
    """
    if stack is None:
        stack = list()
    stack.append(url)
    # Hard cap on redirect depth to avoid unbounded recursion.
    if len(stack) > 15:
        stack.append('stack overflow :(')
        return stack
    # `heads` deliberately avoids shadowing the headers() helper above.
    _, status, _, heads = headers(url)
    if status == http.client.FOUND or status == http.client.MOVED_PERMANENTLY or status == http.client.SEE_OTHER:
        for h, c in heads:
            if h == "Location":
                url = c
        if url in stack:
            stack.append("loop on " + url)
            return stack
        else:
            return traceURL(url, stack)
    return stack
| import http.client
import socket
import subprocess
import tempfile
import urllib
from nemubot import __version__
from nemubot.exception import IRCException
from nemubot.tools import web
def load(CONF, add_hook):
    """Nemubot module entry point; currently a no-op."""
    # check w3m exists
    pass
def headers(url):
    """Retrieve HTTP header for the given URL

    Argument:
    url -- the page URL to get header

    Returns a (version, status, reason, headers) tuple of the HEAD response;
    raises IRCException for bad URLs, timeouts and DNS failures.
    """
    # "http" is the fallback scheme when the URL does not name one.
    o = urllib.parse.urlparse(url, "http")
    if o.netloc == "":
        raise IRCException("invalid URL")
    if o.scheme == "http":
        conn = http.client.HTTPConnection(o.hostname, port=o.port, timeout=5)
    else:
        conn = http.client.HTTPSConnection(o.hostname, port=o.port, timeout=5)
    try:
        conn.request("HEAD", o.path, None, {"User-agent":
                                            "Nemubot v%s" % __version__})
    except socket.timeout:
        raise IRCException("request timeout")
    except socket.gaierror:
        print ("<tools.web> Unable to receive page %s from %s on %d."
               % (o.path, o.hostname, o.port))
        raise IRCException("an unexpected error occurs")
    try:
        res = conn.getresponse()
    except http.client.BadStatusLine:
        raise IRCException("An error occurs")
    finally:
        # Close the connection no matter how reading the response went.
        conn.close()
    return (res.version, res.status, res.reason, res.getheaders())
def _onNoneDefault():
    # Default callback used by fetch()/render() when a page has no content.
    raise IRCException("An error occurs when trying to access the page")
def fetch(url, onNone=_onNoneDefault):
    """Retrieve the content of the given URL

    Argument:
    url -- the URL to fetch

    Empty pages are delegated to the onNone callback; None disables it.
    """
    try:
        req = web.getURLContent(url)
        if req is not None:
            return req
        else:
            if onNone is not None:
                return onNone()
            else:
                return None
    except socket.timeout:
        raise IRCException("The request timeout when trying to access the page")
    except socket.error as e:
        raise IRCException(e.strerror)
def render(url, onNone=_onNoneDefault):
    """Use w3m to render the given url

    Argument:
    url -- the URL to render
    """
    with tempfile.NamedTemporaryFile() as fp:
        cnt = fetch(url, onNone)
        if cnt is None:
            return None
        # The fetched HTML is written to a temp file for w3m to dump as text.
        fp.write(cnt.encode())
        args = ["w3m", "-T", "text/html", "-dump"]
        args.append(fp.name)
        with subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) as proc:
            return proc.stdout.read().decode()
def traceURL(url, stack=None):
    """Follow redirections and return the redirection stack.

    Argument:
    url -- the URL to trace
    """
    if stack is None:
        stack = list()
    stack.append(url)
    # Hard cap on redirect depth to avoid unbounded recursion.
    if len(stack) > 15:
        stack.append('stack overflow :(')
        return stack
    # Fixed: this local used to be named `headers`, which made `headers` a
    # local variable for the whole function body and turned the call itself
    # into an UnboundLocalError.
    _, status, _, heads = headers(url)
    if status == http.client.FOUND or status == http.client.MOVED_PERMANENTLY or status == http.client.SEE_OTHER:
        for h, c in heads:
            if h == "Location":
                url = c
        if url in stack:
            stack.append("loop on " + url)
            return stack
        else:
            return traceURL(url, stack)
    return stack
| Python | 0.00001 |
e4a5dd51829df198a07232afc06afdff6089ae6c | fix wmt datatype checking (#1259) | parlai/tasks/wmt/agents.py | parlai/tasks/wmt/agents.py | #!/usr/bin/env python3
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from parlai.core.teachers import FbDialogTeacher
from .build import build
import copy
import os
def _path(task, opt, dt):
    """Return the on-disk path of the data file for ``task`` and datatype
    ``dt``, building the dataset first if it does not exist yet."""
    build(opt)
    filename = '{task}_{type}.txt'.format(task=task, type=dt)
    return os.path.join(opt['datapath'], 'wmt', filename)
class EnDeTeacher(FbDialogTeacher):
    """English-German WMT translation teacher."""
    def __init__(self, opt, shared=None):
        # Deep-copy so setting opt['datafile'] does not leak to the caller.
        opt = copy.deepcopy(opt)
        self.task_name = 'en_de'
        # Keep only the base datatype ('train:stream' -> 'train') when
        # building the data filename.
        dt = opt['datatype'].split(':')[0]
        opt['datafile'] = _path(self.task_name, opt, dt)
        super().__init__(opt, shared)
class DefaultTeacher(EnDeTeacher):
    # English-German is the default WMT task.
    pass
| #!/usr/bin/env python3
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from parlai.core.teachers import FbDialogTeacher
from .build import build
import copy
import os
def _path(task, opt, dt=''):
    # Build the data if it doesn't exist.
    build(opt)
    # When no datatype is given explicitly, derive it from the options and
    # keep only the base part ('train:stream' -> 'train').
    if dt == '':
        dt = opt['datatype'].split(':')[0]
    return os.path.join(opt['datapath'], 'wmt',
                        '{task}_{type}.txt'.format(task=task, type=dt))
class EnDeTeacher(FbDialogTeacher):
    """English-German WMT translation teacher."""
    def __init__(self, opt, shared=None):
        opt = copy.deepcopy(opt)
        task = opt.get('task', 'wmt:en_de')
        self.task_name = task.split(':')[1] if ':' in task else 'en_de'
        # Strip any datatype modifier ('train:stream' -> 'train') before
        # building the filename: passing the raw datatype produced paths like
        # 'en_de_train:stream.txt' that never exist on disk, because _path
        # only normalizes the datatype when dt is empty.
        dt = opt['datatype'].split(':')[0]
        opt['datafile'] = _path(self.task_name, opt, dt)
        super().__init__(opt, shared)
class DefaultTeacher(EnDeTeacher):
    # English-German translation is the default task for wmt.
    pass
| Python | 0 |
542bb81e68975d52c23fa3829233cbff9ead39a7 | Use json for PUT | mnubo/api_manager.py | mnubo/api_manager.py | import requests
import json
import base64
import datetime
def authenticate(func):
    """Decorator for APIManager methods: refresh the access token when it
    has expired, then invoke the wrapped method.

    ``args[0]`` is the APIManager instance (``self``).
    """
    from functools import wraps

    @wraps(func)  # keep the wrapped method's name and docstring
    def authenticate_and_call(*args, **kwargs):
        manager = args[0]
        if not manager.is_access_token_valid():
            manager.access_token = manager.fetch_access_token()
        # Forward keyword arguments too; the previous wrapper dropped them,
        # breaking calls such as api.post(route, body=...).
        return func(*args, **kwargs)
    return authenticate_and_call
class APIManager(object):
    """Holds mnubo API credentials and performs authenticated HTTP calls."""

    def __init__(self, client_id, client_secret, hostname):
        """ Initializes the API Manager which is responsible for authenticating every request.

        :param client_id: the client id generated by mnubo
        :param client_secret: the client secret generated by mnubo
        :param hostname: the hostname to send the requests (sandbox or production)
        """
        self.__client_id = client_id
        self.__client_secret = client_secret
        self.__hostname = hostname
        # Fetch an initial token eagerly so the first API call is ready.
        self.access_token = self.fetch_access_token()

    def fetch_access_token(self):
        """ Requests the access token necessary to communicate with the mnubo plateform
        """
        requested_at = datetime.datetime.now()
        r = requests.post(self.get_auth_url(), headers=self.get_token_authorization_header())
        json_response = json.loads(r.content)
        # expires_in is stored as a timedelta so expiry checking is plain
        # datetime arithmetic (see is_access_token_valid).
        token = {'access_token': json_response['access_token'], 'expires_in': datetime.timedelta(0, json_response['expires_in']), 'requested_at': requested_at}
        return token

    def is_access_token_valid(self):
        """ Validates if the token is still valid

        :return: True of the token is still valid, False if it is expired
        """
        return self.access_token['requested_at'] + self.access_token['expires_in'] > datetime.datetime.now()

    def get_token_authorization_header(self):
        """ Generates the authorization header used while requesting an access token
        """
        return {'content-type': 'application/x-www-form-urlencoded', 'Authorization': "Basic " + base64.b64encode(self.__client_id + ":" + self.__client_secret)}

    def get_authorization_header(self):
        """ Generates the authorization header used to access resources via mnubo's API
        """
        return {'content-type': 'application/json', 'Authorization': 'Bearer ' + self.access_token['access_token']}

    def get_api_url(self):
        """ Generates the general API url
        """
        return self.__hostname + '/api/v3/'

    def get_auth_url(self):
        """ Generates the url to fetch the access token
        """
        return self.__hostname + '/oauth/token?grant_type=client_credentials'

    @authenticate
    def post(self, route, body={}):
        """ Build and send a post request authenticated

        :param route: which resource to access via the REST API
        :param body: body to be appended to the HTTP request
        """
        url = self.get_api_url() + route
        headers = self.get_authorization_header()
        return requests.post(url, data=body, headers=headers)

    @authenticate
    def put(self, route, body={}):
        """ Build and send a put request authenticated

        :param route: which resource to access via the REST API
        :param body: body to be appended to the HTTP request
        """
        url = self.get_api_url() + route
        headers = self.get_authorization_header()
        # json= serializes the body as JSON, matching the declared
        # application/json content type.
        return requests.put(url, json=body, headers=headers)

    @authenticate
    def delete(self, route):
        """ Build and send a delete request authenticated

        :param route: which resource to access via the REST API
        """
        url = self.get_api_url() + route
        headers = self.get_authorization_header()
        return requests.delete(url, headers=headers)
| import requests
import json
import base64
import datetime
def authenticate(func):
    """Decorator: ensure a valid access token before calling the wrapped
    APIManager method (args[0] is the instance)."""
    def authenticate_and_call(*args):
        # Refresh lazily, only when the cached token has expired.
        if not args[0].is_access_token_valid():
            args[0].access_token = args[0].fetch_access_token()
        return func(*args)
    return authenticate_and_call
class APIManager(object):
    """Holds mnubo API credentials and performs authenticated HTTP calls."""

    def __init__(self, client_id, client_secret, hostname):
        """ Initializes the API Manager which is responsible for authenticating every request.

        :param client_id: the client id generated by mnubo
        :param client_secret: the client secret generated by mnubo
        :param hostname: the hostname to send the requests (sandbox or production)
        """
        self.__client_id = client_id
        self.__client_secret = client_secret
        self.__hostname = hostname
        self.access_token = self.fetch_access_token()

    def fetch_access_token(self):
        """ Requests the access token necessary to communicate with the mnubo plateform
        """
        requested_at = datetime.datetime.now()
        r = requests.post(self.get_auth_url(), headers=self.get_token_authorization_header())
        json_response = json.loads(r.content)
        token = {'access_token': json_response['access_token'], 'expires_in': datetime.timedelta(0, json_response['expires_in']), 'requested_at': requested_at}
        return token

    def is_access_token_valid(self):
        """ Validates if the token is still valid

        :return: True of the token is still valid, False if it is expired
        """
        return self.access_token['requested_at'] + self.access_token['expires_in'] > datetime.datetime.now()

    def get_token_authorization_header(self):
        """ Generates the authorization header used while requesting an access token
        """
        return {'content-type': 'application/x-www-form-urlencoded', 'Authorization': "Basic " + base64.b64encode(self.__client_id + ":" + self.__client_secret)}

    def get_authorization_header(self):
        """ Generates the authorization header used to access resources via mnubo's API
        """
        return {'content-type': 'application/json', 'Authorization': 'Bearer ' + self.access_token['access_token']}

    def get_api_url(self):
        """ Generates the general API url
        """
        return self.__hostname + '/api/v3/'

    def get_auth_url(self):
        """ Generates the url to fetch the access token
        """
        return self.__hostname + '/oauth/token?grant_type=client_credentials'

    @authenticate
    def post(self, route, body={}):
        """ Build and send a post request authenticated

        :param route: which resource to access via the REST API
        :param body: body to be appended to the HTTP request
        """
        url = self.get_api_url() + route
        headers = self.get_authorization_header()
        return requests.post(url, data=body, headers=headers)

    @authenticate
    def put(self, route, body={}):
        """ Build and send a put request authenticated

        :param route: which resource to access via the REST API
        :param body: body to be appended to the HTTP request
        """
        url = self.get_api_url() + route
        headers = self.get_authorization_header()
        # Fixed: `data=body` sent the payload form-encoded even though
        # get_authorization_header() declares content-type application/json;
        # `json=` serializes it as the JSON the API expects.
        return requests.put(url, json=body, headers=headers)

    @authenticate
    def delete(self, route):
        """ Build and send a delete request authenticated

        :param route: which resource to access via the REST API
        """
        url = self.get_api_url() + route
        headers = self.get_authorization_header()
        return requests.delete(url, headers=headers)
| Python | 0.000002 |
226b27ad6e66c7d512ce6cad300b7f96de5ccfa7 | Introduce cache feature to GoogleDrive base logic. | model/googledrive.py | model/googledrive.py | # -*- encoding:utf8 -*-
import os
import httplib2
from oauth2client.client import SignedJwtAssertionCredentials
from apiclient.discovery import build
from model.cache import Cache
class GoogleDrive(object):
    """Read-only access to Google Drive documents, with a local cache."""

    @classmethod
    def retrieve_content(cls, **kwargs):
        """Export a Drive document's content.

        Keyword arguments:
        document_id -- the Drive file id to export
        export_type -- the desired export MIME type

        Returns the exported content, a (Japanese) error message on failure,
        or None when arguments or credentials are missing.
        """
        document_id = kwargs.get('document_id')
        export_type = kwargs.get('export_type')
        if not document_id:
            print("There is no documentID")
            return None
        if not export_type:
            print("There is no exportType")
            return None
        # Check document cache exists
        content = Cache().get(document_id)
        if content:
            return content
        try:
            private_key = os.environ['GOOGLE_PRIVATE_KEY']
            if not private_key:
                return None
            # Service-account credentials, impersonating the owner account.
            credential_args = (
                os.environ['GOOGLE_CLIENT_EMAIL'],
                private_key,
                'https://www.googleapis.com/auth/drive'
            )
            credential_kwargs = {
                'sub': os.environ.get('GOOGLE_OWNER_EMAIL')
            }
            credentials = SignedJwtAssertionCredentials(*credential_args, **credential_kwargs)
            http = httplib2.Http()
            credentials.authorize(http)
            service = build('drive', 'v2', http=http)
            f = service.files().get(fileId=document_id).execute()
            if 'exportLinks' in f and export_type in f['exportLinks']:
                download = f['exportLinks'][export_type]
                resp, content = service._http.request(download)
                # Set document cache
                Cache().set(document_id, content)
            else:
                content = '読み込みに失敗したにゃー'
        except Exception as e:
            # NOTE(review): e.message is Python 2 only -- confirm the runtime.
            content = '読み込みに失敗したにゃーー : ' + str(e) + ' / ' + str(e.message)
        return content
| # -*- encoding:utf8 -*-
import os
import httplib2
from oauth2client.client import SignedJwtAssertionCredentials
from apiclient.discovery import build
class GoogleDrive(object):
    """Read-only access to Google Drive documents (no caching)."""

    @classmethod
    def retrieve_content(cls, **kwargs):
        """Export a Drive document's content.

        Keyword arguments:
        document_id -- the Drive file id to export
        export_type -- the desired export MIME type

        Returns the exported content, a (Japanese) error message on failure,
        or None when arguments or credentials are missing.
        """
        document_id = kwargs.get('document_id')
        export_type = kwargs.get('export_type')
        if not document_id:
            print("There is no documentID")
            return None
        if not export_type:
            print("There is no exportType")
            return None
        try:
            private_key = os.environ['GOOGLE_PRIVATE_KEY']
            if not private_key:
                return None
            # Service-account credentials, impersonating the owner account.
            credential_args = (
                os.environ['GOOGLE_CLIENT_EMAIL'],
                private_key,
                'https://www.googleapis.com/auth/drive'
            )
            credential_kwargs = {
                'sub': os.environ.get('GOOGLE_OWNER_EMAIL')
            }
            credentials = SignedJwtAssertionCredentials(*credential_args, **credential_kwargs)
            http = httplib2.Http()
            credentials.authorize(http)
            service = build('drive', 'v2', http=http)
            f = service.files().get(fileId=document_id).execute()
            if 'exportLinks' in f and export_type in f['exportLinks']:
                download = f['exportLinks'][export_type]
                resp, content = service._http.request(download)
            else:
                content = '読み込みに失敗したにゃー'
        except Exception as e:
            # NOTE(review): e.message is Python 2 only -- confirm the runtime.
            content = '読み込みに失敗したにゃーー : ' + str(e) + ' / ' + str(e.message)
        return content
| Python | 0 |
2ab36d3f98a3b909801b557df39742ef3a09d561 | Remove unused flag on autodiscover and handle_translation_registrations() | modeltrans/models.py | modeltrans/models.py |
def autodiscover():
    '''
    Auto-discover INSTALLED_APPS translation.py modules and fail silently when
    not present. This forces an import on them to register.
    Also import explicit modules.
    '''
    # Imports are local so merely importing this module has no Django-setup
    # side effects.
    import os
    import sys
    import copy
    from django.utils.module_loading import module_has_submodule
    from modeltrans.translator import translator
    from importlib import import_module
    from django.conf import settings
    from django.apps import apps
    mods = [(app_config.name, app_config.module) for app_config in apps.get_app_configs()]
    for (app, mod) in mods:
        # Attempt to import the app's translation module.
        module = '%s.translation' % app
        before_import_registry = copy.copy(translator._registry)
        try:
            import_module(module)
        except:
            # Reset the model registry to the state before the last import as
            # this import will have to reoccur on the next request and this
            # could raise NotRegistered and AlreadyRegistered exceptions
            translator._registry = before_import_registry
            # Decide whether to bubble up this error. If the app just
            # doesn't have an translation module, we can ignore the error
            # attempting to import it, otherwise we want it to bubble up.
            if module_has_submodule(mod, 'translation'):
                raise
    # In debug mode, print a list of registered models and pid to stdout.
    # Note: Differing model order is fine, we don't rely on a particular
    # order, as far as base classes are registered before subclasses.
    if settings.DEBUG:
        try:
            if sys.argv[1] in ('runserver', 'runserver_plus'):
                models = translator.get_registered_models()
                names = ', '.join(m.__name__ for m in models)
                print('modeltrans: Registered %d models for translation'
                      ' (%s) [pid: %d].' % (len(models), names, os.getpid()))
        except IndexError:
            pass
def handle_translation_registrations():
    '''
    Ensures that any configuration of the TranslationOption(s) are handled when
    importing modeltranslation.
    This makes it possible for scripts/management commands that affect models
    but know nothing of modeltranslation.
    '''
    # Imported here to avoid settings access at module import time.
    from modeltrans.settings import ENABLE_REGISTRATIONS
    if not ENABLE_REGISTRATIONS:
        # If the user really wants to disable this, they can, possibly at their
        # own expense. This is generally only required in cases where other
        # apps generate import errors and requires extra work on the user's
        # part to make things work.
        return
    # Trigger autodiscover, causing any TranslationOption initialization
    # code to execute.
    autodiscover()
|
def autodiscover(create_virtual_fields=True):
    '''
    Auto-discover INSTALLED_APPS translation.py modules and fail silently when
    not present. This forces an import on them to register.
    Also import explicit modules.
    '''
    # Imports are local so merely importing this module has no Django-setup
    # side effects.
    import os
    import sys
    import copy
    from django.utils.module_loading import module_has_submodule
    from modeltrans.translator import translator
    # Callers can opt out of virtual-field creation on the shared translator.
    if not create_virtual_fields:
        translator.disable_create_virtual_fields()
    from importlib import import_module
    from django.conf import settings
    from django.apps import apps
    mods = [(app_config.name, app_config.module) for app_config in apps.get_app_configs()]
    for (app, mod) in mods:
        # Attempt to import the app's translation module.
        module = '%s.translation' % app
        before_import_registry = copy.copy(translator._registry)
        try:
            import_module(module)
        except:
            # Reset the model registry to the state before the last import as
            # this import will have to reoccur on the next request and this
            # could raise NotRegistered and AlreadyRegistered exceptions
            translator._registry = before_import_registry
            # Decide whether to bubble up this error. If the app just
            # doesn't have an translation module, we can ignore the error
            # attempting to import it, otherwise we want it to bubble up.
            if module_has_submodule(mod, 'translation'):
                raise
    # In debug mode, print a list of registered models and pid to stdout.
    # Note: Differing model order is fine, we don't rely on a particular
    # order, as far as base classes are registered before subclasses.
    if settings.DEBUG:
        try:
            if sys.argv[1] in ('runserver', 'runserver_plus'):
                models = translator.get_registered_models()
                names = ', '.join(m.__name__ for m in models)
                print('modeltrans: Registered %d models for translation'
                      ' (%s) [pid: %d].' % (len(models), names, os.getpid()))
        except IndexError:
            pass
def handle_translation_registrations(create_virtual_fields=True):
    '''
    Ensures that any configuration of the TranslationOption(s) are handled when
    importing modeltranslation.
    This makes it possible for scripts/management commands that affect models
    but know nothing of modeltranslation.
    '''
    # Imported here to avoid settings access at module import time.
    from modeltrans.settings import ENABLE_REGISTRATIONS
    if not ENABLE_REGISTRATIONS:
        # If the user really wants to disable this, they can, possibly at their
        # own expense. This is generally only required in cases where other
        # apps generate import errors and requires extra work on the user's
        # part to make things work.
        return
    # Trigger autodiscover, causing any TranslationOption initialization
    # code to execute.
    autodiscover(create_virtual_fields)
| Python | 0 |
4c4499dcb86ae16a7d3822feab4390adca89d348 | Bump version to 0.12.1 | pingparsing/__version__.py | pingparsing/__version__.py | # encoding: utf-8
from datetime import datetime

# Package metadata for pingparsing.
__author__ = "Tsuyoshi Hombashi"
# The copyright year is computed at import time so it never goes stale.
__copyright__ = "Copyright {}, {}".format(datetime.now().year, __author__)
__license__ = "MIT License"
__version__ = "0.12.1"
__maintainer__ = __author__
__email__ = "tsuyoshi.hombashi@gmail.com"
| # encoding: utf-8
from datetime import datetime

# Package metadata for pingparsing.
__author__ = "Tsuyoshi Hombashi"
# The copyright year is computed at import time so it never goes stale.
__copyright__ = "Copyright {}, {}".format(datetime.now().year, __author__)
__license__ = "MIT License"
__version__ = "0.12.0"
__maintainer__ = __author__
__email__ = "tsuyoshi.hombashi@gmail.com"
| Python | 0 |
0b94543d605ad64149faa6df2e3d8bf2e4b5c08c | remove print statement | plots/gender_by_country.py | plots/gender_by_country.py | from __future__ import print_function
from collections import OrderedDict
import csv
import numpy as np
import pandas
import world_countries as wc
from bokeh.models import HoverTool, ColumnDataSource
from bokeh.plotting import figure
from bokeh.resources import CDN
from bokeh.embed import autoload_static
import os
def plot(newest_changes):
    """Build the gender-by-country Bokeh world map and return its embed tag.

    newest_changes -- snapshot folder name; 'newest-changes' selects the
    diff-since-last-snapshot data, anything else the all-time data.
    """
    filelist = os.listdir('/home/maximilianklein/snapshot_data/{}/'.format(newest_changes))
    site_linkss_file = [f for f in filelist if f.startswith('worldmap')][0]
    if newest_changes == 'newest-changes':
        # The covered date range is encoded in the csv filename.
        date_range = site_linkss_file.split('worldmap-index-from-')[1].split('.csv')[0].replace('-',' ')
        print(date_range)
    csv_to_read = '/home/maximilianklein/snapshot_data/{}/{}'.format(newest_changes,site_linkss_file)
    df = pandas.DataFrame.from_csv(csv_to_read)
    major = df[df['total'] > 100]
    # https://github.com/chdoig/pyladiesatx-bokeh-tutorial
    world_countries = wc.data.copy()
    country_xs = [world_countries[code]['lons'] for code in world_countries]
    country_ys = [world_countries[code]['lats'] for code in world_countries]
    country_names = [world_countries[code]['name'] for code in world_countries]

    def lookup_wigi(code):
        # -1 marks countries with no index value so they render distinctly.
        try:
            return df.ix[code]['Score']
        except KeyError:
            return -1

    index_vals = np.array([lookup_wigi(code) for code in world_countries])

    def fmt(c):
        # NaN-safe conversion for the per-channel color components.
        return int(np.nan_to_num(c))

    colors = [
        "#%02x%02x%02x" % (fmt(r), fmt(g), fmt(b)) for r, g, b in
        zip(np.floor(250*(1-index_vals)),
            np.floor(200*(1-index_vals)),
            np.floor(100*index_vals))]
    source = ColumnDataSource(
        data=dict(
            name=country_names,
            wigi_index=[str(idx) for idx in index_vals]
        )
    )
    # setup widgets
    TOOLS = "pan,wheel_zoom,box_zoom,reset,hover,save"
    title_suffix = 'Changes since {}'.format(date_range) if newest_changes == 'newest-changes' else 'All Time'
    p = figure(title="Gender by Country {}".format(title_suffix), tools=TOOLS)
    p.patches(country_xs, country_ys, fill_color=colors, source=source)
    hover = p.select(dict(type=HoverTool))
    hover.point_policy = "follow_mouse"
    hover.tooltips = OrderedDict([
        ("wigi", "@wigi_index"),
        ("Country", "@name"),
    ])
    js_filename = "gender_by_country_{}.js".format(newest_changes)
    script_path = "./assets/js/"
    output_path = "./files/assets/js/"
    # generate javascript plot and corresponding script tag
    js, tag = autoload_static(p, CDN, script_path + js_filename)
    with open(output_path + js_filename, 'w') as js_file:
        js_file.write(js)
    return tag
if __name__ == "__main__":
print(plot('newest'))
print(plot('newest-changes'))
| from __future__ import print_function
from collections import OrderedDict
import csv
import numpy as np
import pandas
import world_countries as wc
from bokeh.models import HoverTool, ColumnDataSource
from bokeh.plotting import figure
from bokeh.resources import CDN
from bokeh.embed import autoload_static
import os
def plot(newest_changes):
filelist = os.listdir('/home/maximilianklein/snapshot_data/{}/'.format(newest_changes))
site_linkss_file = [f for f in filelist if f.startswith('worldmap')][0]
if newest_changes == 'newest-changes':
date_range = site_linkss_file.split('worldmap-index-from-')[1].split('.csv')[0].replace('-',' ')
print(date_range)
csv_to_read = '/home/maximilianklein/snapshot_data/{}/{}'.format(newest_changes,site_linkss_file)
df = pandas.DataFrame.from_csv(csv_to_read)
major = df[df['total'] > 100]
# https://github.com/chdoig/pyladiesatx-bokeh-tutorial
world_countries = wc.data.copy()
country_xs = [world_countries[code]['lons'] for code in world_countries]
country_ys = [world_countries[code]['lats'] for code in world_countries]
country_names = [world_countries[code]['name'] for code in world_countries]
def lookup_wigi(code):
try:
return df.ix[code]['Score']
except KeyError:
return -1
index_vals = np.array([lookup_wigi(code) for code in world_countries])
def fmt(c):
return int(np.nan_to_num(c))
colors = [
"#%02x%02x%02x" % (fmt(r), fmt(g), fmt(b)) for r, g, b in
zip(np.floor(250*(1-index_vals)),
np.floor(200*(1-index_vals)),
np.floor(100*index_vals))]
print(colors)
source = ColumnDataSource(
data=dict(
name=country_names,
wigi_index=[str(idx) for idx in index_vals]
)
)
# setup widgets
TOOLS = "pan,wheel_zoom,box_zoom,reset,hover,save"
title_suffix = 'Changes since {}'.format(date_range) if newest_changes == 'newest-changes' else 'All Time'
p = figure(title="Gender by Country {}".format(title_suffix), tools=TOOLS)
p.patches(country_xs, country_ys, fill_color=colors, source=source)
hover = p.select(dict(type=HoverTool))
hover.point_policy = "follow_mouse"
hover.tooltips = OrderedDict([
("wigi", "@wigi_index"),
("Country", "@name"),
])
js_filename = "gender_by_country_{}.js".format(newest_changes)
script_path = "./assets/js/"
output_path = "./files/assets/js/"
# generate javascript plot and corresponding script tag
js, tag = autoload_static(p, CDN, script_path + js_filename)
with open(output_path + js_filename, 'w') as js_file:
js_file.write(js)
return tag
if __name__ == "__main__":
print(plot('newest'))
print(plot('newest-changes'))
| Python | 0.999999 |
12995be9490bde60c92e6f962b748832c083fe45 | use API and HTTP HEAD instead | modules/subreddit.py | modules/subreddit.py | import re
import urllib.request as req
import urllib.error as err
class SubredditModule:
    """IRC bot module: detects /r/<subreddit> mentions in channel messages
    and announces the subreddit URL if the subreddit actually exists."""

    # Matches a subreddit reference at message start or after a space.
    subre = re.compile(r"^(?:.* )?/r/([A-Za-z0-9][A-Za-z0-9_]{2,20})")

    def __init__(self, circa):
        self.circa = circa
        self.events = {
            "message": [self.findsub]
        }

    def findsub(self, fr, to, msg, m):
        """Verify each mentioned subreddit via an HTTP HEAD against the
        reddit API and post its URL to the channel on success."""
        for sub in self.subre.findall(msg):
            try:
                r = req.Request("http://api.reddit.com/r/" + sub + ".json")
                # HEAD avoids downloading the listing body; only existence
                # matters here.
                r.get_method = lambda: "HEAD"
                req.urlopen(r)
                self.circa.say(to, "http://www.reddit.com/r/" + sub)
            except err.URLError:
                # URLError covers HTTPError (nonexistent subreddit) as well
                # as connection failures; either way, stay silent instead of
                # crashing the event handler.
                pass
module = SubredditModule
| import re
import urllib.request as req
class SubredditModule:
subre = re.compile(r"^(?:.* )?/r/([A-Za-z0-9][A-Za-z0-9_]{2,20})")
def __init__(self, circa):
self.circa = circa
self.events = {
"message": [self.findsub]
}
def findsub(self, fr, to, msg, m):
for sub in self.subre.findall(msg):
url = "http://www.reddit.com/r/" + sub
try:
req.urlopen(url)
self.circa.say(to, url)
except:
pass
module = SubredditModule
| Python | 0 |
c1e84bd196f28c35b032a609a3edb5f596216f71 | fix for document.iter | mongoext/document.py | mongoext/document.py | from __future__ import absolute_import
import mongoext.collection
import mongoext.scheme
import mongoext.exc
class MetaDocument(type):
    """Metaclass that gathers ``Field`` attributes declared on a Document
    class and its bases into a Scheme stored as ``__scheme__``."""

    def __new__(cls, name, bases, attrs):
        fields = {}
        # BUG FIX: the loop variables were previously also called ``name``,
        # clobbering the class-name parameter before it was forwarded to
        # type.__new__ below (the class ended up named after the last
        # attribute iterated).
        for base in bases:
            for attr_name, obj in vars(base).iteritems():
                if issubclass(type(obj), mongoext.scheme.Field):
                    fields[attr_name] = obj
        for attr_name, obj in attrs.iteritems():
            if issubclass(type(obj), mongoext.scheme.Field):
                fields[attr_name] = obj
        attrs['__scheme__'] = mongoext.scheme.Scheme(fields)
        return super(MetaDocument, cls).__new__(cls, name, bases, attrs)
class Document(object):
    """Base class for mongoext documents.  Attribute assignment in
    __init__ is validated against the class ``__scheme__`` built by
    MetaDocument."""
    __metaclass__ = MetaDocument
    __scheme__ = None
    _id = mongoext.scheme.Field()

    def __init__(self, **data):
        for name, value in data.items():
            if name not in self.__scheme__:
                raise mongoext.exc.SchemeError(name)
            setattr(self, name, value)

    def __contains__(self, name):
        return name in self.__scheme__

    def __len__(self):
        return len(self.__scheme__)

    def __iter__(self):
        # Yield (field name, value) pairs; unset attributes yield None.
        for name in self.__scheme__:
            yield name, getattr(self, name, None)

    def __hash__(self):
        # BUG FIX: ``super(object, self).__hash__()`` fails with
        # AttributeError because nothing follows ``object`` in the MRO.
        # Use object's identity hash directly.
        return object.__hash__(self)

    def __repr__(self):
        return '<{}: {}>'.format(type(self).__name__, self._id)
| from __future__ import absolute_import
import collections
import mongoext.collection
import mongoext.scheme
import mongoext.exc
class MetaDocument(type):
def __new__(cls, name, bases, attrs):
fields = {}
for base in bases:
for name, obj in vars(base).iteritems():
if issubclass(type(obj), mongoext.scheme.Field):
fields[name] = obj
for name, obj in attrs.iteritems():
if issubclass(type(obj), mongoext.scheme.Field):
fields[name] = obj
attrs['__scheme__'] = mongoext.scheme.Scheme(fields)
return super(MetaDocument, cls).__new__(cls, name, bases, attrs)
class Document(object):
__metaclass__ = MetaDocument
__scheme__ = None
_id = mongoext.scheme.Field()
def __init__(self, **data):
for name, value in data.items():
if name not in self.__scheme__:
raise mongoext.exc.SchemeError(name)
setattr(self, name, value)
def __contains__(self, name):
return name in self.__scheme__
def __len__(self):
return len(self.__scheme__)
def __iter__(self):
for name in self.__scheme__:
yield name, getattr(self, name)
def __hash__(self):
return super(object, self).__hash__()
def __repr__(self):
return '<{}: {}>'.format(type(self).__name__, self._id)
| Python | 0.000001 |
15c2595e126689d184a5de52b8f209b4e3e6eb67 | add a test for json | zq_gen/str.py | zq_gen/str.py | '''
Helper functions for string related operation
'''
import unittest
def cmd_str2dic(cmd_str):
    """Parse a command-line style string into a dict.

    The first word, unless it starts with '-', is stored under 'cmd_name'.
    Each '-flag' word starts a new entry whose value is the following words
    joined by single spaces.  A value wrapped in double quotes ("...") is
    kept together with the quotes stripped; a value wrapped in curly braces
    ({...}) is kept together with the braces preserved (e.g. a JSON blob).
    Words appearing before any flag are collected under the key '...'.
    """
    words = cmd_str.split()
    rst = {}
    if len(words) >= 1:
        begin = 0
        if words[0][0:1] != '-':  # first word is the command name
            rst['cmd_name'] = words[0]
            begin = 1
        curr_word = '...'  # key for values that precede any flag
        quoted = False        # currently inside a "..." value
        curly_braced = False  # currently inside a {...} value
        for word in words[begin:]:
            if quoted:
                if word.endswith('"'):  # closing quote: strip it
                    quoted = False
                    word = word[:-1]
            elif curly_braced:
                if word.endswith('}'):  # closing brace is kept
                    curly_braced = False
            else:
                if word[0:1] == '-':  # a new parameter flag
                    curr_word = word
                    rst[curr_word] = ''
                    continue
                if word.startswith('"'):
                    if word.endswith('"'):  # single-word quoted value
                        word = word[1:-1]
                    else:
                        quoted = True
                        word = word[1:]
                elif word.startswith('{'):
                    if not word.endswith('}'):
                        curly_braced = True
            # Append to the current parameter.  Using ``get`` also creates
            # the default '...' entry on demand, which previously raised
            # KeyError for values appearing before any flag.
            if rst.get(curr_word, ''):
                rst[curr_word] += ' ' + word
            else:
                rst[curr_word] = word
    return rst
# Unit test class
class TestString(unittest.TestCase):
    """Unit tests for cmd_str2dic."""

    def _check(self, cmd_str, expected):
        # Shared assertion helper for all cases.
        self.assertEqual(cmd_str2dic(cmd_str), expected)

    def test_json(self):
        self._check(
            'command -t job_type -p {"num1":1, "num2":2, "str1":"abcd", "str2":"efgh"}',
            {
                'cmd_name' : 'command',
                '-t' : 'job_type',
                '-p' : '{"num1":1, "num2":2, "str1":"abcd", "str2":"efgh"}'
            },
        )

    def test_primary_cmd(self):
        self._check(
            'schedule -n job name -dsc job description -t job_type -p "-d 20 -n 5"',
            {
                'cmd_name' : 'schedule',
                '-n' : 'job name',
                '-dsc' : 'job description',
                '-t' : 'job_type',
                '-p' : '-d 20 -n 5'
            },
        )

    def test_mv_avg_cmd(self):
        self._check(
            '-n 5 -d 20',
            {
                '-n': '5',
                '-d': '20'
            },
        )
if __name__ == '__main__':
unittest.main()
| '''
Helper functions for string related operation
'''
import unittest
def cmd_str2dic(cmd_str):
words = cmd_str.split()
rst = {}
if len(words) >= 1:
begin = 0;
if words[0][0:1] != '-': # the first one could be the the name of the command
rst['cmd_name'] = words[0]
begin = 1
curr_word = '...' # default parameter
quoted = False
for word in words[begin:]:
if quoted: # currently expecting the reverse double quote
if word.endswith('"'):
quoted = False
word = word[:-1]
else:
if word[0:1]=='-': # a new parameter
curr_word = word
rst[curr_word] = ''
continue
if word.startswith('"'):
quoted = True
word = word[1:]
# append to current parameter
if len(rst[curr_word]) == 0: # first value
rst[curr_word] += word
else: # following value, add a space
rst[curr_word] += ' '+word
return rst
# Unit test class
class TestString(unittest.TestCase):
def test_primary_cmd(self):
cmd_str = 'schedule -n job name -dsc job description -t job_type -p "-d 20 -n 5"'
cmd_dict = cmd_str2dic(cmd_str)
exp_dict = {
'cmd_name': 'schedule',
'-n': 'job name',
'-dsc': 'job description',
'-t': 'job_type',
'-p': '-d 20 -n 5'
}
self.assertEqual(cmd_dict, exp_dict)
def test_mv_avg_cmd(self):
cmd_str = '-n 5 -d 20'
cmd_dict = cmd_str2dic(cmd_str)
exp_dict = {
'-n': '5',
'-d': '20'
}
self.assertEqual(cmd_dict, exp_dict)
if __name__ == '__main__':
unittest.main()
| Python | 0.000023 |
8bbb160cc742fa04c7aace678afa0d226c0d1407 | fix sample script | resources/examples/ClickToCall.py | resources/examples/ClickToCall.py | import string, cgi, time, thread
import sys, urllib, urllib2
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
number1 = ""
number2 = ""
listeningPort = 8081
listeningIp = "127.0.0.1"
bluescaleIp = "127.0.0.1"
bluescalePort = 8080
class MyHandler(BaseHTTPRequestHandler):
    """Handles BlueScale HTTP callbacks: POSTs to /Status are logged only;
    any other request is answered with XML that dials number2."""

    def do_GET(self):
        # GET is treated exactly like POST.
        self.do_POST()

    def do_POST(self):
        if self.path == "/Status":
            self.printParams()
            self.postOK()
        else:
            self.printParams()
            self.connectCall()

    def printParams(self):
        # Log every form field of the request body.
        params = self.parseParams()
        for field in params.keys():
            print( field + "=" + "".join(params[field]))

    def postOK(self):
        # Send a bare 200 OK header block.
        self.send_response(200)
        self.send_header("Content-Type", "text/html")
        self.end_headers()

    def parseParams(self):
        # Read exactly Content-Length bytes and parse as form-encoded data.
        length = int(self.headers.getheader('Content-Length'))
        params = cgi.parse_qs(self.rfile.read(length), keep_blank_values=1)
        return params

    def connectCall(self):
        """Reply with BlueScale XML that dials number2 and posts call
        status back to this server's /Status endpoint."""
        self.postOK()
        # BUG FIX: this local used to be named ``str``, making ``str`` a
        # function-local variable, so the call str(listeningPort) on the
        # right-hand side raised UnboundLocalError before assignment.
        response = """
<Response>
    <Dial>
        <Number>""" + number2 + """</Number>
        <Action>http://""" + listeningIp + ":" + str(listeningPort) + """/Status</Action>
    </Dial>
</Response>
"""
        self.wfile.write(response)
        return
def main():
    """Start the local HTTP callback server and trigger the outbound call."""
    try:
        server = HTTPServer( (listeningIp, listeningPort), MyHandler)
        print("going to connect " + number1 + " to " + number2)
        # Serve HTTP callbacks on a background thread (Python 2 ``thread``
        # module), then kick off the call via the BlueScale API.
        thread.start_new_thread(serveWeb, (server,))
        postCall()
        # Keep the main thread alive so the background server keeps running.
        while True:
            time.sleep(5)
        #server.serve_forever()
        #time.sleep(5000)
    except Exception, err:
        print("damn error = " + str(err))
def serveWeb(server):
server.serve_forever()
print("serving...")
def postCall():
data = urllib.urlencode( {"To" : number1, "From": number2, "Url" : "http://" + (listeningIp + ":" + str(listeningPort) + "/")} )
f = urllib.urlopen( "http://" + bluescaleIp + ":" + str(bluescalePort) + "/Calls/" ,data)
r = f.read()
print(r)
if __name__ == '__main__':
number1 = sys.argv[1]
number2 = sys.argv[2]
main()
| import string, cgi, time, thread
import sys, urllib, urllib2
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
number1 = ""
number2 = ""
listeningPort = 8081
listeningIp = "127.0.0.1"
bluescaleIp = "127.0.0.1"
bluescalePort = 8080
class MyHandler(BaseHTTPRequestHandler):
def do_GET(self):
self.do_POST()
def do_POST(self):
if self.path == "/Status":
self.printParams()
self.postOK()
else:
self.printParams()
self.connectCall()
def printParams(self):
params = self.parseParams()
for field in params.keys():
print( field + "=" + "".join(params[field]))
def postOK(self):
self.send_response(200)
self.send_header("Content-Type", "text/html")
self.end_headers()
def parseParams(self):
length = int(self.headers.getheader('Content-Length'))
params = cgi.parse_qs(self.rfile.read(length), keep_blank_values=1)
return params
def connectCall(self):
self.postOK()
str = """
<Response>
<Dial>
<Number>""" + number2 + """</Number>
<Action>http://""" + listeningIp + ":" + str(listeningPort) + """/Status</Action>
</Dial>
</Response>
"""
self.wfile.write(str)
return
def main():
try:
server = HTTPServer( (listeningIp, listeningPort), MyHandler)
print("going to connect " + number1 + " to " + number2)
thread.start_new_thread(serveWeb, (server,))
postCall()
while True:
time.sleep(5)
#server.serve_forever()
#time.sleep(5000)
except Exception, err:
print("damn error = " + str(err))
def serveWeb(server):
server.serve_forever()
print("serving...")
def postCall():
data = urllib.urlencode( {"To" : number1, "From": number2, "Url" : (listeningIp + ":" + str(listeningPort) + "/")} )
f = urllib.urlopen( "http://" + bluescaleIp + ":" + str(bluescalePort) + "/Calls/" ,data)
r = f.read()
print(r)
if __name__ == '__main__':
number1 = sys.argv[1]
number2 = sys.argv[2]
main()
| Python | 0.000001 |
7cd3c0449b05e75ffbe5ba346bab3ff389f63b9d | clean up map_async_bench | tests/benchmark/map_async_bench.py | tests/benchmark/map_async_bench.py | import threading
import random
import time
import logging
import sys
from os.path import dirname
sys.path.append(dirname(dirname(dirname(__file__))))
import hazelcast
REQ_COUNT = 50000
ENTRY_COUNT = 10 * 1000
VALUE_SIZE = 10000
GET_PERCENTAGE = 40
PUT_PERCENTAGE = 40
logging.basicConfig(format='%(asctime)s%(msecs)03d [%(name)s] %(levelname)s: %(message)s', datefmt="%H:%M%:%S,")
logging.getLogger().setLevel(logging.INFO)
logger = logging.getLogger("main")
config = hazelcast.ClientConfig()
config.group_config.name = "dev"
config.group_config.password = "dev-pass"
config.network_config.addresses.append("127.0.0.1:5701")
client = hazelcast.HazelcastClient(config)
class Test(object):
    """Fires REQ_COUNT random async map operations and signals an event
    once every completion callback has fired."""

    def __init__(self):
        self.ops = 0  # completed operations, incremented from callbacks
        self.event = threading.Event()  # set once all REQ_COUNT callbacks ran

    def incr(self, _):
        # Completion callback shared by every async operation; the result
        # argument is ignored.
        # NOTE(review): ``+=`` is not atomic -- if the client invokes
        # callbacks from multiple threads this count can race; confirm the
        # client's callback threading model.
        self.ops += 1
        if self.ops == REQ_COUNT:
            self.event.set()

    def run(self):
        """Issue a randomized mix of get/put/remove async requests."""
        my_map = client.get_map("default")
        for _ in xrange(0, REQ_COUNT):
            key = int(random.random() * ENTRY_COUNT)
            operation = int(random.random() * 100)
            if operation < GET_PERCENTAGE:
                my_map.get_async(key, self.incr)
            elif operation < GET_PERCENTAGE + PUT_PERCENTAGE:
                my_map.put_async(key, "x" * VALUE_SIZE, -1, self.incr)
            else:
                my_map.remove_async(key, self.incr)
t = Test()
start = time.time()
t.run()
t.event.wait()
time_taken = time.time() - start
print("Took %s seconds for %d requests" % (time_taken, REQ_COUNT))
print("ops per second: %s" % (t.ops/time_taken))
| import threading
import random
import time
import logging
import sys
from os.path import dirname
sys.path.append(dirname(dirname(dirname(__file__))))
import hazelcast
REQ_COUNT = 20000
ENTRY_COUNT = 10 * 1000
VALUE_SIZE = 10000
GET_PERCENTAGE = 40
PUT_PERCENTAGE = 40
logging.basicConfig(format='%(asctime)s%(msecs)03d [%(name)s] %(levelname)s: %(message)s', datefmt="%H:%M%:%S,")
logging.getLogger().setLevel(logging.INFO)
logger = logging.getLogger("main")
config = hazelcast.ClientConfig()
config.group_config.name = "dev"
config.group_config.password = "dev-pass"
config.network_config.addresses.append("127.0.0.1:5701")
client = hazelcast.HazelcastClient(config)
class Test(object):
ops = 0
def get_cb(self, _):
self.ops += 1
def put_cb(self, _):
self.ops += 1
def remove_cb(self, _):
self.ops += 1
def run(self):
my_map = client.get_map("default")
for _ in xrange(0, REQ_COUNT):
key = int(random.random() * ENTRY_COUNT)
operation = int(random.random() * 100)
if operation < GET_PERCENTAGE:
my_map.get_async(key, self.get_cb)
elif operation < GET_PERCENTAGE + PUT_PERCENTAGE:
my_map.put_async(key, "x" * VALUE_SIZE, -1, self.put_cb)
else:
my_map.remove_async(key, self.remove_cb)
t = Test()
start = time.time()
t.run()
while t.ops != REQ_COUNT:
time.sleep(0.01)
print("ops per second: %d" % (t.ops/(time.time()-start)))
| Python | 0.000018 |
29315213a8503de018a76badc71da3737d2b54c7 | Fix spiffsgen example test | examples/storage/spiffsgen/example_test.py | examples/storage/spiffsgen/example_test.py | from __future__ import print_function
import os
import hashlib
import ttfw_idf
@ttfw_idf.idf_example_test(env_tag='Example_GENERIC')
def test_examples_spiffsgen(env, extra_data):
    """Flash the spiffsgen example and check its console output against the
    files that were baked into the SPIFFS image at build time."""
    # Test with default build configurations
    dut = env.get_dut('spiffsgen', 'examples/storage/spiffsgen', dut_class=ttfw_idf.ESP32DUT)
    dut.start_app()

    base_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'spiffs_image')

    # Expect hello.txt is read successfully (rstrip: the device's log line
    # carries no trailing newline, while the file on disk does)
    with open(os.path.join(base_dir, 'hello.txt'), 'r') as hello_txt:
        dut.expect('Read from hello.txt: ' + hello_txt.read().rstrip())

    # Expect alice.txt MD5 hash is computed accurately
    with open(os.path.join(base_dir, 'sub', 'alice.txt'), 'rb') as alice_txt:
        alice_md5 = hashlib.md5(alice_txt.read()).hexdigest()
        dut.expect('Computed MD5 hash of alice.txt: ' + alice_md5)
if __name__ == '__main__':
test_examples_spiffsgen()
| from __future__ import print_function
import os
import hashlib
import ttfw_idf
@ttfw_idf.idf_example_test(env_tag='Example_GENERIC')
def test_examples_spiffsgen(env, extra_data):
# Test with default build configurations
dut = env.get_dut('spiffsgen', 'examples/storage/spiffsgen', dut_class=ttfw_idf.ESP32DUT)
dut.start_app()
base_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'spiffs_image')
# Expect hello.txt is read successfully
with open(os.path.join(base_dir, 'hello.txt'), 'r') as hello_txt:
dut.expect('Read from hello.txt: ' + hello_txt.read())
# Expect alice.txt MD5 hash is computed accurately
with open(os.path.join(base_dir, 'sub', 'alice.txt'), 'rb') as alice_txt:
alice_md5 = hashlib.md5(alice_txt.read()).hexdigest()
dut.expect('Computed MD5 hash of alice.txt: ' + alice_md5)
if __name__ == '__main__':
test_examples_spiffsgen()
| Python | 0 |
7c762733311c6a52f0a7605a9495f8234c1d6ff2 | put portLo..Hi as arg | predictor/server/server.py | predictor/server/server.py | #!/usr/bin/python
import sys
from datetime import datetime
from server_thread import ServerThread as Server
def main(argv):
    """Spin up one ServerThread per port in [portLo, portHi].

    argv: [script, serverId, portLo, portHi].  Note the length check uses
    sys.argv while values are read from the ``argv`` parameter; the two are
    the same object when invoked from __main__.
    """
    if len(sys.argv)!=4:
        print 'USAGE: phyton prediction_server.py [serverId] [portLo] [portHi]'
        return
    host = '127.0.0.1'
    serverId = argv[1]
    portLo,portHi = int(argv[2]),int(argv[3])
    upAt = datetime.now().strftime("%Y:%m:%d %H:%M:%S")
    print >> sys.stderr, '******************************************************'
    print >> sys.stderr,"Ijah predictor server :)"
    print >> sys.stderr,"[id= "+serverId+"]"
    print >> sys.stderr,"[ports= "+str(portLo)+" to "+str(portHi)+"]"
    print >> sys.stderr,"[upFrom= "+upAt+"]"
    # One worker thread per port, all daemonized so Ctrl-C kills the process.
    threadList = [Server(i,"serverThread_"+str(serverId)+"_"+str(i),host,port)
                  for i,port in enumerate(range(portLo, portHi+1))]
    for t in threadList:
        t.daemon=True
        t.start()
    # Keep the main thread alive for the daemon threads.
    # NOTE(review): this busy-wait spins a CPU core; a time.sleep in the
    # loop would be kinder.
    while True:
        pass
if __name__ == '__main__':
main(sys.argv)
| #!/usr/bin/python
import sys
from datetime import datetime
from server_thread import ServerThread as Server
from config import serverConfig as scfg
def main():
if len(sys.argv)!=2:
print 'USAGE: phyton prediction_server.py [serverId]'
return
serverId = sys.argv[1]
if serverId not in scfg['ports']:
print 'FATAL: serverId unknown'
return
host = scfg['host']
portLo,portHi = scfg['ports'][serverId]
upAt = datetime.now().strftime("%Y:%m:%d %H:%M:%S")
print >> sys.stderr, '******************************************************'
print >> sys.stderr,"Ijah predictor server :)"
print >> sys.stderr,"[id= "+serverId+"]"
print >> sys.stderr,"[ports= "+str(portLo)+" to "+str(portHi)+"]"
print >> sys.stderr,"[upFrom= "+upAt+"]"
threadList = [Server(i,"serverThread_"+str(serverId)+"_"+str(i),host,port)
for i,port in enumerate(range(portLo, portHi+1))]
for t in threadList:
t.daemon=True
t.start()
while True:
pass
if __name__ == '__main__':
main()
| Python | 0.000001 |
87ca8475f58b057e8043f8b398bd76123a89a733 | Revert "parsing html" | moz/minutes/helpers.py | moz/minutes/helpers.py | #!/usr/bin/env python
# encoding: utf-8
"""
helpers.py
Some modules to help with this project
Created by Karl Dubost on 2016-02-24.
Copyright (c) 2016 La Grange. All rights reserved.
MIT License
"""
import requests
def fetch_content(uri):
'''Fetch the URI and returns the raw content and its encoding'''
content = requests.get(uri)
return content.text, content.encoding
def main():
'''core program'''
pass
if __name__ == "__main__":
sys.exit(main())
| #!/usr/bin/env python
# encoding: utf-8
"""
helpers.py
Some modules to help with this project
Created by Karl Dubost on 2016-02-24.
Copyright (c) 2016 La Grange. All rights reserved.
MIT License
"""
import io
import sys
import lxml.html
import requests
def fetch_content(uri):
'''Fetch the URI and returns the raw content and its encoding'''
content = requests.get(uri)
return content.text, content.encoding
def html_parse(content):
'''returns a parsed HTML content'''
html = ''
try:
html = lxml.html.parse(io.StringIO(content))
except Exception, e:
raise e
return html
def main():
'''core program'''
pass
if __name__ == "__main__":
sys.exit(main())
| Python | 0 |
90b75ba76c5f98abf3d6484cc9c51119042b7812 | Fix issues with the tethys manage sync command. | tethys_apps/cli/manage_commands.py | tethys_apps/cli/manage_commands.py | """
********************************************************************************
* Name: manage_commands.py
* Author: Nathan Swain
* Created On: 2015
* Copyright: (c) Brigham Young University 2015
* License: BSD 2-Clause
********************************************************************************
"""
import os
import subprocess
from tethys_apps.base.testing.environment import set_testing_environment
#/usr/lib/tethys/src/tethys_apps/cli
CURRENT_SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
TETHYS_HOME = os.sep.join(CURRENT_SCRIPT_DIR.split(os.sep)[:-3])
TETHYS_SRC_DIRECTORY = os.sep.join(CURRENT_SCRIPT_DIR.split(os.sep)[:-2])
MANAGE_START = 'start'
MANAGE_SYNCDB = 'syncdb'
MANAGE_COLLECTSTATIC = 'collectstatic'
MANAGE_COLLECTWORKSPACES = 'collectworkspaces'
MANAGE_COLLECT = 'collectall'
MANAGE_CREATESUPERUSER = 'createsuperuser'
MANAGE_SYNC = 'sync'
def get_manage_path(args):
    """Resolve the path to Django's manage.py.

    Uses ``args.manage`` when provided and truthy, otherwise falls back to
    the manage.py inside the Tethys source tree; exits with an error if the
    resolved file does not exist.
    """
    # Default to the manage.py that ships with the Tethys source.
    path = os.path.join(TETHYS_SRC_DIRECTORY, 'manage.py')

    # A user-supplied path (if any) takes precedence over the default.
    user_path = getattr(args, 'manage', None)
    if user_path:
        path = user_path

    # Bail out when the resolved file is missing.
    if not os.path.isfile(path):
        print('ERROR: Can\'t open file "{0}", no such file.'.format(path))
        exit(1)

    return path
def manage_command(args):
    """
    Management commands.

    Maps a tethys CLI subcommand to the corresponding manage.py invocation
    (or to an in-process action for ``sync``).
    """
    # Get the path to manage.py
    manage_path = get_manage_path(args)

    # Define the process to be run
    primary_process = None

    if args.command == MANAGE_START:
        if args.port:
            primary_process = ['python', manage_path, 'runserver', args.port]
        else:
            primary_process = ['python', manage_path, 'runserver']
    elif args.command == MANAGE_SYNCDB:
        # Generate migrations first, then apply them.
        intermediate_process = ['python', manage_path, 'makemigrations']
        run_process(intermediate_process)

        primary_process = ['python', manage_path, 'migrate']

    elif args.command == MANAGE_COLLECTSTATIC:
        # Run pre_collectstatic
        intermediate_process = ['python', manage_path, 'pre_collectstatic']
        run_process(intermediate_process)

        # Setup for main collectstatic
        primary_process = ['python', manage_path, 'collectstatic']

        if args.noinput:
            primary_process.append('--noinput')

    elif args.command == MANAGE_COLLECTWORKSPACES:
        # Run collectworkspaces command
        if args.force:
            primary_process = ['python', manage_path, 'collectworkspaces', '--force']
        else:
            primary_process = ['python', manage_path, 'collectworkspaces']

    elif args.command == MANAGE_COLLECT:
        # Convenience command to run collectstatic and collectworkspaces
        ## Run pre_collectstatic
        intermediate_process = ['python', manage_path, 'pre_collectstatic']
        run_process(intermediate_process)

        ## Setup for main collectstatic
        intermediate_process = ['python', manage_path, 'collectstatic']

        if args.noinput:
            intermediate_process.append('--noinput')

        run_process(intermediate_process)

        ## Run collectworkspaces command
        primary_process = ['python', manage_path, 'collectworkspaces']

    elif args.command == MANAGE_CREATESUPERUSER:
        primary_process = ['python', manage_path, 'createsuperuser']

    elif args.command == MANAGE_SYNC:
        # Sync the app registry in-process (no manage.py subprocess needed).
        # Imported lazily so the other commands don't require a configured
        # Django environment.
        from tethys_apps.harvester import SingletonHarvester
        # BUG FIX: ``harvester`` was used without being defined (NameError);
        # instantiate the singleton harvester before calling harvest().
        harvester = SingletonHarvester()
        harvester.harvest()

    if primary_process:
        run_process(primary_process)
def run_process(process):
    """Run ``process`` (an argv list) via subprocess, swallowing Ctrl-C.

    When the argv contains 'test', the Tethys testing-environment flag is
    enabled for the duration of the call and always cleared afterwards.
    """
    # Call the process with a little trick to ignore the keyboard interrupt error when it happens
    try:
        if 'test' in process:
            set_testing_environment(True)
        subprocess.call(process)
    except KeyboardInterrupt:
        pass
    finally:
        # Always reset, even if the flag was never set or the call raised.
        set_testing_environment(False)
| """
********************************************************************************
* Name: manage_commands.py
* Author: Nathan Swain
* Created On: 2015
* Copyright: (c) Brigham Young University 2015
* License: BSD 2-Clause
********************************************************************************
"""
import os
import subprocess
from tethys_apps.base.testing.environment import set_testing_environment
#/usr/lib/tethys/src/tethys_apps/cli
CURRENT_SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
TETHYS_HOME = os.sep.join(CURRENT_SCRIPT_DIR.split(os.sep)[:-3])
TETHYS_SRC_DIRECTORY = os.sep.join(CURRENT_SCRIPT_DIR.split(os.sep)[:-2])
MANAGE_START = 'start'
MANAGE_SYNCDB = 'syncdb'
MANAGE_COLLECTSTATIC = 'collectstatic'
MANAGE_COLLECTWORKSPACES = 'collectworkspaces'
MANAGE_COLLECT = 'collectall'
MANAGE_CREATESUPERUSER = 'createsuperuser'
MANAGE_SYNC = 'sync'
def get_manage_path(args):
"""
Validate user defined manage path, use default, or throw error
"""
# Determine path to manage.py file
manage_path = os.path.join(TETHYS_SRC_DIRECTORY, 'manage.py')
# Check for path option
if hasattr(args, 'manage'):
manage_path = args.manage or manage_path
# Throw error if path is not valid
if not os.path.isfile(manage_path):
print('ERROR: Can\'t open file "{0}", no such file.'.format(manage_path))
exit(1)
return manage_path
def manage_command(args):
"""
Management commands.
"""
# Get the path to manage.py
manage_path = get_manage_path(args)
# Define the process to be run
primary_process = None
if args.command == MANAGE_START:
if args.port:
primary_process = ['python', manage_path, 'runserver', args.port]
else:
primary_process = ['python', manage_path, 'runserver']
elif args.command == MANAGE_SYNCDB:
intermediate_process = ['python', manage_path, 'makemigrations']
run_process(intermediate_process)
primary_process = ['python', manage_path, 'migrate']
elif args.command == MANAGE_COLLECTSTATIC:
# Run pre_collectstatic
intermediate_process = ['python', manage_path, 'pre_collectstatic']
run_process(intermediate_process)
# Setup for main collectstatic
primary_process = ['python', manage_path, 'collectstatic']
if args.noinput:
primary_process.append('--noinput')
elif args.command == MANAGE_COLLECTWORKSPACES:
# Run collectworkspaces command
if args.force:
primary_process = ['python', manage_path, 'collectworkspaces', '--force']
else:
primary_process = ['python', manage_path, 'collectworkspaces']
elif args.command == MANAGE_COLLECT:
# Convenience command to run collectstatic and collectworkspaces
## Run pre_collectstatic
intermediate_process = ['python', manage_path, 'pre_collectstatic']
run_process(intermediate_process)
## Setup for main collectstatic
intermediate_process = ['python', manage_path, 'collectstatic']
if args.noinput:
intermediate_process.append('--noinput')
run_process(intermediate_process)
## Run collectworkspaces command
primary_process = ['python', manage_path, 'collectworkspaces']
elif args.command == MANAGE_CREATESUPERUSER:
primary_process = ['python', manage_path, 'createsuperuser']
elif args.command == MANAGE_SYNC:
from tethys_apps.utilities import sync_tethys_db
sync_tethys_db()
if primary_process:
run_process(primary_process)
def run_process(process):
# Call the process with a little trick to ignore the keyboard interrupt error when it happens
try:
if 'test' in process:
set_testing_environment(True)
subprocess.call(process)
except KeyboardInterrupt:
pass
finally:
set_testing_environment(False)
| Python | 0 |
8ecc26cffabb5a4c80b9a5574b102cc5c63312d3 | Update accounts.py | myuw/views/accounts.py | myuw/views/accounts.py | from myuw.views.page import page
from myuw.util.page_view import page_view
@page_view
def accounts(request):
    """Render the MyUW accounts page (no extra template context)."""
    return page(request, {}, template='accounts.html')
| from myuw.views.page import page
from myuw.util.page_view import page_view
@page_view
def accounts(request):
return page(request, {}, template='accounts.html') | Python | 0.000001 |
affb8417c7592158fbfd62c4cd49608a368ccabf | Switch update flag for full flag | nap/dataviews/views.py | nap/dataviews/views.py |
from collections import defaultdict
from inspect import classify_class_attrs
from django.db.models.fields import NOT_PROVIDED
from django.forms import ValidationError
from django.utils.functional import cached_property
from .fields import field
from .utils import DictObject
class DataView(object):
    """Declarative view over an object.

    ``field`` descriptors declared on a subclass define how the backing
    object is serialised (``_reduce``) and deserialised (``_apply``).
    """

    def __init__(self, obj=None, **kwargs):
        if obj is None:
            # Default to an attribute-accessible dict as the backing object.
            obj = DictObject()
        self._obj = obj
        self._kwargs = kwargs

    @cached_property
    def _fields(self):
        # Attribute name -> field descriptor declared on this class;
        # classify_class_attrs also picks up fields inherited from bases.
        return {
            name: prop
            for name, kind, cls, prop in classify_class_attrs(self.__class__)
            if isinstance(prop, field)
        }

    @cached_property
    def _field_names(self):
        return tuple(self._fields.keys())

    def __lshift__(self, other):
        '''
        Allow implicit reduction using:

        >>> data = view << obj
        '''
        self._obj = other
        return self._reduce()

    def __rlshift__(self, other):
        '''
        Allow implicit apply using:

        >>> obj = data >> view
        '''
        return self._apply(other)

    def _reduce(self):
        '''
        Reduce our instance to its serialisable state.

        Returns a dict.
        '''
        return {
            name: getattr(self, name)
            for name in self._field_names
        }

    def _apply(self, data, full=False):
        '''
        Update an instance from supplied data.

        If full is True, all fields not tagged as .required=False MUST be
        supplied in the data dict.
        '''
        errors = defaultdict(list)
        for name in self._field_names:
            # Fields without explicit flags default to required, no default.
            required = getattr(self._fields[name], 'required', True)
            default = getattr(self._fields[name], 'default', NOT_PROVIDED)
            value = data.get(name, default)
            if value is NOT_PROVIDED:
                if full and required:
                    # NOTE(review): this appends a ValidationError instance,
                    # while the except branch below appends plain message
                    # strings -- error lists hold mixed types; confirm
                    # intended.
                    errors[name].append(
                        ValidationError('This field is required')
                    )
                continue
            try:
                setattr(self, name, value)
            except ValidationError as e:
                errors[name].append(e.message)
        self._errors = dict(errors)
        if errors:
            raise ValidationError(self._errors)
        return self._obj
|
from collections import defaultdict
from inspect import classify_class_attrs
from django.db.models.fields import NOT_PROVIDED
from django.forms import ValidationError
from django.utils.functional import cached_property
from .fields import field
from .utils import DictObject
class DataView(object):
    """Declarative view over a backing object (revised: ``_apply`` takes an
    ``update`` flag instead of ``full``; required-field checks are skipped
    when updating).

    Fields are declared as class attributes of type ``field`` (from
    ``.fields``); values are validated by the field setters, which may
    raise ``ValidationError``.
    """
    def __init__(self, obj=None, **kwargs):
        # Fall back to an attribute-accessible dict when no backing object
        # is supplied.
        if obj is None:
            obj = DictObject()
        self._obj = obj
        self._kwargs = kwargs
    @cached_property
    def _fields(self):
        # Discover field descriptors by inspecting the class; cached per
        # instance.
        return {
            name: prop
            for name, kind, cls, prop in classify_class_attrs(self.__class__)
            if isinstance(prop, field)
        }
    @cached_property
    def _field_names(self):
        # Stable tuple of the discovered field names.
        return tuple(self._fields.keys())
    def __lshift__(self, other):
        '''
        Allow implicit reduction using:
        >>> data = view << obj
        '''
        # Rebind the view to ``other`` before reducing it to a dict.
        self._obj = other
        return self._reduce()
    def __rlshift__(self, other):
        '''
        Allow implicit apply(update) using:
        >>> obj = data >> view
        Note: sets update=True

        NOTE(review): ``__rlshift__`` fires for ``data << view``, not for
        ``data >> view`` as the example suggests (``>>`` would need
        ``__rrshift__``) — confirm the intended operator.
        '''
        return self._apply(other, update=True)
    def _reduce(self):
        '''
        Reduce our instance to its serialisable state.
        Returns a dict.
        '''
        return {
            name: getattr(self, name)
            for name in self._field_names
        }
    def _apply(self, data, update=False):
        '''
        Update an instance from supplied data.
        If update is False, all fields not tagged as .required=False MUST be
        supplied in the data dict.

        Per-field errors are collected into ``self._errors``; if any
        occurred a single ``ValidationError`` wrapping them is raised,
        otherwise the updated backing object is returned.
        '''
        errors = defaultdict(list)
        for name in self._field_names:
            required = getattr(self._fields[name], 'required', True)
            default = getattr(self._fields[name], 'default', NOT_PROVIDED)
            value = data.get(name, default)
            if value is NOT_PROVIDED:
                # Missing with no default: only an error on a full apply
                # (update=False); partial updates may omit fields.
                if required and not update:
                    errors[name].append(
                        ValidationError('This field is required')
                    )
                continue
            try:
                # Assigning via the field descriptor validates the value.
                setattr(self, name, value)
            except ValidationError as e:
                errors[name].append(e.message)
        self._errors = dict(errors)
        if errors:
            raise ValidationError(self._errors)
        return self._obj
| Python | 0 |
9eebf1d43b93a6e1001186693d3a15ce2b5d568e | Add Bank and BankAccount models, add some fields to Supplier model | nbs/models/supplier.py | nbs/models/supplier.py | # -*- coding: utf-8 -*-
from nbs.models import db
from nbs.models.entity import Entity
from nbs.models.misc import FiscalDataMixin
class Supplier(Entity, FiscalDataMixin):
    """Supplier entity (joined-table inheritance from ``Entity``)."""
    __tablename__ = 'supplier'
    __mapper_args__ = {'polymorphic_identity': u'supplier'}

    # Freight responsibility options for this supplier.
    FREIGHT_SUPPLIER = 'FREIGHT_SUPPLIER'
    FREIGHT_CUSTOMER = 'FREIGHT_CUSTOMER'

    # Machine value -> human-readable (Spanish) label.
    _freight_types = {
        FREIGHT_SUPPLIER: 'Flete de proveedor',
        FREIGHT_CUSTOMER: 'Flete de cliente',
    }

    # Shares the primary key with the parent Entity row.
    supplier_id = db.Column(db.Integer, db.ForeignKey('entity.id'),
                            primary_key=True)

    # Reuse the generic Entity name columns under supplier-specific names.
    name = Entity._name_1
    fancy_name = Entity._name_2

    payment_term = db.Column(db.Integer) # in days
    freight_type = db.Column(db.Enum(*_freight_types.keys(),
                             name='freight_type'), default=FREIGHT_CUSTOMER)
    leap_time = db.Column(db.Integer) # in days

    @property
    def full_name(self):
        # "name (fancy_name)" when a fancy name exists, plain name otherwise.
        fn = u" ({0})".format(self.fancy_name) if self.fancy_name else u""
        return u"{0}{1}".format(self.name, fn)
class Bank(db.Model):
    """A bank institution; referenced by ``BankAccount`` rows."""
    __tablename__ = 'bank'

    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.Unicode)

    # TODO: Add bank logo, to quickly identify
class BankAccount(db.Model):
    """A supplier's account at a ``Bank``."""
    __tablename__ = 'bank_account'

    # Account type options (checking/savings, pesos/USD, single account).
    TYPE_CC_PESOS = 'TYPE_CC_PESOS'
    TYPE_CC_USD = 'TYPE_CC_USD'
    TYPE_CA_PESOS = 'TYPE_CA_PESOS'
    TYPE_CA_USD = 'TYPE_CA_USD'
    TYPE_UNIQUE = 'TYPE_UNIQUE'

    # Machine value -> human-readable (Spanish) label.
    _account_type = {
        TYPE_CC_PESOS: 'Cuenta Corriente en Pesos',
        TYPE_CC_USD: 'Cuenta Corriente en Dólares',
        TYPE_CA_PESOS: 'Caja de Ahorro en Pesos',
        TYPE_CA_USD: 'Caja de Ahorro en Dólares',
        TYPE_UNIQUE: 'Cuenta Única',
    }

    id = db.Column(db.Integer, primary_key=True)
    bank_branch = db.Column(db.Unicode)
    # NOTE(review): the Enum value order comes from dict key order, which
    # is insertion order only on Python >= 3.7 — confirm the target
    # interpreter, or the generated DDL may vary between runs.
    account_type = db.Column(db.Enum(*_account_type.keys(),
                             name='account_type'), default=TYPE_CC_PESOS)
    account_number = db.Column(db.Unicode)
    account_cbu = db.Column(db.Unicode)  # presumably the Argentine CBU code — verify
    account_owner = db.Column(db.Unicode)

    bank_id = db.Column(db.Integer, db.ForeignKey('bank.id'))
    bank = db.relationship(Bank, backref="accounts")

    supplier_id = db.Column(db.Integer, db.ForeignKey('supplier.supplier_id'))
    supplier = db.relationship(Supplier, backref='bank_accounts')
| # -*- coding: utf-8 -*-
from nbs.models import db
from nbs.models.entity import Entity
from nbs.models.misc import FiscalDataMixin
class Supplier(Entity, FiscalDataMixin):
    """Supplier entity (joined-table inheritance from ``Entity``)."""
    __tablename__ = 'supplier'
    __mapper_args__ = {'polymorphic_identity': u'supplier'}

    # Shares the primary key with the parent Entity row.
    supplier_id = db.Column(db.Integer, db.ForeignKey('entity.id'),
                            primary_key=True)

    # Reuse the generic Entity name columns under supplier-specific names.
    name = Entity._name_1
    fancy_name = Entity._name_2

    payment_term = db.Column(db.Integer)  # presumably in days — verify

    @property
    def full_name(self):
        # "name (fancy_name)" when a fancy name exists, plain name otherwise.
        fn = u" ({0})".format(self.fancy_name) if self.fancy_name else u""
        return u"{0}{1}".format(self.name, fn)
| Python | 0 |
e1a7262bc4fc841b95ee6fb45c1bb0da5cc3f2c1 | add an option for fallback style in vimrc | tools/clang-format/clang-format.py | tools/clang-format/clang-format.py | # This file is a minimal clang-format vim-integration. To install:
# - Change 'binary' if clang-format is not on the path (see below).
# - Add to your .vimrc:
#
# map <C-I> :pyf <path-to-this-file>/clang-format.py<cr>
# imap <C-I> <c-o>:pyf <path-to-this-file>/clang-format.py<cr>
#
# The first line enables clang-format for NORMAL and VISUAL mode, the second
# line adds support for INSERT mode. Change "C-I" to another binding if you
# need clang-format on a different key (C-I stands for Ctrl+i).
#
# With this integration you can press the bound key and clang-format will
# format the current line in NORMAL and INSERT mode or the selected region in
# VISUAL mode. The line or region is extended to the next bigger syntactic
# entity.
#
# It operates on the current, potentially unsaved buffer and does not create
# or save any files. To revert a formatting, just undo.
import difflib
import json
import subprocess
import sys
import vim
# set g:clang_format_path to the path to clang-format if it is not on the path
# Change this to the full path if clang-format is not on the path.
binary = 'clang-format'  # default: resolve clang-format via PATH
# Honor a user override set in .vimrc (let g:clang_format_path = ...).
if vim.eval('exists("g:clang_format_path")') == "1":
    binary = vim.eval('g:clang_format_path')
# Change this to format according to other formatting styles. See the output of
# 'clang-format --help' for a list of supported styles. The default looks for
# a '.clang-format' or '_clang-format' file to indicate the style that should be
# used.
style = 'file'
# Default the fallback style to None so the unconditional reference in
# main() is always defined; previously this name was only bound when
# g:clang_format_fallback_style existed, so main() raised NameError for
# every user who had not set that vim variable.
fallback_style = None
if vim.eval('exists("g:clang_format_fallback_style")') == "1":
    fallback_style = vim.eval('g:clang_format_fallback_style')
def main():
    """Format the current selection/line of the active vim buffer in place.

    Runs clang-format over the buffer contents on stdin, diffs the result
    against the buffer, applies only the changed lines, and restores the
    cursor from clang-format's JSON header line. Undo reverts the edit.
    """
    # Get the current text.
    buf = vim.current.buffer
    text = '\n'.join(buf)
    # Determine range to format (1-based, inclusive, as clang-format expects).
    lines = '%s:%s' % (vim.current.range.start + 1, vim.current.range.end + 1)
    # Determine the cursor position as a byte offset into the buffer.
    cursor = int(vim.eval('line2byte(line("."))+col(".")')) - 2
    if cursor < 0:
        print 'Couldn\'t determine cursor position. Is your file empty?'
        return
    # Avoid flashing an ugly, ugly cmd prompt on Windows when invoking clang-format.
    startupinfo = None
    if sys.platform.startswith('win32'):
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
        startupinfo.wShowWindow = subprocess.SW_HIDE
    # Call formatter; -cursor makes clang-format emit a JSON status line
    # (including the updated cursor offset) before the formatted text.
    command = [binary, '-lines', lines, '-style', style, '-cursor', str(cursor)]
    if fallback_style:
        command.extend(['-fallback-style', fallback_style])
    if vim.current.buffer.name:
        # Lets clang-format pick per-file config relative to the real path.
        command.extend(['-assume-filename', vim.current.buffer.name])
    p = subprocess.Popen(command,
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                         stdin=subprocess.PIPE, startupinfo=startupinfo)
    stdout, stderr = p.communicate(input=text)
    # If successful, replace buffer contents.
    if stderr:
        print stderr
    if not stdout:
        print ('No output from clang-format (crashed?).\n' +
               'Please report to bugs.llvm.org.')
    else:
        # First output line is the JSON status; the rest is the new text.
        lines = stdout.split('\n')
        output = json.loads(lines[0])
        lines = lines[1:]
        # Apply a minimal diff so unrelated buffer lines (and marks on
        # them) are left untouched; iterate in reverse so earlier opcode
        # offsets stay valid while we splice.
        sequence = difflib.SequenceMatcher(None, vim.current.buffer, lines)
        for op in reversed(sequence.get_opcodes()):
            if op[0] is not 'equal':
                vim.current.buffer[op[1]:op[2]] = lines[op[3]:op[4]]
        vim.command('goto %d' % (output['Cursor'] + 1))
main()
| # This file is a minimal clang-format vim-integration. To install:
# - Change 'binary' if clang-format is not on the path (see below).
# - Add to your .vimrc:
#
# map <C-I> :pyf <path-to-this-file>/clang-format.py<cr>
# imap <C-I> <c-o>:pyf <path-to-this-file>/clang-format.py<cr>
#
# The first line enables clang-format for NORMAL and VISUAL mode, the second
# line adds support for INSERT mode. Change "C-I" to another binding if you
# need clang-format on a different key (C-I stands for Ctrl+i).
#
# With this integration you can press the bound key and clang-format will
# format the current line in NORMAL and INSERT mode or the selected region in
# VISUAL mode. The line or region is extended to the next bigger syntactic
# entity.
#
# It operates on the current, potentially unsaved buffer and does not create
# or save any files. To revert a formatting, just undo.
import difflib
import json
import subprocess
import sys
import vim
# set g:clang_format_path to the path to clang-format if it is not on the path
# Change this to the full path if clang-format is not on the path.
binary = 'clang-format'  # default: resolve clang-format via PATH
# Honor a user override set in .vimrc (let g:clang_format_path = ...).
if vim.eval('exists("g:clang_format_path")') == "1":
    binary = vim.eval('g:clang_format_path')
# Change this to format according to other formatting styles. See the output of
# 'clang-format --help' for a list of supported styles. The default looks for
# a '.clang-format' or '_clang-format' file to indicate the style that should be
# used.
style = 'file'
def main():
    """Format the current selection/line of the active vim buffer in place.

    Runs clang-format over the buffer contents on stdin, diffs the result
    against the buffer, applies only the changed lines, and restores the
    cursor from clang-format's JSON header line. Undo reverts the edit.
    """
    # Get the current text.
    buf = vim.current.buffer
    text = '\n'.join(buf)
    # Determine range to format (1-based, inclusive, as clang-format expects).
    lines = '%s:%s' % (vim.current.range.start + 1, vim.current.range.end + 1)
    # Determine the cursor position as a byte offset into the buffer.
    cursor = int(vim.eval('line2byte(line("."))+col(".")')) - 2
    if cursor < 0:
        print 'Couldn\'t determine cursor position. Is your file empty?'
        return
    # Avoid flashing an ugly, ugly cmd prompt on Windows when invoking clang-format.
    startupinfo = None
    if sys.platform.startswith('win32'):
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
        startupinfo.wShowWindow = subprocess.SW_HIDE
    # Call formatter; -cursor makes clang-format emit a JSON status line
    # (including the updated cursor offset) before the formatted text.
    command = [binary, '-lines', lines, '-style', style, '-cursor', str(cursor)]
    if vim.current.buffer.name:
        # Lets clang-format pick per-file config relative to the real path.
        command.extend(['-assume-filename', vim.current.buffer.name])
    p = subprocess.Popen(command,
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                         stdin=subprocess.PIPE, startupinfo=startupinfo)
    stdout, stderr = p.communicate(input=text)
    # If successful, replace buffer contents.
    if stderr:
        print stderr
    if not stdout:
        print ('No output from clang-format (crashed?).\n' +
               'Please report to bugs.llvm.org.')
    else:
        # First output line is the JSON status; the rest is the new text.
        lines = stdout.split('\n')
        output = json.loads(lines[0])
        lines = lines[1:]
        # Apply a minimal diff, iterating opcodes in reverse so earlier
        # offsets stay valid while we splice.
        sequence = difflib.SequenceMatcher(None, vim.current.buffer, lines)
        for op in reversed(sequence.get_opcodes()):
            if op[0] is not 'equal':
                vim.current.buffer[op[1]:op[2]] = lines[op[3]:op[4]]
        vim.command('goto %d' % (output['Cursor'] + 1))
main()
| Python | 0.000099 |
f57294c59e197c989536638776738b0ed0bcee1d | disable scheduler.tough_pepper_cases | tools/perf/benchmarks/scheduler.py | tools/perf/benchmarks/scheduler.py | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry import benchmark
from measurements import smoothness
import page_sets
class SchedulerToughSchedulingCases(benchmark.Benchmark):
    """Measures rendering statistics while interacting with pages that have
    challenging scheduling properties.

    https://docs.google.com/a/chromium.org/document/d/
    17yhE5Po9By0sCdM1yZT3LiUECaUr_94rQt9j-4tOQIM/view"""
    # Telemetry wiring: the smoothness measurement run over the
    # tough-scheduling page set.
    test = smoothness.Smoothness
    page_set = page_sets.ToughSchedulingCasesPageSet
# Pepper plugin is not supported on android.
# Pepper plugin is not supported on android; win/mac disabled per bug below.
@benchmark.Disabled('android', 'win', 'mac') # crbug.com/384733
class SchedulerToughPepperCases(benchmark.Benchmark):
    """Measures rendering statistics while interacting with pages that have
    pepper plugins"""
    test = smoothness.Smoothness
    page_set = page_sets.ToughPepperCasesPageSet

    def CustomizeBrowserOptions(self, options):
        # This is needed for testing pepper plugin.
        options.AppendExtraBrowserArgs('--enable-pepper-testing')
| # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry import benchmark
from measurements import smoothness
import page_sets
class SchedulerToughSchedulingCases(benchmark.Benchmark):
    """Measures rendering statistics while interacting with pages that have
    challenging scheduling properties.

    https://docs.google.com/a/chromium.org/document/d/
    17yhE5Po9By0sCdM1yZT3LiUECaUr_94rQt9j-4tOQIM/view"""
    # Telemetry wiring: the smoothness measurement run over the
    # tough-scheduling page set.
    test = smoothness.Smoothness
    page_set = page_sets.ToughSchedulingCasesPageSet
# Pepper plugin is not supported on android.
# Pepper plugin is not supported on android; win disabled per bug below.
@benchmark.Disabled('android', 'win') # crbug.com/384733
class SchedulerToughPepperCases(benchmark.Benchmark):
    """Measures rendering statistics while interacting with pages that have
    pepper plugins"""
    test = smoothness.Smoothness
    page_set = page_sets.ToughPepperCasesPageSet

    def CustomizeBrowserOptions(self, options):
        # This is needed for testing pepper plugin.
        options.AppendExtraBrowserArgs('--enable-pepper-testing')
| Python | 0.000031 |
4fb1ad11add4436395f775a12f0d4e90b99d6594 | add ignore filtering | psutil_mon/psutil_alarm.py | psutil_mon/psutil_alarm.py |
#
# Hubblemon - Yet another general purpose system monitor
#
# Copyright 2015 NAVER Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import socket, fnmatch, pickle, sys, os
import psutil_mon.settings
hubblemon_path = os.path.join(os.path.dirname(__file__), '..')
sys.path.append(hubblemon_path)
import common.settings
class psutil_alarm:
    """Alarm-configuration resolver for the psutil system checker."""

    def __init__(self):
        # Checker identity and polling interval.
        self.name = 'psutil'
        self.sec_interval = 5  # 5 sec interval

    def system_list_init(self):
        # No per-system initialisation is required for this checker.
        pass

    def select_conf(self, client, item, map):
        """Return the alarm conf in ``map`` that applies to ``client:item``.

        An exact key wins; otherwise the first glob-style key that matches
        is used, unless that entry's optional 'IGNORE' pattern also matches
        the key. Returns an empty dict when nothing applies.
        """
        lookup = '%s:%s' % (client, item)
        # Exact match takes priority over any wildcard entry.
        if lookup in map:
            return map[lookup]
        # Wildcard match (keys such as 'host:net-*').
        for pattern, conf in map.items():
            if not fnmatch.fnmatch(lookup, pattern):
                continue
            # An entry may carve out exceptions via an IGNORE glob.
            if 'IGNORE' in conf and fnmatch.fnmatch(lookup, conf['IGNORE']):
                continue
            return conf
        return {}

    def get_conf(self, client, item):
        """Resolve alarm settings for one psutil item on one machine.

        client: machine name; item: psutil item name (e.g. cpu, net, disk).
        Returns (message_head, absolute_conf, lambda_conf).
        """
        message_head = '%s:%s' % (client, item)
        absolute = self.select_conf(
            client, item, psutil_mon.settings.alarm_conf_absolute)
        computed = self.select_conf(
            client, item, psutil_mon.settings.alarm_conf_lambda)
        return (message_head, absolute, computed)
|
#
# Hubblemon - Yet another general purpose system monitor
#
# Copyright 2015 NAVER Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import socket, fnmatch, pickle, sys, os
import psutil_mon.settings
hubblemon_path = os.path.join(os.path.dirname(__file__), '..')
sys.path.append(hubblemon_path)
import common.settings
class psutil_alarm:
    """Alarm-configuration resolver for the psutil system checker
    (pre-IGNORE version: wildcard matches cannot be excluded)."""
    def __init__(self):
        # Checker identity and polling interval.
        self.name = 'psutil'
        self.sec_interval = 5 # 5 sec interval
    def system_list_init(self):
        # No per-system initialisation is required for this checker.
        pass
    def select_conf(self, client, item, map):
        """Return the alarm conf in ``map`` that applies to ``client:item``:
        an exact key wins, otherwise the first glob-style key that matches;
        {} when nothing applies."""
        key = '%s:%s' % (client, item)
        # exact
        if key in map:
            return map[key]
        # wild card match
        for k, v in map.items():
            # overwrite if match like net-*
            if fnmatch.fnmatch(key, k):
                return map[k]
        return {}
    def get_conf(self, client, item): # client: machine name, item: items in psutil (ex, cpu, net, disk...)
        """Resolve (message_head, absolute_conf, lambda_conf) for one item."""
        # select exact conf
        abs_conf = self.select_conf(client, item, psutil_mon.settings.alarm_conf_absolute)
        lambda_conf = self.select_conf(client, item, psutil_mon.settings.alarm_conf_lambda)
        message_head = '%s:%s' % (client, item)
        return (message_head, abs_conf, lambda_conf)
| Python | 0.000001 |
7a66989b62d1776e72229ac36c0ed77235549b1d | Add a data timeout to ensure that we aren't blocked waiting on data for a connection that the pusher server does not think exists. This occurs when the network cable has been unplugged for an extended period of time and then reconnected. | pusherclient/connection.py | pusherclient/connection.py | import websocket
try:
import simplejson as json
except:
import json
from threading import Thread, Timer
import time
import logging
CONNECTION_EVENTS_NEW = [
'initialized',
'connecting',
'connected',
'unavailable',
'failed',
'disconnected',
]
CONNECTION_EVENTS_OLD = [
'pusher:connection_established',
'pusher:connection_failed',
]
class Connection(Thread):
    """Background thread managing the Pusher websocket connection.

    Dispatches pusher connection events to bound callbacks and all other
    events to ``eventHandler``. A watchdog Timer reconnects when no data
    arrives within ``connectionTimeout`` seconds.
    """
    def __init__(self, eventHandler, url, logLevel=logging.INFO):
        self.socket = None
        self.socket_id = ""
        # event name -> list of callbacks, registered via bind().
        self.eventCallbacks = {}
        self.eventHandler = eventHandler
        self.url = url
        self.needsReconnect = False
        self.reconnectInterval = 10  # seconds between reconnect attempts
        self.bind("pusher:connection_established", self._connect_handler)
        self.bind("pusher:connection_failed", self._failed_handler)
        self.state = "initialized"
        self.logger = logging.getLogger()
        self.logger.addHandler(logging.StreamHandler())
        if logLevel == logging.DEBUG:
            websocket.enableTrace(True)
        self.logger.setLevel(logLevel)
        # From Martyn's comment at: https://pusher.tenderapp.com/discussions/problems/36-no-messages-received-after-1-idle-minute-heartbeat
        # "We send a ping every 5 minutes in an attempt to keep connections
        # alive..."
        # This is why we set the connection timeout to 5 minutes, since we can
        # expect a pusher heartbeat message every 5 minutes. Adding 5 sec to
        # account for small timing delays which may cause messages to not be
        # received in exact 5 minute intervals.
        self.connectionTimeout = 305
        self.connectionTimer = Timer(self.connectionTimeout, self._connectionTimedOut)
        Thread.__init__(self)
    def bind(self, stateEvent, callback):
        """Register ``callback`` for a pusher connection event name."""
        if stateEvent not in self.eventCallbacks.keys():
            self.eventCallbacks[stateEvent] = []
        self.eventCallbacks[stateEvent].append(callback)
    def run(self):
        # Thread entry point: run the websocket loop.
        self._connect()
    def _connect(self):
        self.state = "connecting"
        self.socket = websocket.WebSocketApp(self.url,
                                             self._on_open,
                                             self._on_message,
                                             self._on_error,
                                             self._on_close)
        # run_forever() blocks until the socket closes; _on_error sets
        # needsReconnect, so we retry after a delay.
        self.socket.run_forever()
        while (self.needsReconnect):
            self.logger.info("Attempting to connect again in %s seconds." % self.reconnectInterval)
            self.state = "unavailable"
            time.sleep(self.reconnectInterval)
            self.socket.run_forever()
    def _on_open(self, ws):
        self.logger.info("Connection: Connection opened")
        # Start the data-timeout watchdog.
        # NOTE(review): a Timer whose thread already ran (e.g. after a
        # timeout-driven reconnect) cannot be start()ed again — this looks
        # like it would raise RuntimeError on reopen; confirm.
        self.connectionTimer.start()
    def _on_error(self, ws, error):
        self.logger.info("Connection: Error - %s" % error)
        self.state = "failed"
        self.needsReconnect = True
    def _on_message(self, ws, message):
        self.logger.info("Connection: Message - %s" % message)
        # Stop our timeout timer, since we got some data
        self.connectionTimer.cancel()
        params = self._parse(message)
        if 'event' in params.keys():
            # Connection-lifecycle events go to bound callbacks; everything
            # else (channel traffic) goes to the external event handler.
            if (params['event'] in CONNECTION_EVENTS_NEW) or (params['event'] in CONNECTION_EVENTS_OLD):
                if params['event'] in self.eventCallbacks.keys():
                    for callback in self.eventCallbacks[params['event']]:
                        callback(params['data'])
            else:
                if 'channel' in params.keys():
                    self.eventHandler(params['event'],
                                      params['data'],
                                      params['channel'])
        else:
            self.logger.info("Connection: Unknown event type")
        # We've handled our data, so restart our connection timeout handler
        # (Timer objects are single-shot, hence a fresh instance).
        self.connectionTimer = Timer(self.connectionTimeout, self._connectionTimedOut)
        self.connectionTimer.start()
    def _on_close(self, ws):
        self.logger.info("Connection: Connection closed")
        self.state = "disconnected"
    def _parse(self, message):
        # Pusher frames are JSON.
        return json.loads(message)
    def _send_event(self, eventName, data):
        # Serialize an outgoing event onto the websocket.
        self.socket.send(json.dumps({'event':eventName, 'data':data}))
    def _connect_handler(self, data):
        # pusher:connection_established carries our socket_id.
        parsed = json.loads(data)
        self.socket_id = parsed['socket_id']
        self.state = "connected"
    def _failed_handler(self, data):
        # ``parsed`` is currently unused beyond validating the JSON.
        parsed = json.loads(data)
        self.state = "failed"
    def _connectionTimedOut(self):
        # Watchdog fired: no data within connectionTimeout seconds. Force
        # a close; _connect's reconnect loop will retry.
        self.logger.info("Did not receive any data in time. Reconnecting.")
        self.state = "failed"
        self.needsReconnect = True
        self.socket.close()
| import websocket
try:
import simplejson as json
except:
import json
from threading import Thread
import time
import logging
CONNECTION_EVENTS_NEW = [
'initialized',
'connecting',
'connected',
'unavailable',
'failed',
'disconnected',
]
CONNECTION_EVENTS_OLD = [
'pusher:connection_established',
'pusher:connection_failed',
]
class Connection(Thread):
    """Background thread managing the Pusher websocket connection
    (pre-watchdog version: no data-timeout handling).

    Dispatches pusher connection events to bound callbacks and all other
    events to ``eventHandler``.
    """
    def __init__(self, eventHandler, url, logLevel=logging.INFO):
        self.socket = None
        self.socket_id = ""
        # event name -> list of callbacks, registered via bind().
        self.eventCallbacks = {}
        self.eventHandler = eventHandler
        self.url = url
        self.needsReconnect = False
        self.reconnectInterval = 10  # seconds between reconnect attempts
        self.bind("pusher:connection_established", self._connect_handler)
        self.bind("pusher:connection_failed", self._failed_handler)
        self.state = "initialized"
        self.logger = logging.getLogger()
        self.logger.addHandler(logging.StreamHandler())
        if logLevel == logging.DEBUG:
            websocket.enableTrace(True)
        self.logger.setLevel(logLevel)
        Thread.__init__(self)
    def bind(self, stateEvent, callback):
        """Register ``callback`` for a pusher connection event name."""
        if stateEvent not in self.eventCallbacks.keys():
            self.eventCallbacks[stateEvent] = []
        self.eventCallbacks[stateEvent].append(callback)
    def run(self):
        # Thread entry point: run the websocket loop.
        self._connect()
    def _connect(self):
        self.state = "connecting"
        self.socket = websocket.WebSocketApp(self.url,
                                             self._on_open,
                                             self._on_message,
                                             self._on_error,
                                             self._on_close)
        # run_forever() blocks until the socket closes; _on_error sets
        # needsReconnect, so we retry after a delay.
        self.socket.run_forever()
        while (self.needsReconnect):
            self.logger.info("Attempting to connect again in %s seconds." % self.reconnectInterval)
            self.state = "unavailable"
            time.sleep(self.reconnectInterval)
            self.socket.run_forever()
    def _on_open(self, ws):
        self.logger.info("Connection: Connection opened")
    def _on_error(self, ws, error):
        self.logger.info("Connection: Error - %s" % error)
        self.state = "failed"
        self.needsReconnect = True
    def _on_message(self, ws, message):
        self.logger.info("Connection: Message - %s" % message)
        params = self._parse(message)
        if 'event' in params.keys():
            # Connection-lifecycle events go to bound callbacks; everything
            # else (channel traffic) goes to the external event handler.
            if (params['event'] in CONNECTION_EVENTS_NEW) or (params['event'] in CONNECTION_EVENTS_OLD):
                if params['event'] in self.eventCallbacks.keys():
                    for callback in self.eventCallbacks[params['event']]:
                        callback(params['data'])
            else:
                if 'channel' in params.keys():
                    self.eventHandler(params['event'],
                                      params['data'],
                                      params['channel'])
        else:
            self.logger.info("Connection: Unknown event type")
    def _on_close(self, ws):
        self.logger.info("Connection: Connection closed")
        self.state = "disconnected"
    def _parse(self, message):
        # Pusher frames are JSON.
        return json.loads(message)
    def _send_event(self, eventName, data):
        # Serialize an outgoing event onto the websocket.
        self.socket.send(json.dumps({'event':eventName, 'data':data}))
    def _connect_handler(self, data):
        # pusher:connection_established carries our socket_id.
        parsed = json.loads(data)
        self.socket_id = parsed['socket_id']
        self.state = "connected"
    def _failed_handler(self, data):
        # ``parsed`` is currently unused beyond validating the JSON.
        parsed = json.loads(data)
        self.state = "failed"
| Python | 0 |
5cd9499fcc0c1f9b48216aeca11a7adcd8995a47 | Fix for MRV failing to enter enable mode | netmiko/mrv/mrv_ssh.py | netmiko/mrv/mrv_ssh.py | """MRV Communications Driver (OptiSwitch)."""
from __future__ import unicode_literals
import time
import re
from netmiko.cisco_base_connection import CiscoSSHConnection
class MrvOptiswitchSSH(CiscoSSHConnection):
    """MRV Communications Driver (OptiSwitch)."""
    def session_preparation(self):
        """Prepare the session after the connection has been established."""
        self._test_channel_read(pattern=r'[>#]')
        # Establish the prompt before entering enable mode, then refresh it
        # afterwards (the prompt changes from '>' to '#').
        self.set_base_prompt()
        self.enable()
        self.disable_paging(command="no cli-paging")
        # Clear the read buffer
        time.sleep(.3 * self.global_delay_factor)
        self.set_base_prompt()
        self.clear_buffer()
    def enable(self, cmd='enable', pattern=r'#', re_flags=re.IGNORECASE):
        """Enable mode on MRV uses no password."""
        output = ""
        if not self.check_enable_mode():
            self.write_channel(self.normalize_cmd(cmd))
            output += self.read_until_prompt_or_pattern(pattern=pattern, re_flags=re_flags)
            if not self.check_enable_mode():
                msg = "Failed to enter enable mode. Please ensure you pass " \
                      "the 'secret' argument to ConnectHandler."
                raise ValueError(msg)
        return output
    def save_config(self, cmd='save config flash', confirm=False):
        """Saves configuration."""
        return super(MrvOptiswitchSSH, self).save_config(cmd=cmd, confirm=confirm)
| """MRV Communications Driver (OptiSwitch)."""
from __future__ import unicode_literals
import time
import re
from netmiko.cisco_base_connection import CiscoSSHConnection
class MrvOptiswitchSSH(CiscoSSHConnection):
    """MRV Communications Driver (OptiSwitch)."""
    def session_preparation(self):
        """Prepare the session after the connection has been established."""
        self._test_channel_read(pattern=r'[>#]')
        # NOTE(review): enable() runs before set_base_prompt() here, so the
        # prompt used inside enable mode was never established — confirm
        # against the ordering in the later driver revision.
        self.enable()
        self.set_base_prompt()
        self.disable_paging(command="no cli-paging")
        # Clear the read buffer
        time.sleep(.3 * self.global_delay_factor)
        self.clear_buffer()
    def enable(self, cmd='enable', pattern=r'#', re_flags=re.IGNORECASE):
        """Enable mode on MRV uses no password."""
        output = ""
        if not self.check_enable_mode():
            self.write_channel(self.normalize_cmd(cmd))
            output += self.read_until_prompt_or_pattern(pattern=pattern, re_flags=re_flags)
            if not self.check_enable_mode():
                msg = "Failed to enter enable mode. Please ensure you pass " \
                      "the 'secret' argument to ConnectHandler."
                raise ValueError(msg)
        return output
    def save_config(self, cmd='save config flash', confirm=False):
        """Saves configuration."""
        return super(MrvOptiswitchSSH, self).save_config(cmd=cmd, confirm=confirm)
| Python | 0 |
d24a8db471cc9a415e3e2081e702199990bd6ac4 | Add option to configure plot's linewidth | pyexperiment/utils/plot.py | pyexperiment/utils/plot.py | """Provides setup for matplotlib figures
Written by Peter Duerr.
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
import matplotlib
from matplotlib import pyplot as plt
def setup_matplotlib(font_size=14,
                     label_size=14,
                     use_tex=True,
                     linewidth=2):
    """Apply the basic house style for matplotlib figures.

    Configures global font size/weight, LaTeX text rendering, default line
    width, and tick-label sizes.
    """
    # Global font settings (sizes are coerced to int for matplotlib's rc).
    matplotlib.rc('font',
                  family='normal',
                  weight='normal',
                  size=int(font_size))
    # LaTeX rendering of text elements and default plot line width.
    matplotlib.rc('text', usetex=use_tex)
    matplotlib.rc('lines', linewidth=linewidth)
    # Tick label sizes on both axes.
    tick_size = int(label_size)
    matplotlib.rc('xtick', labelsize=tick_size)
    matplotlib.rc('ytick', labelsize=tick_size)
def quit_figure_on_key(key, figure=None):
    """Close *figure* (defaults to the current figure) when *key* is
    pressed while the figure has focus.
    """
    if figure is None:
        figure = plt.gcf()

    def _close_on_key(event):
        """Close the originating figure if the watched key was pressed."""
        if event.key == key:
            plt.close(event.canvas.figure)

    figure.canvas.mpl_connect('key_press_event', _close_on_key)
def setup_figure(name):
    """Create a figure titled *name* that can be closed by pressing 'q'.

    Saving with 's' relies on matplotlib's default keymap. Returns the
    figure so callers can keep a handle; previously the handle was
    discarded and only reachable via ``plt.gcf()``.
    """
    fig = plt.figure()
    fig.canvas.set_window_title(name)
    quit_figure_on_key('q', fig)
    return fig
| """Provides setup for matplotlib figures
Written by Peter Duerr.
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
import matplotlib
from matplotlib import pyplot as plt
def setup_matplotlib(font_size=14,
                     label_size=14,
                     use_tex=True):
    """Setup basic style for matplotlib figures.

    Configures global font size/weight, LaTeX text rendering, and
    tick-label sizes (no linewidth control in this version).
    """
    font_size = int(font_size)
    font = {'family': 'normal',
            'weight': 'normal',
            'size': font_size}

    # ** is elegant here
    matplotlib.rc('font', **font)  # pylint:disable=W0142
    matplotlib.rc('text', usetex=use_tex)

    # Tick label sizes on both axes.
    label_size = int(label_size)
    matplotlib.rc('xtick', labelsize=label_size)
    matplotlib.rc('ytick', labelsize=label_size)
def quit_figure_on_key(key, figure=None):
    """Add handler to figure (defaults to current figure) that closes it
    on a key press event.
    """
    def quit_on_keypress(event):
        """Quit the figure on key press"""
        if event.key == key:
            plt.close(event.canvas.figure)

    if figure is None:
        figure = plt.gcf()
    figure.canvas.mpl_connect('key_press_event', quit_on_keypress)
def setup_figure(name):
    """Setup a figure that can be closed by pressing 'q' and saved by
    pressing 's'.

    Returns None; the created figure is only reachable via plt.gcf().
    Saving with 's' presumably relies on matplotlib's default keymap —
    no explicit handler is installed here.
    """
    fig = plt.figure()
    fig.canvas.set_window_title(name)
    quit_figure_on_key('q', fig)
| Python | 0.000001 |
9c40fa22c395b3d1dba800f0826606ecf314ddb2 | test update | apps/pypi/tests/test_slurper.py | apps/pypi/tests/test_slurper.py | from django.template.defaultfilters import slugify
from django.test import TestCase
from package.models import Package, Version
from pypi.slurper import Slurper
TEST_PACKAGE_NAME = 'Django'
TEST_PACKAGE_VERSION = '1.3'
TEST_PACKAGE_REPO_NAME = 'django-uni-form'
class SlurpAllTests(TestCase):
    """Tests for the PyPI slurper.

    NOTE(review): these tests query the live PyPI index (no mocking
    visible here), so they depend on network access, and
    TEST_PACKAGE_VERSION pins whatever Django release is current — they
    will break on the next upstream release; confirm this is acceptable.
    """
    def test_get_latest_version_number(self):
        slurper = Slurper(TEST_PACKAGE_NAME)
        version = slurper.get_latest_version_number(TEST_PACKAGE_NAME)
        self.assertEquals(version, TEST_PACKAGE_VERSION)

    def test_get_or_create_package(self):
        slurper = Slurper(TEST_PACKAGE_NAME)
        version = slurper.get_latest_version_number(TEST_PACKAGE_NAME)
        package, created = slurper.get_or_create_package(TEST_PACKAGE_NAME, version)
        # A fresh database means the package must be newly created.
        self.assertTrue(created)
        self.assertTrue(isinstance(package, Package))
        self.assertEquals(package.title, TEST_PACKAGE_NAME)
        self.assertEquals(package.slug, slugify(TEST_PACKAGE_NAME))

    def test_get_or_create_with_repo(self):
        slurper = Slurper(TEST_PACKAGE_REPO_NAME)
        version = slurper.get_latest_version_number(TEST_PACKAGE_REPO_NAME)
        package, created = slurper.get_or_create_package(TEST_PACKAGE_REPO_NAME, version)
        self.assertTrue(created)
        self.assertTrue(isinstance(package, Package))
        self.assertEquals(package.title, TEST_PACKAGE_REPO_NAME)
        self.assertEquals(package.slug, slugify(TEST_PACKAGE_REPO_NAME))

    def test_check_versions(self):
        slurper = Slurper(TEST_PACKAGE_REPO_NAME)
        version = slurper.get_latest_version_number(TEST_PACKAGE_REPO_NAME)
        # make me a package (Actually, make me a billionare)
        slurper.get_or_create_package(TEST_PACKAGE_REPO_NAME, version)
        # fetch the package for testing
        package = Package.objects.get(title=TEST_PACKAGE_REPO_NAME)
        # Download count comes from PyPI metadata; 1000 is a sanity floor.
        self.assertTrue(package.pypi_downloads > 1000)
from django.test import TestCase
from package.models import Package, Version
from pypi.slurper import Slurper
TEST_PACKAGE_NAME = 'Django'
TEST_PACKAGE_VERSION = '1.2.5'
TEST_PACKAGE_REPO_NAME = 'django-uni-form'
class SlurpAllTests(TestCase):
    """Tests for the PyPI slurper (version pinned to Django 1.2.5).

    NOTE(review): these tests query the live PyPI index (no mocking
    visible here), so they depend on network access and on the pinned
    version staying current — confirm this is acceptable.
    """
    def test_get_latest_version_number(self):
        slurper = Slurper(TEST_PACKAGE_NAME)
        version = slurper.get_latest_version_number(TEST_PACKAGE_NAME)
        self.assertEquals(version, TEST_PACKAGE_VERSION)

    def test_get_or_create_package(self):
        slurper = Slurper(TEST_PACKAGE_NAME)
        version = slurper.get_latest_version_number(TEST_PACKAGE_NAME)
        package, created = slurper.get_or_create_package(TEST_PACKAGE_NAME, version)
        # A fresh database means the package must be newly created.
        self.assertTrue(created)
        self.assertTrue(isinstance(package, Package))
        self.assertEquals(package.title, TEST_PACKAGE_NAME)
        self.assertEquals(package.slug, slugify(TEST_PACKAGE_NAME))

    def test_get_or_create_with_repo(self):
        slurper = Slurper(TEST_PACKAGE_REPO_NAME)
        version = slurper.get_latest_version_number(TEST_PACKAGE_REPO_NAME)
        package, created = slurper.get_or_create_package(TEST_PACKAGE_REPO_NAME, version)
        self.assertTrue(created)
        self.assertTrue(isinstance(package, Package))
        self.assertEquals(package.title, TEST_PACKAGE_REPO_NAME)
        self.assertEquals(package.slug, slugify(TEST_PACKAGE_REPO_NAME))

    def test_check_versions(self):
        slurper = Slurper(TEST_PACKAGE_REPO_NAME)
        version = slurper.get_latest_version_number(TEST_PACKAGE_REPO_NAME)
        # make me a package (Actually, make me a billionare)
        slurper.get_or_create_package(TEST_PACKAGE_REPO_NAME, version)
        # fetch the package for testing
        package = Package.objects.get(title=TEST_PACKAGE_REPO_NAME)
        # Download count comes from PyPI metadata; 1000 is a sanity floor.
        self.assertTrue(package.pypi_downloads > 1000)
fe797b35d4e9b3f623d60b42f38efe3bf9ac3705 | test para vistas de charlas | apps/votos/tests/tests_views.py | apps/votos/tests/tests_views.py | # -*- coding: utf-8 -*-
import unittest
from test_plus.test import TestCase
from ..factories.user import UserFactory
from ..factories.charla import CharlaFactory
from .. import constants
from ..models import Charla
class ViewsTestCase(TestCase):
def setUp(self):
self.user = UserFactory()
def test_index_eventos(self):
self.get('index')
self.response_200()
def test_agendados_eventos(self):
self.get('agendado')
self.response_200()
def test_finalizados_eventos(self):
self.get('finalizado')
self.response_200()
def test_get_registrar_charla(self):
self.assertLoginRequired('registrar_charla')
def test_post_registrar_charla(self):
with self.login(username=self.user.username, password='1234'):
response = self.post('registrar_charla',
data={"titulo": "charla 1",
"descripcion": "descripcion 1"})
self.response_200(response)
def test_user_name_in_index(self):
with self.login(username=self.user.username, password='1234'):
response = self.get("index")
self.assertContains(response,
'<span class="truncate">¡Hola! @{}</span>'.format(self.user.username),
status_code=200)
class TestCharlaView(TestCase):
def test_charlas_posibles(self):
CharlaFactory(estado=constants.ESTAOO_FINALIZADO)
qs = [CharlaFactory(estado=constants.ESTADO_POSIBLE)]
self.get('index')
charlas = self.get_context('charlas')
self.assertEqual(len(qs), charlas.count())
for i,model in enumerate(charlas):
self.assertEqual(qs[i], model)
def test_charlas_seleccionadas(self):
CharlaFactory(estado=constants.ESTAOO_FINALIZADO)
qs = [CharlaFactory(estado=constants.ESTADO_AGENDADO),
CharlaFactory(estado=constants.ESTADO_POSIBLE)]
self.get('index')
charlas = self.get_context('charlas')
self.assertEqual(len(qs), charlas.count())
for i,model in enumerate(charlas):
self.assertEqual(qs[i], model)
def test_charlas_agendadas(self):
CharlaFactory(estado=constants.ESTAOO_FINALIZADO)
CharlaFactory(estado=constants.ESTADO_POSIBLE)
qs = [CharlaFactory(estado=constants.ESTADO_AGENDADO)]
self.get('agendado')
charlas = self.get_context('charlas')
self.assertEqual(len(qs), charlas.count())
for i,model in enumerate(charlas):
self.assertEqual(qs[i], model)
def test_charlas_finalizadas(self):
CharlaFactory(estado=constants.ESTADO_AGENDADO)
CharlaFactory(estado=constants.ESTADO_POSIBLE)
qs = [CharlaFactory(estado=constants.ESTAOO_FINALIZADO)]
self.get('finalizado')
charlas = self.get_context('charlas')
self.assertEqual(len(qs), charlas.count())
for i,model in enumerate(charlas):
self.assertEqual(qs[i], model)
class ViewTemplateTestCase(TestCase):
def setUp(self):
self.user = UserFactory()
def test_index_template(self):
response = self.get("index")
self.assertTemplateUsed(response,"charla/index.html")
def test_get_registrar_charla(self):
with self.login(username=self.user.username, password='1234'):
response = self.get('registrar_charla')
self.assertTemplateUsed(response, "charla/registrar.html")
| # -*- coding: utf-8 -*-
import unittest
from test_plus.test import TestCase
from ..factories.user import UserFactory
class ViewsTestCase(TestCase):
def setUp(self):
self.user = UserFactory()
def test_posibles_eventos(self):
self.get('index')
self.response_200()
def test_agendados_eventos(self):
self.get('agendado')
self.response_200()
def test_finalizados_eventos(self):
self.get('finalizado')
self.response_200()
def test_get_registrar_charla(self):
self.assertLoginRequired('registrar_charla')
def test_post_registrar_charla(self):
with self.login(username=self.user.username, password='1234'):
response = self.post('registrar_charla',
data={"titulo": "charla 1",
"descripcion": "descripcion 1"})
self.response_200(response)
@unittest.expectedFailure
def test_fail_post_registrar_charla(self):
with self.login(username=self.user.username, password='1234'):
response = self.post('registrar_charla',
data={"titulo": "charla 1"})
self.response_200(response)
def test_user_name_in_index(self):
with self.login(username=self.user.username, password='1234'):
response = self.get("index")
self.assertContains(response,
'<span class="truncate">¡Hola! @{}</span>'.format(self.user.username),
status_code=200)
class ViewTemplateTestCase(TestCase):
def setUp(self):
self.user = UserFactory()
def test_index_template(self):
response = self.get("index")
self.assertTemplateUsed(response,"charla/index.html")
def test_get_registrar_charla(self):
with self.login(username=self.user.username, password='1234'):
response = self.get('registrar_charla')
self.assertTemplateUsed(response, "charla/registrar.html")
| Python | 0 |
fe3bb9440a46ae626c9bfd34882f3ad5823d7396 | drop unnecessary conn assignment | python/libvirt-override.py | python/libvirt-override.py | #
# Manually written part of python bindings for libvirt
#
# On cygwin, the DLL is called cygvirtmod.dll
try:
import libvirtmod
except ImportError, lib_e:
try:
import cygvirtmod as libvirtmod
except ImportError, cyg_e:
if str(cyg_e).count("No module named"):
raise lib_e
import types
# The root of all libvirt errors.
class libvirtError(Exception):
def __init__(self, defmsg, conn=None, dom=None, net=None, pool=None, vol=None):
# Never call virConnGetLastError().
# virGetLastError() is now thread local
err = virGetLastError()
if err is None:
msg = defmsg
else:
msg = err[2]
Exception.__init__(self, msg)
self.err = err
def get_error_code(self):
if self.err is None:
return None
return self.err[0]
def get_error_domain(self):
if self.err is None:
return None
return self.err[1]
def get_error_message(self):
if self.err is None:
return None
return self.err[2]
def get_error_level(self):
if self.err is None:
return None
return self.err[3]
def get_str1(self):
if self.err is None:
return None
return self.err[4]
def get_str2(self):
if self.err is None:
return None
return self.err[5]
def get_str3(self):
if self.err is None:
return None
return self.err[6]
def get_int1(self):
if self.err is None:
return None
return self.err[7]
def get_int2(self):
if self.err is None:
return None
return self.err[8]
#
# register the libvirt global error handler
#
def registerErrorHandler(f, ctx):
"""Register a Python written function to for error reporting.
The function is called back as f(ctx, error), with error
being a list of information about the error being raised.
Returns 1 in case of success."""
return libvirtmod.virRegisterErrorHandler(f,ctx)
def openAuth(uri, auth, flags):
ret = libvirtmod.virConnectOpenAuth(uri, auth, flags)
if ret is None:raise libvirtError('virConnectOpenAuth() failed')
return virConnect(_obj=ret)
#
# Return library version.
#
def getVersion (name = None):
"""If no name parameter is passed (or name is None) then the
version of the libvirt library is returned as an integer.
If a name is passed and it refers to a driver linked to the
libvirt library, then this returns a tuple of (library version,
driver version).
If the name passed refers to a non-existent driver, then you
will get the exception 'no support for hypervisor'.
Versions numbers are integers: 1000000*major + 1000*minor + release."""
if name is None:
ret = libvirtmod.virGetVersion ();
else:
ret = libvirtmod.virGetVersion (name);
if ret is None: raise libvirtError ("virGetVersion() failed")
return ret
#
# Invoke an EventHandle callback
#
def eventInvokeHandleCallback (watch, fd, event, callback, opaque):
"""
Invoke the Event Impl Handle Callback in C
"""
libvirtmod.virEventInvokeHandleCallback(watch, fd, event, callback, opaque);
#
# Invoke an EventTimeout callback
#
def eventInvokeTimeoutCallback (timer, callback, opaque):
"""
Invoke the Event Impl Timeout Callback in C
"""
libvirtmod.virEventInvokeTimeoutCallback(timer, callback, opaque);
| #
# Manually written part of python bindings for libvirt
#
# On cygwin, the DLL is called cygvirtmod.dll
try:
import libvirtmod
except ImportError, lib_e:
try:
import cygvirtmod as libvirtmod
except ImportError, cyg_e:
if str(cyg_e).count("No module named"):
raise lib_e
import types
# The root of all libvirt errors.
class libvirtError(Exception):
def __init__(self, defmsg, conn=None, dom=None, net=None, pool=None, vol=None):
if dom is not None:
conn = dom._conn
elif net is not None:
conn = net._conn
elif pool is not None:
conn = pool._conn
elif vol is not None:
conn = vol._conn
# Never call virConnGetLastError().
# virGetLastError() is now thread local
err = virGetLastError()
if err is None:
msg = defmsg
else:
msg = err[2]
Exception.__init__(self, msg)
self.err = err
def get_error_code(self):
if self.err is None:
return None
return self.err[0]
def get_error_domain(self):
if self.err is None:
return None
return self.err[1]
def get_error_message(self):
if self.err is None:
return None
return self.err[2]
def get_error_level(self):
if self.err is None:
return None
return self.err[3]
def get_str1(self):
if self.err is None:
return None
return self.err[4]
def get_str2(self):
if self.err is None:
return None
return self.err[5]
def get_str3(self):
if self.err is None:
return None
return self.err[6]
def get_int1(self):
if self.err is None:
return None
return self.err[7]
def get_int2(self):
if self.err is None:
return None
return self.err[8]
#
# register the libvirt global error handler
#
def registerErrorHandler(f, ctx):
"""Register a Python written function to for error reporting.
The function is called back as f(ctx, error), with error
being a list of information about the error being raised.
Returns 1 in case of success."""
return libvirtmod.virRegisterErrorHandler(f,ctx)
def openAuth(uri, auth, flags):
ret = libvirtmod.virConnectOpenAuth(uri, auth, flags)
if ret is None:raise libvirtError('virConnectOpenAuth() failed')
return virConnect(_obj=ret)
#
# Return library version.
#
def getVersion (name = None):
"""If no name parameter is passed (or name is None) then the
version of the libvirt library is returned as an integer.
If a name is passed and it refers to a driver linked to the
libvirt library, then this returns a tuple of (library version,
driver version).
If the name passed refers to a non-existent driver, then you
will get the exception 'no support for hypervisor'.
Versions numbers are integers: 1000000*major + 1000*minor + release."""
if name is None:
ret = libvirtmod.virGetVersion ();
else:
ret = libvirtmod.virGetVersion (name);
if ret is None: raise libvirtError ("virGetVersion() failed")
return ret
#
# Invoke an EventHandle callback
#
def eventInvokeHandleCallback (watch, fd, event, callback, opaque):
"""
Invoke the Event Impl Handle Callback in C
"""
libvirtmod.virEventInvokeHandleCallback(watch, fd, event, callback, opaque);
#
# Invoke an EventTimeout callback
#
def eventInvokeTimeoutCallback (timer, callback, opaque):
"""
Invoke the Event Impl Timeout Callback in C
"""
libvirtmod.virEventInvokeTimeoutCallback(timer, callback, opaque);
| Python | 0 |
a961e11c5b3666f2504cf2a0d46028b5957cb9bf | Fix doctest | qnet/misc/testing_tools.py | qnet/misc/testing_tools.py | """Collection of routines needed for testing. This includes proto-fixtures,
i.e. routines that should be imported and then turned into a fixture with the
pytest.fixture decorator.
See <https://pytest.org/latest/fixture.html>
"""
import os
from glob import glob
from collections import OrderedDict
from distutils import dir_util
from qnet.misc.trajectory_data import TrajectoryData
def datadir(tmpdir, request):
'''Proto-fixture responsible for searching a folder with the same name of
test module and, if available, moving all contents to a temporary directory
so tests can use them freely.
In any test, import the datadir routine and turn it into a fixture:
>>> import pytest
>>> import qnet.misc.testing_tools
>>> datadir = pytest.fixture(qnet.misc.testing_tools.datadir)
'''
# http://stackoverflow.com/questions/29627341/pytest-where-to-store-expected-data
filename = request.module.__file__
test_dir, _ = os.path.splitext(filename)
if os.path.isdir(test_dir):
dir_util.copy_tree(test_dir, str(tmpdir))
return str(tmpdir)
def qsd_traj(datadir, folder, seed):
"""Return a proto-fixture that returns a TrajectoryData instance based on
all the *.out file in the given folder (relative to the test datadir), and
with the given seed.
The returned function should be turned into a fixture:
>>> import pytest
>>> import qnet.misc.testing_tools
>>> from qnet.misc.testing_tools import qsd_traj
>>> datadir = pytest.fixture(qnet.misc.testing_tools.datadir)
>>> traj1 = pytest.fixture(qsd_traj(datadir, 'traj1', 102121))
"""
def proto_fixture(datadir):
operators = OrderedDict()
datafiles = sorted(glob(os.path.join(datadir, folder, '*.out')))
assert len(datafiles) >0, "No files *.out in %s"%folder
for file in datafiles:
op_name = os.path.splitext(os.path.split(file)[1])[0]
operators[op_name] = file
return TrajectoryData.from_qsd_data(operators, seed=seed)
import pytest # local import, so that qnet can be installed w/o pytest
return proto_fixture
| """Collection of routines needed for testing. This includes proto-fixtures,
i.e. routines that should be imported and then turned into a fixture with the
pytest.fixture decorator.
See <https://pytest.org/latest/fixture.html>
"""
import os
from glob import glob
from collections import OrderedDict
from distutils import dir_util
from qnet.misc.trajectory_data import TrajectoryData
def datadir(tmpdir, request):
'''Proto-fixture responsible for searching a folder with the same name of
test module and, if available, moving all contents to a temporary directory
so tests can use them freely.
In any test, import the datadir routine and turn it into a fixture:
>>> import pytest
>>> import qnet.misc.testing
>>> datadir = pytest.fixture(qnet.misc.testing.datadir)
'''
# http://stackoverflow.com/questions/29627341/pytest-where-to-store-expected-data
filename = request.module.__file__
test_dir, _ = os.path.splitext(filename)
if os.path.isdir(test_dir):
dir_util.copy_tree(test_dir, str(tmpdir))
return str(tmpdir)
def qsd_traj(datadir, folder, seed):
"""Return a proto-fixture that returns a TrajectoryData instance based on
all the *.out file in the given folder (relative to the test datadir), and
with the given seed.
The returned function should be turned into a fixture:
>>> import pytest
>>> import qnet.misc.testing
>>> datadir = pytest.fixture(qnet.misc.testing.datadir)
>>> traj1 = pytest.fixture(qsd_traj(datadir, 'traj1', 102121))
"""
def proto_fixture(datadir):
operators = OrderedDict()
datafiles = sorted(glob(os.path.join(datadir, folder, '*.out')))
assert len(datafiles) >0, "No files *.out in %s"%folder
for file in datafiles:
op_name = os.path.splitext(os.path.split(file)[1])[0]
operators[op_name] = file
return TrajectoryData.from_qsd_data(operators, seed=seed)
import pytest # local import, so that qnet can be installed w/o pytest
return proto_fixture
| Python | 0.000002 |
a6b49b92bd942655c0fe9a1c745e53ea19e070b5 | create a new django custom tag to replace a substring in a global string | src/alfanous-django/wui/templatetags/custom_filters.py | src/alfanous-django/wui/templatetags/custom_filters.py | '''
Created on Dec 29, 2012
@author: assem
'''
from django.template import Library
register = Library()
@register.filter
def get_range( value ):
""" make a range from a number starting of 1 """
return range( 1, value + 1 )
@register.filter
def space_split( str ):
""" split a string counting on spaces """
return str.split()
@register.simple_tag
def string_replace( string, oldword, newword ):
""" replace all occurrences of oldword in string by newword """
return string.replace( oldword, newword )
@register.simple_tag
def build_search_link( params, query, page, filter ):
""" build a search link based on a new query
usage: {% build_search_link params query filter %}link</a>
"""
# create a mutuable params object
new_params = {}
for k,v in params.items():
new_params[k]=v
# update params
new_params["page"] = page
new_params["sortedby"] = "mushaf"
if filter == "True" and params["query"] != query:
new_params["query"] = "(" + params["query"] + ") + " + query;
else:
new_params["query"] = query;
return build_params( new_params )
def build_params(params):
""" Concatenate the params to build a url GET request """
get_request = ""
for k, v in params.items():
get_request = get_request + unicode( k ) + "=" + unicode( v ) + "&"
return get_request[:-1]
| '''
Created on Dec 29, 2012
@author: assem
'''
from django.template import Library
register = Library()
@register.filter
def get_range( value ):
""" make a range from a number starting of 1 """
return range( 1, value + 1 )
@register.filter
def space_split( str ):
""" split a string counting on spaces """
return str.split()
@register.simple_tag
def build_search_link( params, query, page, filter ):
""" build a search link based on a new query
usage: {% build_search_link params query filter %}link</a>
"""
# create a mutuable params object
new_params = {}
for k,v in params.items():
new_params[k]=v
# update params
new_params["page"] = page
new_params["sortedby"] = "mushaf"
if filter == "True" and params["query"] != query:
new_params["query"] = "(" + params["query"] + ") + " + query;
else:
new_params["query"] = query;
return build_params( new_params )
def build_params(params):
""" Concatenate the params to build a url GET request """
get_request = ""
for k, v in params.items():
get_request = get_request + unicode( k ) + "=" + unicode( v ) + "&"
return get_request[:-1]
| Python | 0.000777 |
728cfe8e3c40ecd4e0128030d1d66864816626c8 | use single pipe to avoid problems with Jenkins reading them concurrently (#552) | ros_buildfarm/catkin_workspace.py | ros_buildfarm/catkin_workspace.py | # Copyright 2014-2016 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import subprocess
def ensure_workspace_exists(workspace_root):
# ensure that workspace exists
assert os.path.exists(workspace_root), \
"Workspace root '%s' does not exist" % workspace_root
source_space = os.path.join(workspace_root, 'src')
assert os.path.exists(source_space), \
"Source space '%s' does not exist" % source_space
def clean_workspace(workspace_root):
# clean up build, devel and install spaces
build_space = os.path.join(workspace_root, 'build_isolated')
if os.path.exists(build_space):
shutil.rmtree(build_space)
devel_space = os.path.join(workspace_root, 'devel_isolated')
if os.path.exists(devel_space):
shutil.rmtree(devel_space)
install_space = os.path.join(workspace_root, 'install_isolated')
if os.path.exists(install_space):
shutil.rmtree(install_space)
test_results_dir = os.path.join(workspace_root, 'test_results')
if os.path.exists(test_results_dir):
shutil.rmtree(test_results_dir)
def call_catkin_make_isolated(
rosdistro_name, workspace_root, args, parent_result_spaces=None):
# command to run
script_name = 'catkin_make_isolated'
# use script from source space if available
source_space = os.path.join(workspace_root, 'src')
script_from_source = os.path.join(
source_space, 'catkin', 'bin', script_name)
if os.path.exists(script_from_source):
script_name = script_from_source
cmd = ' '.join(
['PYTHONIOENCODING=utf_8', 'PYTHONUNBUFFERED=1', script_name] + args)
# prepend setup files if available
if parent_result_spaces is None:
parent_result_spaces = ['/opt/ros/%s' % rosdistro_name]
for parent_result_space in reversed(parent_result_spaces):
setup_file = os.path.join(parent_result_space, 'setup.sh')
if os.path.exists(setup_file):
cmd = '. %s && %s' % (setup_file, cmd)
print("Invoking '%s' in '%s'" % (cmd, workspace_root))
return subprocess.call(
cmd, cwd=workspace_root, shell=True, stderr=subprocess.STDOUT)
| # Copyright 2014-2016 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import subprocess
def ensure_workspace_exists(workspace_root):
# ensure that workspace exists
assert os.path.exists(workspace_root), \
"Workspace root '%s' does not exist" % workspace_root
source_space = os.path.join(workspace_root, 'src')
assert os.path.exists(source_space), \
"Source space '%s' does not exist" % source_space
def clean_workspace(workspace_root):
# clean up build, devel and install spaces
build_space = os.path.join(workspace_root, 'build_isolated')
if os.path.exists(build_space):
shutil.rmtree(build_space)
devel_space = os.path.join(workspace_root, 'devel_isolated')
if os.path.exists(devel_space):
shutil.rmtree(devel_space)
install_space = os.path.join(workspace_root, 'install_isolated')
if os.path.exists(install_space):
shutil.rmtree(install_space)
test_results_dir = os.path.join(workspace_root, 'test_results')
if os.path.exists(test_results_dir):
shutil.rmtree(test_results_dir)
def call_catkin_make_isolated(
rosdistro_name, workspace_root, args, parent_result_spaces=None):
# command to run
script_name = 'catkin_make_isolated'
# use script from source space if available
source_space = os.path.join(workspace_root, 'src')
script_from_source = os.path.join(
source_space, 'catkin', 'bin', script_name)
if os.path.exists(script_from_source):
script_name = script_from_source
cmd = ' '.join(
['PYTHONIOENCODING=utf_8', 'PYTHONUNBUFFERED=1', script_name] + args)
# prepend setup files if available
if parent_result_spaces is None:
parent_result_spaces = ['/opt/ros/%s' % rosdistro_name]
for parent_result_space in reversed(parent_result_spaces):
setup_file = os.path.join(parent_result_space, 'setup.sh')
if os.path.exists(setup_file):
cmd = '. %s && %s' % (setup_file, cmd)
print("Invoking '%s' in '%s'" % (cmd, workspace_root))
return subprocess.call(cmd, cwd=workspace_root, shell=True)
| Python | 0 |
8369d189e822fa7496864cac4ddc906bf7c05fe3 | Convert gaphor/UML/classes/tests/test_interface.py to pytest | gaphor/UML/classes/tests/test_interface.py | gaphor/UML/classes/tests/test_interface.py | """Test classes."""
from gaphor import UML
from gaphor.UML.classes.interface import Folded, InterfaceItem
class TestInterface:
def test_interface_creation(self, case):
"""Test interface creation."""
iface = case.create(InterfaceItem, UML.Interface)
assert isinstance(iface.subject, UML.Interface)
def test_folded_interface_persistence(self, case):
"""Test folded interface saving/loading."""
iface = case.create(InterfaceItem, UML.Interface)
# note: assembly folded mode..
iface.folded = Folded.REQUIRED
data = case.save()
case.load(data)
interfaces = list(case.diagram.select(InterfaceItem))
assert len(interfaces) == 1
# ... gives provided folded mode on load;
# correct folded mode is determined by connections, which will be
# recreated later, i.e. required folded mode will be set when
# implementation connects to the interface and Folded.PROVIDED
# is equal to interfaces[0].folded
| """Test classes."""
from gaphor import UML
from gaphor.tests import TestCase
from gaphor.UML.classes.interface import Folded, InterfaceItem
class InterfaceTestCase(TestCase):
def test_interface_creation(self):
"""Test interface creation."""
iface = self.create(InterfaceItem, UML.Interface)
assert isinstance(iface.subject, UML.Interface)
def test_folded_interface_persistence(self):
"""Test folded interface saving/loading."""
iface = self.create(InterfaceItem, UML.Interface)
# note: assembly folded mode..
iface.folded = Folded.REQUIRED
data = self.save()
self.load(data)
interfaces = list(self.diagram.select(InterfaceItem))
assert len(interfaces) == 1
# ... gives provided folded mode on load;
# correct folded mode is determined by connections, which will be
# recreated later, i.e. required folded mode will be set when
# implementation connects to the interface and Folded.PROVIDED
# is equal to interfaces[0].folded
| Python | 0.999999 |
03734b5f42a448e20f5926dd6ffc24cc40dc004e | Remove unused methods | src/foremast/pipeline/construct_pipeline_block_datapipeline.py | src/foremast/pipeline/construct_pipeline_block_datapipeline.py | # Foremast - Pipeline Tooling
#
# Copyright 2016 Gogo, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Construct a block section of Stages in a Spinnaker Pipeline."""
import copy
import logging
from pprint import pformat
from ..utils import get_template
LOG = logging.getLogger(__name__)
def construct_pipeline_block_datapipeline(env='',
generated=None,
previous_env=None,
region='us-east-1',
settings=None,
pipeline_data=None):
"""Create the Pipeline JSON from template.
This handles the common repeatable patterns in a pipeline, such as
judgement, infrastructure, tagger and qe.
Args:
env (str): Deploy environment name, e.g. dev, stage, prod.
generated (gogoutils.Generator): Gogo Application name generator.
previous_env (str): The previous deploy environment to use as
Trigger.
region (str): AWS Region to deploy to.
settings (dict): Environment settings from configurations.
Returns:
dict: Pipeline JSON template rendered with configurations.
"""
LOG.info('%s block for [%s].', env, region)
if env.startswith('prod'):
template_name = 'pipeline/pipeline_{}_datapipeline.json.j2'.format(env)
else:
template_name = 'pipeline/pipeline_stages_datapipeline.json.j2'
LOG.debug('%s info:\n%s', env, pformat(settings))
gen_app_name = generated.app_name()
data = copy.deepcopy(settings)
data['app'].update({
'appname': gen_app_name,
'repo_name': generated.repo,
'group_name': generated.project,
'environment': env,
'region': region,
'previous_env': previous_env,
'promote_restrict': pipeline_data['promote_restrict'],
'owner_email': pipeline_data['owner_email']
})
LOG.debug('Block data:\n%s', pformat(data))
pipeline_json = get_template(template_file=template_name, data=data)
return pipeline_json
| # Foremast - Pipeline Tooling
#
# Copyright 2016 Gogo, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Construct a block section of Stages in a Spinnaker Pipeline."""
import copy
import logging
from pprint import pformat
from ..utils import generate_encoded_user_data, get_template
LOG = logging.getLogger(__name__)
def construct_pipeline_block_datapipeline(env='',
generated=None,
previous_env=None,
region='us-east-1',
settings=None,
pipeline_data=None):
"""Create the Pipeline JSON from template.
This handles the common repeatable patterns in a pipeline, such as
judgement, infrastructure, tagger and qe.
Args:
env (str): Deploy environment name, e.g. dev, stage, prod.
generated (gogoutils.Generator): Gogo Application name generator.
previous_env (str): The previous deploy environment to use as
Trigger.
region (str): AWS Region to deploy to.
settings (dict): Environment settings from configurations.
Returns:
dict: Pipeline JSON template rendered with configurations.
"""
LOG.info('%s block for [%s].', env, region)
if env.startswith('prod'):
template_name = 'pipeline/pipeline_{}_datapipeline.json.j2'.format(env)
else:
template_name = 'pipeline/pipeline_stages_datapipeline.json.j2'
LOG.debug('%s info:\n%s', env, pformat(settings))
gen_app_name = generated.app_name()
data = copy.deepcopy(settings)
data['app'].update({
'appname': gen_app_name,
'repo_name': generated.repo,
'group_name': generated.project,
'environment': env,
'region': region,
'previous_env': previous_env,
'promote_restrict': pipeline_data['promote_restrict'],
'owner_email': pipeline_data['owner_email']
})
LOG.debug('Block data:\n%s', pformat(data))
pipeline_json = get_template(template_file=template_name, data=data)
return pipeline_json
| Python | 0.000034 |
a842439edb47524b64345d3a893199f3b92f2b14 | Fix top-level domain extraction from site name. | google_analytics/templatetags/analytics.py | google_analytics/templatetags/analytics.py | from django import template
from django.db import models
from django.contrib.sites.models import Site
from django.template import Context, loader
register = template.Library()
Analytics = models.get_model('google_analytics', 'analytic')
def do_get_analytics(parser, token):
try:
# split_contents() knows not to split quoted strings.
tag_name, code = token.split_contents()
except ValueError:
code = None
if not code:
current_site = Site.objects.get_current()
else:
if not (code[0] == code[-1] and code[0] in ('"', "'")):
raise template.TemplateSyntaxError, "%r tag's argument should be in quotes" % tag_name
code = code[1:-1]
current_site = None
return AnalyticsNode(current_site, code)
class AnalyticsNode(template.Node):
def __init__(self, site=None, code=None):
self.site = site
self.code = code
def render(self, context):
content = ''
if self.site:
code_set = Analytics.objects.filter(site=self.site)
if code_set:
code = code_set[0].analytics_code
else:
return ''
elif self.code:
code = self.code
else:
return ''
if code.strip() != '':
t = loader.get_template('google_analytics/analytics_template.html')
c = Context({
'analytics_code': code,
'domain': '.'.join(self.site.domain.split('.')[-2:])
})
return t.render(c)
else:
return ''
register.tag('analytics', do_get_analytics)
| from django import template
from django.db import models
from django.contrib.sites.models import Site
from django.template import Context, loader
register = template.Library()
Analytics = models.get_model('google_analytics', 'analytic')
def do_get_analytics(parser, token):
try:
# split_contents() knows not to split quoted strings.
tag_name, code = token.split_contents()
except ValueError:
code = None
if not code:
current_site = Site.objects.get_current()
else:
if not (code[0] == code[-1] and code[0] in ('"', "'")):
raise template.TemplateSyntaxError, "%r tag's argument should be in quotes" % tag_name
code = code[1:-1]
current_site = None
return AnalyticsNode(current_site, code)
class AnalyticsNode(template.Node):
def __init__(self, site=None, code=None):
self.site = site
self.code = code
def render(self, context):
content = ''
if self.site:
code_set = Analytics.objects.filter(site=self.site)
if code_set:
code = code_set[0].analytics_code
else:
return ''
elif self.code:
code = self.code
else:
return ''
if code.strip() != '':
t = loader.get_template('google_analytics/analytics_template.html')
c = Context({
'analytics_code': code,
'domain': '.'.join(self.site.domain.split('.')[2:])
})
return t.render(c)
else:
return ''
register.tag('analytics', do_get_analytics)
| Python | 0 |
0d2f35ddc27cf4c7155a4d1648c0bbfe0ff3a528 | Fix the bool name in the array API namespace | numpy/_array_api/dtypes.py | numpy/_array_api/dtypes.py | from .. import int8, int16, int32, int64, uint8, uint16, uint32, uint64, float32, float64
# Note: This name is changed
from .. import bool_ as bool
__all__ = ['int8', 'int16', 'int32', 'int64', 'uint8', 'uint16', 'uint32', 'uint64', 'float32', 'float64', 'bool']
| from .. import int8, int16, int32, int64, uint8, uint16, uint32, uint64, float32, float64, bool
__all__ = ['int8', 'int16', 'int32', 'int64', 'uint8', 'uint16', 'uint32', 'uint64', 'float32', 'float64', 'bool']
| Python | 0.99996 |
ff611c4a41cdbaf9b3306650a3d1bc4177b23bad | Update sst.py | banpei/sst.py | banpei/sst.py | import numpy as np
from banpei.base.model import Model
class SST(Model):
def __init__(self):
pass
def detect(self, data, w, m=2, k=None, L=None):
"""
Parameters
----------
data : array_like
Input array or object that can be converted to an array.
w : int
Window size
m : int
Number of basis vectors
k : int
Number of columns for the trajectory and test matrices
L : int
Lag time
Returns
-------
Numpy array contains the degree of change.
"""
# Set variables
data = self.convert_to_nparray(data)
if k is None:
k = w // 2
if L is None:
L = k // 2
T = len(data)
# Calculation range
start_cal = k + w
end_cal = T - L + 1
# Calculate the degree of change
change_scores = np.zeros(len(data))
for t in range(start_cal, end_cal + 1):
# Trajectory matrix
start_tra = t - w - k + 1
end_tra = t - w
tra_matrix = self._extract_matrix(data, start_tra, end_tra, w)
# Test matrix
start_test = start_tra + L
end_test = end_tra + L
test_matrix = self._extract_matrix(data, start_test, end_test, w)
# Singular value decomposition(SVD)
U_tra, _, _ = np.linalg.svd(tra_matrix, full_matrices=False)
U_test, _, _ = np.linalg.svd(test_matrix, full_matrices=False)
U_tra_m = U_tra[:, :m]
U_test_m = U_test[:, :m]
s = np.linalg.svd(np.dot(U_tra_m.T, U_test_m), full_matrices=False, compute_uv=False)
change_scores[t] = 1 - s[0]
return change_scores
def _extract_matrix(self, data, start, end, w):
row = w
column = end - start + 1
matrix = np.empty((row, column))
i = 0
for t in range(start, end+1):
matrix[:, i] = data[t-1:t-1+row]
i += 1
return matrix
| import numpy as np
from banpei.base.model import Model
class SST(Model):
def __init__(self):
pass
def _extract_matrix(self, data, start, end, w):
row = w
column = end - start + 1
matrix = np.empty((row, column))
i = 0
for t in range(start, end+1):
matrix[:, i] = data[t-1:t-1+row]
i += 1
return matrix
def detect(self, data, w, m=2, k=None, L=None):
"""
Parameters
----------
data : array_like
Input array or object that can be converted to an array.
w : int
Window size
m : int
Number of basis vectors
k : int
Number of columns for the trajectory and test matrices
L : int
Lag time
Returns
-------
Numpy array contains the degree of change.
"""
# Set variables
data = self.convert_to_nparray(data)
if k is None:
k = w // 2
if L is None:
L = k // 2
T = len(data)
# Calculation range
start_cal = k + w
end_cal = T - L + 1
# Calculate the degree of change
change_scores = np.zeros(len(data))
for t in range(start_cal, end_cal + 1):
# Trajectory matrix
start_tra = t - w - k + 1
end_tra = t - w
tra_matrix = self._extract_matrix(data, start_tra, end_tra, w)
# Test matrix
start_test = start_tra + L
end_test = end_tra + L
test_matrix = self._extract_matrix(data, start_test, end_test, w)
# Singular value decomposition(SVD)
U_tra, _, _ = np.linalg.svd(tra_matrix, full_matrices=False)
U_test, _, _ = np.linalg.svd(test_matrix, full_matrices=False)
U_tra_m = U_tra[:, :m]
U_test_m = U_test[:, :m]
_, s, _ = np.linalg.svd(np.dot(U_tra_m.T, U_test_m), full_matrices=False)
change_scores[t] = 1 - s[0] ** 2
return change_scores
| Python | 0.000001 |
ff268941bfc588e21a2f460c034e3c0a99837d23 | Fix migration order (post-rebase) | migrations/versions/201502111317_233928da84b2_create_video_conference_rooms.py | migrations/versions/201502111317_233928da84b2_create_video_conference_rooms.py | """Create video conference rooms
Revision ID: 233928da84b2
Revises: 50c2b5ee2726
Create Date: 2015-02-11 13:17:44.365589
"""
import sqlalchemy as sa
from alembic import op
from indico.core.db.sqlalchemy import PyIntEnum
from indico.core.db.sqlalchemy import UTCDateTime
from indico.modules.vc.models.vc_rooms import VCRoomLinkType, VCRoomStatus
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '233928da84b2'
down_revision = '5583f647dff5'
def upgrade():
op.create_table('vc_rooms',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('type', sa.String(), nullable=False),
sa.Column('name', sa.String(), nullable=False),
sa.Column('status', PyIntEnum(VCRoomStatus), nullable=False),
sa.Column('created_by_id', sa.Integer(), nullable=False, index=True),
sa.Column('created_dt', UTCDateTime, nullable=False),
sa.Column('modified_dt', UTCDateTime, nullable=True),
sa.Column('data', postgresql.JSON(), nullable=False),
sa.PrimaryKeyConstraint('id'),
schema='events')
op.create_table('vc_room_events',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('event_id', sa.Integer(), autoincrement=False, nullable=False, index=True),
sa.Column('vc_room_id', sa.Integer(), nullable=False, index=True),
sa.Column('link_type', PyIntEnum(VCRoomLinkType), nullable=False),
sa.Column('link_id', sa.String(), nullable=True),
sa.Column('show', sa.Boolean(), nullable=False),
sa.Column('data', postgresql.JSON(), nullable=False),
sa.ForeignKeyConstraint(['vc_room_id'], ['events.vc_rooms.id']),
sa.PrimaryKeyConstraint('id'),
schema='events')
def downgrade():
op.drop_table('vc_room_events', schema='events')
op.drop_table('vc_rooms', schema='events')
| """Create video conference rooms
Revision ID: 233928da84b2
Revises: 50c2b5ee2726
Create Date: 2015-02-11 13:17:44.365589
"""
import sqlalchemy as sa
from alembic import op
from indico.core.db.sqlalchemy import PyIntEnum
from indico.core.db.sqlalchemy import UTCDateTime
from indico.modules.vc.models.vc_rooms import VCRoomLinkType, VCRoomStatus
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '233928da84b2'
down_revision = '50c2b5ee2726'
def upgrade():
op.create_table('vc_rooms',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('type', sa.String(), nullable=False),
sa.Column('name', sa.String(), nullable=False),
sa.Column('status', PyIntEnum(VCRoomStatus), nullable=False),
sa.Column('created_by_id', sa.Integer(), nullable=False, index=True),
sa.Column('created_dt', UTCDateTime, nullable=False),
sa.Column('modified_dt', UTCDateTime, nullable=True),
sa.Column('data', postgresql.JSON(), nullable=False),
sa.PrimaryKeyConstraint('id'),
schema='events')
op.create_table('vc_room_events',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('event_id', sa.Integer(), autoincrement=False, nullable=False, index=True),
sa.Column('vc_room_id', sa.Integer(), nullable=False, index=True),
sa.Column('link_type', PyIntEnum(VCRoomLinkType), nullable=False),
sa.Column('link_id', sa.String(), nullable=True),
sa.Column('show', sa.Boolean(), nullable=False),
sa.Column('data', postgresql.JSON(), nullable=False),
sa.ForeignKeyConstraint(['vc_room_id'], ['events.vc_rooms.id']),
sa.PrimaryKeyConstraint('id'),
schema='events')
def downgrade():
op.drop_table('vc_room_events', schema='events')
op.drop_table('vc_rooms', schema='events')
| Python | 0 |
8d40cd3dab606d558806fa00b0ed5df73c457045 | Fix for issue #2. | bgui/frame.py | bgui/frame.py | from .gl_utils import *
from .widget import Widget, BGUI_DEFAULT
class Frame(Widget):
"""Frame for storing other widgets"""
theme_section = 'Frame'
theme_options = {
'Color1': (0, 0, 0, 0),
'Color2': (0, 0, 0, 0),
'Color3': (0, 0, 0, 0),
'Color4': (0, 0, 0, 0),
'BorderSize': 0,
'BorderColor': (0, 0, 0, 1),
}
def __init__(self, parent, name=None, border=None, aspect=None, size=[1, 1], pos=[0, 0],
sub_theme='', options=BGUI_DEFAULT):
"""
:param parent: the widget's parent
:param name: the name of the widget
:param border: the size of the border around the frame (0 for no border)
:param aspect: constrain the widget size to a specified aspect ratio
:param size: a tuple containing the width and height
:param pos: a tuple containing the x and y position
:param sub_theme: name of a sub_theme defined in the theme file (similar to CSS classes)
:param options: various other options
"""
Widget.__init__(self, parent, name, aspect, size, pos, sub_theme, options)
#: The colors for the four corners of the frame.
self.colors = [
self.theme['Color1'],
self.theme['Color2'],
self.theme['Color3'],
self.theme['Color4']
]
#: The color of the border around the frame.
self.border_color = self.theme['BorderColor']
#: The size of the border around the frame.
if border is not None:
self.border = border
else:
self.border = self.theme['BorderSize']
def _draw(self):
"""Draw the frame"""
# Enable alpha blending
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
# Enable polygon offset
glEnable(GL_POLYGON_OFFSET_FILL)
glPolygonOffset(1.0, 1.0)
glBegin(GL_QUADS)
for i in range(4):
glColor4f(self.colors[i][0], self.colors[i][1], self.colors[i][2], self.colors[i][3])
glVertex2f(self.gl_position[i][0], self.gl_position[i][1])
glEnd()
glDisable(GL_POLYGON_OFFSET_FILL)
# Draw an outline
if self.border > 0:
# border = self.border/2
r, g, b, a = self.border_color
glColor4f(r, g, b, a)
glPolygonMode(GL_FRONT, GL_LINE)
glLineWidth(self.border)
glBegin(GL_QUADS)
for i in range(4):
glVertex2f(self.gl_position[i][0], self.gl_position[i][1])
glEnd()
glLineWidth(1.0)
glPolygonMode(GL_FRONT, GL_FILL)
Widget._draw(self)
| from .gl_utils import *
from .widget import Widget, BGUI_DEFAULT
class Frame(Widget):
"""Frame for storing other widgets"""
theme_section = 'Frame'
theme_options = {
'Color1': (0, 0, 0, 0),
'Color2': (0, 0, 0, 0),
'Color3': (0, 0, 0, 0),
'Color4': (0, 0, 0, 0),
'BorderSize': 0,
'BorderColor': (0, 0, 0, 1),
}
def __init__(self, parent, name=None, border=None, aspect=None, size=[1, 1], pos=[0, 0],
sub_theme='', options=BGUI_DEFAULT):
"""
:param parent: the widget's parent
:param name: the name of the widget
:param border: the size of the border around the frame (0 for no border)
:param aspect: constrain the widget size to a specified aspect ratio
:param size: a tuple containing the width and height
:param pos: a tuple containing the x and y position
:param sub_theme: name of a sub_theme defined in the theme file (similar to CSS classes)
:param options: various other options
"""
Widget.__init__(self, parent, name, aspect, size, pos, sub_theme, options)
#: The colors for the four corners of the frame.
self.colors = [
self.theme['Color1'],
self.theme['Color2'],
self.theme['Color3'],
self.theme['Color4']
]
#: The color of the border around the frame.
self.border_color = self.theme['BorderColor']
#: The size of the border around the frame.
self.border = border if border else self.theme['BorderSize']
def _draw(self):
"""Draw the frame"""
# Enable alpha blending
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
# Enable polygon offset
glEnable(GL_POLYGON_OFFSET_FILL)
glPolygonOffset(1.0, 1.0)
glBegin(GL_QUADS)
for i in range(4):
glColor4f(self.colors[i][0], self.colors[i][1], self.colors[i][2], self.colors[i][3])
glVertex2f(self.gl_position[i][0], self.gl_position[i][1])
glEnd()
glDisable(GL_POLYGON_OFFSET_FILL)
# Draw an outline
if self.border > 0:
# border = self.border/2
r, g, b, a = self.border_color
glColor4f(r, g, b, a)
glPolygonMode(GL_FRONT, GL_LINE)
glLineWidth(self.border)
glBegin(GL_QUADS)
for i in range(4):
glVertex2f(self.gl_position[i][0], self.gl_position[i][1])
glEnd()
glLineWidth(1.0)
glPolygonMode(GL_FRONT, GL_FILL)
Widget._draw(self)
| Python | 0 |
7a0560d8bd9dcb421b54522df92618d439941e69 | Change bill detail page to use session and identifier | bills/urls.py | bills/urls.py | from . import views
from django.conf.urls import url
urlpatterns = [
url(r'^by_topic/', views.bill_list_by_topic),
url(r'^by_location', views.bill_list_by_location),
url(r'^latest_activity/', views.latest_bill_activity),
url(r'^latest/', views.latest_bill_actions),
url(r'^detail/(?P<bill_session>(.*))/(?P<bill_identifier>(.*))/$', views.bill_detail, name='bill_detail'),
]
| from . import views
from django.conf.urls import url
urlpatterns = [
url(r'^by_topic/', views.bill_list_by_topic),
url(r'^by_location', views.bill_list_by_location),
url(r'^latest_activity/', views.latest_bill_activity),
url(r'^latest/', views.latest_bill_actions),
url(r'^detail/(?P<bill_id>(.*))/$', views.bill_detail, name='bill_detail'),
]
| Python | 0 |
14e2d2282b7c95a1bb6d475faa6d827d90609e16 | Define PostAdmin list_display. | blog/admin.py | blog/admin.py | from django.contrib import admin
from .models import Post
@admin.register(Post)
class PostAdmin(admin.ModelAdmin):
list_display = ('title', 'pub_date')
| from django.contrib import admin
from .models import Post
admin.site.register(Post)
| Python | 0 |
0d389018353f03d79332a1b40d6dc1881df91cd0 | Fix sorting of items in RSS feed | blog/views.py | blog/views.py | from django.contrib.syndication.views import Feed
from django.utils.feedgenerator import Atom1Feed
from .models import BlogIndexPage, BlogPage, BlogCategory
from django.shortcuts import get_object_or_404
from django.conf import settings
def tag_view(request, tag):
index = BlogIndexPage.objects.first()
return index.serve(request, tag=tag)
def category_view(request, category):
index = BlogIndexPage.objects.first()
return index.serve(request, category=category)
def author_view(request, author):
index = BlogIndexPage.objects.first()
return index.serve(request, author=author)
class LatestEntriesFeed(Feed):
'''
If a URL ends with "rss" try to find a matching BlogIndexPage
and return its items.
'''
def get_object(self, request, blog_slug):
return get_object_or_404(BlogIndexPage, slug=blog_slug)
def title(self, blog):
if blog.seo_title:
return blog.seo_title
return blog.title
def link(self, blog):
return blog.full_url
def description(self, blog):
return blog.search_description
def items(self, blog):
num = getattr(settings, 'BLOG_PAGINATION_PER_PAGE', 10)
return blog.get_descendants().order_by('-first_published_at')[:num]
def item_title(self, item):
return item.title
def item_description(self, item):
return item.specific.body
def item_link(self, item):
return item.full_url
class LatestEntriesFeedAtom(LatestEntriesFeed):
feed_type = Atom1Feed
class LatestCategoryFeed(Feed):
description = "A Blog"
def title(self, category):
return "Blog: " + category.name
def link(self, category):
return "/blog/category/" + category.slug
def get_object(self, request, category):
return get_object_or_404(BlogCategory, slug=category)
def items(self, obj):
return BlogPage.objects.filter(
categories__category=obj).order_by('-date')[:5]
def item_title(self, item):
return item.title
def item_description(self, item):
return item.body
| from django.contrib.syndication.views import Feed
from django.utils.feedgenerator import Atom1Feed
from .models import BlogIndexPage, BlogPage, BlogCategory
from django.shortcuts import get_object_or_404
from django.conf import settings
def tag_view(request, tag):
index = BlogIndexPage.objects.first()
return index.serve(request, tag=tag)
def category_view(request, category):
index = BlogIndexPage.objects.first()
return index.serve(request, category=category)
def author_view(request, author):
index = BlogIndexPage.objects.first()
return index.serve(request, author=author)
class LatestEntriesFeed(Feed):
'''
If a URL ends with "rss" try to find a matching BlogIndexPage
and return its items.
'''
def get_object(self, request, blog_slug):
return get_object_or_404(BlogIndexPage, slug=blog_slug)
def title(self, blog):
if blog.seo_title:
return blog.seo_title
return blog.title
def link(self, blog):
return blog.full_url
def description(self, blog):
return blog.search_description
def items(self, blog):
num = getattr(settings, 'BLOG_PAGINATION_PER_PAGE', 10)
return blog.get_descendants()[:num]
def item_title(self, item):
return item.title
def item_description(self, item):
return item.specific.body
def item_link(self, item):
return item.full_url
class LatestEntriesFeedAtom(LatestEntriesFeed):
feed_type = Atom1Feed
class LatestCategoryFeed(Feed):
description = "A Blog"
def title(self, category):
return "Blog: " + category.name
def link(self, category):
return "/blog/category/" + category.slug
def get_object(self, request, category):
return get_object_or_404(BlogCategory, slug=category)
def items(self, obj):
return BlogPage.objects.filter(
categories__category=obj).order_by('-date')[:5]
def item_title(self, item):
return item.title
def item_description(self, item):
return item.body
| Python | 0 |
3e2cbd52a916b767473335427702ecf3bae5a51d | create dir if image_filter not exist | CapturePictures.py | CapturePictures.py | #!/usr/bin/env python
import cv2
import os
import argparse
def capturePicturesByCamera(num = 300, saveDir = "./image_filter/"):
"""
Capture pictures with faces detected.
Args:
num (int): The number of pictures to capture. Default: 300.
saveDir (str): The directory to save the captured pictures. Default: "./image_filter/". Note: Please make sure the directory has been created.
Returns:
void
Todo:
* Handling of file path construction.
* Disable logging of cv2.
"""
face_cascade = cv2.CascadeClassifier('opencv_config/haarcascade_frontalface_default.xml')
cap = cv2.VideoCapture(0)
count = 1
while True:
# Capture frame-by-frame
ret, frame = cap.read()
# Detect faces in the gray frame
gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray_frame, 1.3, 5)
# The frame will be saved when the faces are detected
if len(faces) > 0:
# Save frame as JPEG file
frame_file_path = saveDir + ("frame%d.jpg" % count)
cv2.imwrite(frame_file_path, frame)
print("%d picture(s) captured & saved!" % count)
count += 1
# Draw rectangles which point out the faces
for (x, y, w, h) in faces:
cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
# Display the captured frame
cv2.imshow('Camera', frame)
# Wait for 'q' on the Camera window to quit before entire capturing job finished
if cv2.waitKey(1) & 0xFF == ord('q'):
cv2.destroyAllWindows()
break
if count > num:
cv2.destroyAllWindows()
break
cap.release()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Capture pictures with faces detected.')
parser.add_argument('-n', type=int, help='the number of pictures to capture. Default: 300')
parser.add_argument('-d', type=str, help='the directory to save the captured pictures. Default: "./image_filter/". Note: Please make sure the directory has been created')
parser.set_defaults(n = 300, d = "./image_filter/")
args = parser.parse_args()
if not os.path.exists("./image_filter"):
os.makedirs('image_filter/after')
# Start the capturing
capturePicturesByCamera(args.n, args.d)
| #!/usr/bin/env python
import cv2
import sys
import argparse
def capturePicturesByCamera(num = 300, saveDir = "./image_filter/"):
"""
Capture pictures with faces detected.
Args:
num (int): The number of pictures to capture. Default: 300.
saveDir (str): The directory to save the captured pictures. Default: "./image_filter/". Note: Please make sure the directory has been created.
Returns:
void
Todo:
* Handling of file path construction.
* Disable logging of cv2.
"""
face_cascade = cv2.CascadeClassifier('opencv_config/haarcascade_frontalface_default.xml')
cap = cv2.VideoCapture(0)
count = 1
while True:
# Capture frame-by-frame
ret, frame = cap.read()
# Detect faces in the gray frame
gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray_frame, 1.3, 5)
# The frame will be saved when the faces are detected
if len(faces) > 0:
# Save frame as JPEG file
frame_file_path = saveDir + ("frame%d.jpg" % count)
cv2.imwrite(frame_file_path, frame)
print("%d picture(s) captured & saved!" % count)
count += 1
# Draw rectangles which point out the faces
for (x, y, w, h) in faces:
cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
# Display the captured frame
cv2.imshow('Camera', frame)
# Wait for 'q' on the Camera window to quit before entire capturing job finished
if cv2.waitKey(1) & 0xFF == ord('q'):
cv2.destroyAllWindows()
break
if count > num:
cv2.destroyAllWindows()
break
cap.release()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Capture pictures with faces detected.')
parser.add_argument('-n', type=int, help='the number of pictures to capture. Default: 300')
parser.add_argument('-d', type=str, help='the directory to save the captured pictures. Default: "./image_filter/". Note: Please make sure the directory has been created')
parser.set_defaults(n = 300, d = "./image_filter/")
args = parser.parse_args()
# Start the capturing
capturePicturesByCamera(args.n, args.d)
| Python | 0.000008 |
d9f623baaa8e1d1075f9132108ed7bb11eea39b0 | Replace dask.get from core.get to async.get_sync | dask/__init__.py | dask/__init__.py | from __future__ import absolute_import, division, print_function
from .core import istask
from .context import set_options
from .async import get_sync as get
try:
from .imperative import do, value
except ImportError:
pass
__version__ = '0.7.3'
| from __future__ import absolute_import, division, print_function
from .core import istask, get
from .context import set_options
try:
from .imperative import do, value
except ImportError:
pass
__version__ = '0.7.3'
| Python | 0 |
de4f02fff4b23a442abe3062c2da4c52d8823627 | Fix spurious deprecation warning for fatal_warnings (#6237) | src/python/pants/backend/jvm/subsystems/zinc_language_mixin.py | src/python/pants/backend/jvm/subsystems/zinc_language_mixin.py | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import object
from pants.base.deprecated import deprecated
class ZincLanguageMixin(object):
"""A mixin for subsystems for languages compiled with Zinc."""
@classmethod
def register_options(cls, register):
super(ZincLanguageMixin, cls).register_options(register)
# NB: This option is fingerprinted because the default value is not included in a target's
# fingerprint. This also has the effect of invalidating only the relevant tasks: ZincCompile
# in this case.
register('--strict-deps', advanced=True, default=False, fingerprint=True, type=bool,
help='The default for the "strict_deps" argument for targets of this language.')
register('--fatal-warnings', advanced=True, type=bool,
fingerprint=True,
removal_version='1.11.0.dev0',
removal_hint='Use --compiler-option-sets=fatal_warnings instead of fatal_warnings',
help='The default for the "fatal_warnings" argument for targets of this language.')
register('--compiler-option-sets', advanced=True, default=[], type=list,
fingerprint=True,
help='The default for the "compiler_option_sets" argument '
'for targets of this language.')
register('--zinc-file-manager', advanced=True, default=True, type=bool,
fingerprint=True,
help='Use zinc provided file manager to ensure transactional rollback.')
@property
def strict_deps(self):
"""When True, limits compile time deps to those that are directly declared by a target.
:rtype: bool
"""
return self.get_options().strict_deps
@property
@deprecated('1.11.0.dev0', 'Consume fatal_warnings from compiler_option_sets instead.')
def fatal_warnings(self):
"""If true, make warnings fatal for targets that do not specify fatal_warnings.
:rtype: bool
"""
return self.get_options().fatal_warnings
@property
def compiler_option_sets(self):
"""For every element in this list, enable the corresponding flags on compilation
of targets.
:rtype: list
"""
option_sets = self.get_options().compiler_option_sets
if 'fatal_warnings' not in option_sets and self.get_options().fatal_warnings:
option_sets.append('fatal_warnings')
return option_sets
@property
def zinc_file_manager(self):
"""If false, the default file manager will be used instead of the zinc provided one.
:rtype: bool
"""
return self.get_options().zinc_file_manager
| # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import object
from pants.base.deprecated import deprecated
class ZincLanguageMixin(object):
"""A mixin for subsystems for languages compiled with Zinc."""
@classmethod
def register_options(cls, register):
super(ZincLanguageMixin, cls).register_options(register)
# NB: This option is fingerprinted because the default value is not included in a target's
# fingerprint. This also has the effect of invalidating only the relevant tasks: ZincCompile
# in this case.
register('--strict-deps', advanced=True, default=False, fingerprint=True, type=bool,
help='The default for the "strict_deps" argument for targets of this language.')
register('--fatal-warnings', advanced=True, type=bool,
fingerprint=True,
removal_version='1.11.0.dev0',
removal_hint='Use --compiler-option-sets=fatal_warnings instead of fatal_warnings',
help='The default for the "fatal_warnings" argument for targets of this language.')
register('--compiler-option-sets', advanced=True, default=[], type=list,
fingerprint=True,
help='The default for the "compiler_option_sets" argument '
'for targets of this language.')
register('--zinc-file-manager', advanced=True, default=True, type=bool,
fingerprint=True,
help='Use zinc provided file manager to ensure transactional rollback.')
@property
def strict_deps(self):
"""When True, limits compile time deps to those that are directly declared by a target.
:rtype: bool
"""
return self.get_options().strict_deps
@property
@deprecated('1.11.0.dev0', 'Consume fatal_warnings from compiler_option_sets instead.')
def fatal_warnings(self):
"""If true, make warnings fatal for targets that do not specify fatal_warnings.
:rtype: bool
"""
return self.get_options().fatal_warnings
@property
def compiler_option_sets(self):
"""For every element in this list, enable the corresponding flags on compilation
of targets.
:rtype: list
"""
option_sets = self.get_options().compiler_option_sets
if 'fatal_warnings' not in option_sets and self.fatal_warnings:
option_sets.append('fatal_warnings')
return option_sets
@property
def zinc_file_manager(self):
"""If false, the default file manager will be used instead of the zinc provided one.
:rtype: bool
"""
return self.get_options().zinc_file_manager
| Python | 0 |
93b752a251b43c268a6becb53ab298e958a46aeb | add Category Field in template | awesomepose/posts/forms/post.py | awesomepose/posts/forms/post.py | from django import forms
from django.forms import ModelMultipleChoiceField
from django_summernote.widgets import SummernoteWidget, SummernoteInplaceWidget
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit, Layout, Field, Fieldset, Button, Div
from crispy_forms.bootstrap import (
PrependedText, PrependedAppendedText, FormActions)
from mptt.forms import TreeNodeChoiceField
from posts.models import Post
from categories.models import Category
class PostForm(forms.ModelForm):
category = TreeNodeChoiceField(queryset=Category.objects.all(), level_indicator='----',)
def __init__(self, *args, **kwargs):
super(PostForm, self).__init__(*args, **kwargs)
self.fields['category'].label = "카테고리"
self.fields['title'].label = "제목"
self.fields['content'].label = "상세 리뷰"
self.fields['product_url'].label = "구매 주소"
self.helper = FormHelper()
self.helper.form_method = 'POST'
self.helper.label_class = 'control-label'
self.helper.layout = Layout(
Field('category', css_class='form-control col-lg-8', placeholder="제목을 입력해 주세요"),
Field('title', css_class='form-control', placeholder="제목을 입력해 주세요"),
Field('content', css_class='form-control', ),
Field('product_url', css_class='form-control', placeholder="구매처의 주소를 붙여넣어 주세요"),
FormActions(Submit('save', '저장하기', css_class='btn btn-primary'),
Button('cancel', 'Cancel', css_class='btn btn-default')
),
)
class Meta:
model = Post
widgets = {
'title': forms.TextInput(),
'content': SummernoteInplaceWidget(
),
'product_url': forms.TextInput(),
}
fields = ['category', 'title', 'content', 'product_url']
field_classes = {
'category': TreeNodeChoiceField,
}
| from django import forms
from django_summernote.widgets import SummernoteWidget, SummernoteInplaceWidget
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit, Layout, Field, Fieldset, Button
from crispy_forms.bootstrap import (
PrependedText, PrependedAppendedText, FormActions)
from posts.models import Post
class PostForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(PostForm, self).__init__(*args, **kwargs)
self.fields['title'].label = "제목"
self.fields['content'].label = "상세 리뷰"
self.fields['product_url'].label = "구매 주소"
self.helper = FormHelper()
self.helper.form_method = 'POST'
self.helper.label_class = 'control-label'
self.helper.layout = Layout(
Field('title', css_class='form-control', placeholder="제목을 입력해 주세요"),
Field('content', css_class='form-control', ),
Field('product_url', css_class='form-control', placeholder="구매처의 주소를 붙여넣어 주세요"),
FormActions(Submit('save', '저장하기', css_class='btn btn-primary'),
Button('cancel', 'Cancel', css_class='btn btn-default')
),
)
class Meta:
model = Post
widgets = {
'title': forms.TextInput(),
'content': SummernoteInplaceWidget(
),
'product_url': forms.TextInput(),
}
fields = ['title', 'content', 'product_url']
| Python | 0 |
8a26eddf5c8d0913f15722a59542cd1dccfbbad4 | fix reference to instance var | Conficat/Config.py | Conficat/Config.py | #!/usr/bin/env python
"""
Conficat internal configuration class
"""
import os
import re
import sys
import logging
from Cheetah.ImportHooks import install as cheetah_import_install
from TemplateRegistry import TemplateRegistry
from CSVDataSource import CSVDataSource
from ConfigError import ConfigError
class Config(object):
"""Conficat configuration class. Refer to CLI.py for an example"""
def __init__(self):
super(Config, self).__init__()
self.logger=logging.getLogger("ccat.config")
self.data = CSVDataSource()
self.globtmpls=TemplateRegistry()
self.rowtmpls=TemplateRegistry()
# make sure cheetah imports dependant classes automatically
cheetah_import_install()
def addCSVPath(self, path, key=None):
"""
Add a file or directory containing tabular data in CSV format
"""
self.data.loadFromPath(path)
def addGlobalTemplatePath(self,path):
"""
Add a file or directory path containing global templates
"""
self.logger.info("adding global template(s) in \"%s\"" % path)
self.globtmpls.addPath(path)
def addRowTemplatePath(self,path):
"""
Add a file or directory path containing row templates
"""
self.logger.info("adding row template(s) in \"%s\"" % path)
self.rowtmpls.addPath(path)
def setTemplateColumns(self,tcols=[]):
"""
Set the names of the columns which may contain a reference to a row
template
"""
self.logger.info("columns for row templates: %s" % ", ".join(tcols))
self.tmplcols = tcols
def setOutputFile(self,outf):
"""
Setup either stdout or open a file as a destination for template results
"""
if outf=="-":
self.logger.info("write output to stdout")
self.outfile=sys.stdout
else:
self.logger.info("write output to file %s" % outf)
self.outfile=open(outf,"w")
def setOutputDir(self,outd):
"""
Specify the directory where additional files created via the outfile def in
templates should be placed.
"""
if not os.path.isdir(outd):
raise ConfigError("%s: Not a directory" % outd)
self.outdir=os.path.normpath(outd)
self.logger.info("write separate output files to directory %s" % outd)
def validate(self):
"""
Validate the configuration
"""
if len(self.data) == 0:
self.logger.warn("No data was loaded from any csv file")
# check templates and data
while True:
# Operation with only one global template is possible
if len(self.globtmpls) > 0:
break
# Operation with some datasource and some row templates is possible
if len(self.data) > 0 or len(self.rowtmpls) > 0:
break
# Without anything we can do nothing
raise ConfigError("Either at least one global template and/or some data and at least one row template is required.")
# check template columns if row templates specified
if len(self.rowtmpls) > 0 and len(self.tmplcols) == 0:
raise ConfigError("Row templates specified but no template columns.")
| #!/usr/bin/env python
"""
Conficat internal configuration class
"""
import os
import re
import sys
import logging
from Cheetah.ImportHooks import install as cheetah_import_install
from TemplateRegistry import TemplateRegistry
from CSVDataSource import CSVDataSource
from ConfigError import ConfigError
class Config(object):
"""Conficat configuration class. Refer to CLI.py for an example"""
def __init__(self):
super(Config, self).__init__()
self.logger=logging.getLogger("ccat.config")
self.data = CSVDataSource()
self.globtmpls=TemplateRegistry()
self.rowtmpls=TemplateRegistry()
# make sure cheetah imports dependant classes automatically
cheetah_import_install()
def addCSVPath(self, path, key=None):
"""
Add a file or directory containing tabular data in CSV format
"""
data.loadFromPath(path)
def addGlobalTemplatePath(self,path):
"""
Add a file or directory path containing global templates
"""
self.logger.info("adding global template(s) in \"%s\"" % path)
self.globtmpls.addPath(path)
def addRowTemplatePath(self,path):
"""
Add a file or directory path containing row templates
"""
self.logger.info("adding row template(s) in \"%s\"" % path)
self.rowtmpls.addPath(path)
def setTemplateColumns(self,tcols=[]):
"""
Set the names of the columns which may contain a reference to a row
template
"""
self.logger.info("columns for row templates: %s" % ", ".join(tcols))
self.tmplcols = tcols
def setOutputFile(self,outf):
"""
Setup either stdout or open a file as a destination for template results
"""
if outf=="-":
self.logger.info("write output to stdout")
self.outfile=sys.stdout
else:
self.logger.info("write output to file %s" % outf)
self.outfile=open(outf,"w")
def setOutputDir(self,outd):
"""
Specify the directory where additional files created via the outfile def in
templates should be placed.
"""
if not os.path.isdir(outd):
raise ConfigError("%s: Not a directory" % outd)
self.outdir=os.path.normpath(outd)
self.logger.info("write separate output files to directory %s" % outd)
def validate(self):
"""
Validate the configuration
"""
if len(self.data) == 0:
self.logger.warn("No data was loaded from any csv file")
# check templates and data
while True:
# Operation with only one global template is possible
if len(self.globtmpls) > 0:
break
# Operation with some datasource and some row templates is possible
if len(self.data) > 0 or len(self.rowtmpls) > 0:
break
# Without anything we can do nothing
raise ConfigError("Either at least one global template and/or some data and at least one row template is required.")
# check template columns if row templates specified
if len(self.rowtmpls) > 0 and len(self.tmplcols) == 0:
raise ConfigError("Row templates specified but no template columns.")
| Python | 0.000001 |
ad2944a49b357494ff09a729b468f2fb19934909 | remove vertically-aligned assignments, per PEP8 | guestbook/__init__.py | guestbook/__init__.py | # coding: utf-8
import shelve
from datetime import datetime
from flask import Flask, request, render_template, redirect, escape, Markup
application = Flask(__name__)
DATA_FILE = 'guestbook.dat'
def save_data(name, comment, create_at):
    """Prepend one greeting to the persistent list in the shelve file.

    Newest entries are stored first (insert at index 0).
    """
    database = shelve.open(DATA_FILE)
    try:
        # Single lookup with a default, consistent with load_data(), instead
        # of the check-then-read double lookup.
        greeting_list = database.get('greeting_list', [])
        greeting_list.insert(0, {
            'name': name,
            'comment': comment,
            'create_at': create_at
        })
        database['greeting_list'] = greeting_list
    finally:
        # Close the shelve file even if the write fails.
        database.close()
def load_data():
    """Return the stored greeting list (newest first); [] when absent."""
    db = shelve.open(DATA_FILE)
    try:
        return db.get('greeting_list', [])
    finally:
        db.close()
@application.route('/')
def index():
greeting_list = load_data()
return render_template('index.html', greeting_list=greeting_list)
@application.route('/post', methods=['POST'])
def post():
name = request.form.get('name')
comment = request.form.get('comment')
create_at = datetime.now()
save_data(name, comment, create_at)
return redirect('/')
@application.template_filter('nl2br')
def nl2br_filter(s):
return escape(s).replace('\n', Markup('<br />'))
@application.template_filter('datetime_fmt')
def datetime_fmt_filter(dt):
return dt.strftime('%Y%m%d %H:%M:%S')
def main():
application.run('127.0.0.1', 8000)
if __name__ == "__main__":
application.run('127.0.0.1', 8000, debug=True)
| # coding: utf-8
import shelve
from datetime import datetime
from flask import Flask, request, render_template, redirect, escape, Markup
application = Flask(__name__)
DATA_FILE = 'guestbook.dat'
def save_data(name, comment, create_at):
database = shelve.open(DATA_FILE)
if 'greeting_list' not in database:
greeting_list = []
else:
greeting_list = database['greeting_list']
greeting_list.insert(0, {
'name': name,
'comment': comment,
'create_at': create_at
})
database['greeting_list'] = greeting_list
database.close()
def load_data():
database = shelve.open(DATA_FILE)
greeting_list = database.get('greeting_list', [])
database.close()
return greeting_list
@application.route('/')
def index():
greeting_list = load_data()
return render_template('index.html', greeting_list=greeting_list)
@application.route('/post', methods=['POST'])
def post():
name = request.form.get('name')
comment = request.form.get('comment')
create_at = datetime.now()
save_data(name, comment, create_at)
return redirect('/')
@application.template_filter('nl2br')
def nl2br_filter(s):
return escape(s).replace('\n', Markup('<br />'))
@application.template_filter('datetime_fmt')
def datetime_fmt_filter(dt):
return dt.strftime('%Y%m%d %H:%M:%S')
def main():
application.run('127.0.0.1', 8000)
if __name__ == "__main__":
application.run('127.0.0.1', 8000, debug=True)
| Python | 0.000006 |
1c6a2f87ebd75d69857590ec3918d65ee6468b81 | Add link to docs | homeassistant/components/feedreader.py | homeassistant/components/feedreader.py | """
Support for RSS/Atom feed.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/feedreader/
"""
from datetime import datetime
from logging import getLogger
import voluptuous as vol
from homeassistant.helpers.event import track_utc_time_change
REQUIREMENTS = ['feedparser==5.2.1']
_LOGGER = getLogger(__name__)
DOMAIN = "feedreader"
EVENT_FEEDREADER = "feedreader"
# pylint: disable=no-value-for-parameter
CONFIG_SCHEMA = vol.Schema({
DOMAIN: {
'urls': [vol.Url()],
}
}, extra=vol.ALLOW_EXTRA)
# pylint: disable=too-few-public-methods
class FeedManager(object):
    """Abstraction over feedparser module."""
    def __init__(self, url, hass):
        """Initialize the FeedManager object, poll every hour."""
        self._url = url
        self._feed = None
        self._hass = hass
        # Initialize last entry timestamp as epoch time
        self._last_entry_timestamp = datetime.utcfromtimestamp(0).timetuple()
        _LOGGER.debug('Loading feed %s', self._url)
        self._update()
        # Re-poll at the top of every hour (minute=0, second=0).
        track_utc_time_change(hass, lambda now: self._update(),
                              minute=0, second=0)
    def _log_no_entries(self):
        """Send no entries log at debug level."""
        _LOGGER.debug('No new entries in feed %s', self._url)
    def _update(self):
        """Update the feed and publish new entries in the event bus."""
        import feedparser
        _LOGGER.info('Fetching new data from feed %s', self._url)
        # Hand the previously seen etag/modified values back to the server so
        # it can answer "not modified"; feedparser then yields no entries.
        self._feed = feedparser.parse(self._url,
                                      etag=None if not self._feed
                                      else self._feed.get('etag'),
                                      modified=None if not self._feed
                                      else self._feed.get('modified'))
        if not self._feed:
            _LOGGER.error('Error fetching feed data from %s', self._url)
        else:
            # bozo != 0 means the feed was malformed; such feeds are logged
            # and skipped entirely (the elif chain stops here).
            if self._feed.bozo != 0:
                _LOGGER.error('Error parsing feed %s', self._url)
            # Using etag and modified, if there's no new data available,
            # the entries list will be empty
            elif len(self._feed.entries) > 0:
                _LOGGER.debug('Entries available in feed %s', self._url)
                self._publish_new_entries()
                # Entries are assumed newest-first; remember the newest
                # timestamp for the next poll.
                self._last_entry_timestamp = \
                    self._feed.entries[0].published_parsed
            else:
                self._log_no_entries()
    def _publish_new_entries(self):
        """Publish new entries to the event bus."""
        new_entries = False
        for entry in self._feed.entries:
            # Consider only entries newer then the latest parsed one
            # NOTE(review): published_parsed looks like a time.struct_time,
            # so tuple comparison orders chronologically here -- confirm all
            # watched feeds actually provide this attribute.
            if entry.published_parsed > self._last_entry_timestamp:
                new_entries = True
                entry.update({'feed_url': self._url})
                self._hass.bus.fire(EVENT_FEEDREADER, entry)
        if not new_entries:
            self._log_no_entries()
def setup(hass, config):
    """Setup the feedreader component."""
    urls = config.get(DOMAIN)['urls']
    feeds = []
    for url in urls:
        feeds.append(FeedManager(url, hass))
    # Setup succeeds when at least one feed manager was created.
    return bool(feeds)
| """RSS/Atom feed reader for Home Assistant."""
from datetime import datetime
from logging import getLogger
import voluptuous as vol
from homeassistant.helpers.event import track_utc_time_change
REQUIREMENTS = ['feedparser==5.2.1']
_LOGGER = getLogger(__name__)
DOMAIN = "feedreader"
EVENT_FEEDREADER = "feedreader"
# pylint: disable=no-value-for-parameter
CONFIG_SCHEMA = vol.Schema({
DOMAIN: {
'urls': [vol.Url()],
}
}, extra=vol.ALLOW_EXTRA)
# pylint: disable=too-few-public-methods
class FeedManager(object):
"""Abstraction over feedparser module."""
def __init__(self, url, hass):
"""Initialize the FeedManager object, poll every hour."""
self._url = url
self._feed = None
self._hass = hass
# Initialize last entry timestamp as epoch time
self._last_entry_timestamp = datetime.utcfromtimestamp(0).timetuple()
_LOGGER.debug('Loading feed %s', self._url)
self._update()
track_utc_time_change(hass, lambda now: self._update(),
minute=0, second=0)
def _log_no_entries(self):
"""Send no entries log at debug level."""
_LOGGER.debug('No new entries in feed %s', self._url)
def _update(self):
"""Update the feed and publish new entries in the event bus."""
import feedparser
_LOGGER.info('Fetching new data from feed %s', self._url)
self._feed = feedparser.parse(self._url,
etag=None if not self._feed
else self._feed.get('etag'),
modified=None if not self._feed
else self._feed.get('modified'))
if not self._feed:
_LOGGER.error('Error fetching feed data from %s', self._url)
else:
if self._feed.bozo != 0:
_LOGGER.error('Error parsing feed %s', self._url)
# Using etag and modified, if there's no new data available,
# the entries list will be empty
elif len(self._feed.entries) > 0:
_LOGGER.debug('Entries available in feed %s', self._url)
self._publish_new_entries()
self._last_entry_timestamp = \
self._feed.entries[0].published_parsed
else:
self._log_no_entries()
def _publish_new_entries(self):
"""Publish new entries to the event bus."""
new_entries = False
for entry in self._feed.entries:
# Consider only entries newer then the latest parsed one
if entry.published_parsed > self._last_entry_timestamp:
new_entries = True
entry.update({'feed_url': self._url})
self._hass.bus.fire(EVENT_FEEDREADER, entry)
if not new_entries:
self._log_no_entries()
def setup(hass, config):
"""Setup the feedreader component."""
urls = config.get(DOMAIN)['urls']
feeds = [FeedManager(url, hass) for url in urls]
return len(feeds) > 0
| Python | 0 |
f6efb0ff31ae8d0db5682cd7ad5b0921e3a4e924 | Bump version for new release. | openstack_auth/__init__.py | openstack_auth/__init__.py | # following PEP 386
__version__ = "1.0.7"
| # following PEP 386
__version__ = "1.0.6"
| Python | 0 |
8da7edb1311b73013dcf497c293212df5e0041c7 | use new nfw potential | ophiuchus/potential/oph.py | ophiuchus/potential/oph.py | # coding: utf-8
# cython: boundscheck=False
# cython: nonecheck=False
# cython: cdivision=True
# cython: wraparound=False
# cython: profile=False
from __future__ import division, print_function
__author__ = "adrn <adrn@astro.columbia.edu>"
# Third-party
from gala.units import galactic
from gala.potential import (CCompositePotential, MiyamotoNagaiPotential,
HernquistPotential, NFWPotential)
# Project
from . import WangZhaoBarPotential
class OphiuchusPotential(CCompositePotential):
    r"""
    Four-component Milky Way potential used for modeling the Ophiuchus stream.

    Parameters
    ----------
    units : iterable
        Unique list of non-reducable units that specify (at minimum) the
        length, mass, time, and angle units.
    spheroid : dict
        Dictionary of parameter values for a :class:`gala.potential.HernquistPotential`.
    disk : dict
        Dictionary of parameter values for a :class:`gala.potential.MiyamotoNagaiPotential`.
    halo : dict
        Dictionary of parameter values for a :class:`gala.potential.NFWPotential`.
    bar : dict
        Dictionary of parameter values for a :class:`ophiuchus.potential.WangZhaoBarPotential`.
    """
    def __init__(self, units=galactic,
                 spheroid=None, disk=None, halo=None, bar=None):
        def with_defaults(defaults, overrides):
            # Fill in any parameter the caller did not supply. Unlike the
            # previous per-component copy of this loop, this does not mutate
            # the caller's dict in place.
            merged = dict(defaults)
            merged.update(overrides or {})
            return merged

        spheroid = with_defaults(dict(m=0., c=0.1), spheroid)
        disk = with_defaults(dict(m=5.E10, a=3, b=0.28), disk)  # similar to Bovy
        halo = with_defaults(dict(v_c=0.19, r_s=30., c=0.9), halo)
        # Bar parameters from Wang, Zhao, et al.
        bar = with_defaults(dict(m=1.8E10 / 1.15, r_s=1.,
                                 alpha=0.349065850398,
                                 Omega=0.06136272990322247), bar)

        super(OphiuchusPotential, self).__init__()
        self["spheroid"] = HernquistPotential(units=units, **spheroid)
        self["disk"] = MiyamotoNagaiPotential(units=units, **disk)
        self["halo"] = NFWPotential(units=units, **halo)
        self["bar"] = WangZhaoBarPotential(units=units, **bar)
| # coding: utf-8
# cython: boundscheck=False
# cython: nonecheck=False
# cython: cdivision=True
# cython: wraparound=False
# cython: profile=False
from __future__ import division, print_function
__author__ = "adrn <adrn@astro.columbia.edu>"
# Third-party
from gala.units import galactic
from gala.potential import (CCompositePotential, MiyamotoNagaiPotential,
HernquistPotential, FlattenedNFWPotential)
# Project
from . import WangZhaoBarPotential
class OphiuchusPotential(CCompositePotential):
r"""
Four-component Milky Way potential used for modeling the Ophiuchus stream.
Parameters
----------
units : iterable
Unique list of non-reducable units that specify (at minimum) the
length, mass, time, and angle units.
spheroid : dict
Dictionary of parameter values for a :class:`gala.potential.HernquistPotential`.
disk : dict
Dictionary of parameter values for a :class:`gala.potential.MiyamotoNagaiPotential`.
halo : dict
Dictionary of parameter values for a :class:`gala.potential.FlattenedNFWPotential`.
bar : dict
Dictionary of parameter values for a :class:`ophiuchus.potential.WangZhaoBarPotential`.
"""
def __init__(self, units=galactic,
spheroid=None, disk=None, halo=None, bar=None):
default_spheroid = dict(m=0., c=0.1)
default_disk = dict(m=5.E10, a=3, b=0.28) # similar to Bovy
default_halo = dict(v_c=0.19, r_s=30., q_z=0.9)
default_bar = dict(m=1.8E10 / 1.15, r_s=1.,
alpha=0.349065850398, Omega=0.06136272990322247) # from Wang, Zhao, et al.
if disk is None:
disk = default_disk
else:
for k,v in default_disk.items():
if k not in disk:
disk[k] = v
if spheroid is None:
spheroid = default_spheroid
else:
for k,v in default_spheroid.items():
if k not in spheroid:
spheroid[k] = v
if halo is None:
halo = default_halo
else:
for k,v in default_halo.items():
if k not in halo:
halo[k] = v
if bar is None:
bar = default_bar
else:
for k,v in default_bar.items():
if k not in bar:
bar[k] = v
super(OphiuchusPotential,self).__init__()
self["spheroid"] = HernquistPotential(units=units, **spheroid)
self["disk"] = MiyamotoNagaiPotential(units=units, **disk)
self["halo"] = FlattenedNFWPotential(units=units, **halo)
self["bar"] = WangZhaoBarPotential(units=units, **bar)
| Python | 0.000001 |
a66ff915dcaee3e1db370196a3b40b612eb43d19 | Add support to template_name_suffix in get_template_names methods | opps/views/generic/list.py | opps/views/generic/list.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.views.generic.list import ListView as DjangoListView
from django.contrib.sites.models import get_current_site
from django.utils import timezone
from django.conf import settings
from opps.views.generic.base import View
from opps.containers.models import ContainerBox
class ListView(View, DjangoListView):
    """Channel-aware list view resolving opps templates by specificity."""
    def get_template_names(self):
        """Return candidate template paths, most specific first.

        Order: parent-channel templates (for grouped channels), the
        channel's own templates, then domain-level fallbacks; each level
        also gets a ``*_paginated`` variant when a ``page`` query parameter
        is present and the view class is not excluded via
        ``settings.OPPS_PAGINATE_NOT_APP``.
        """
        templates = []
        domain_folder = self.get_template_folder()
        if not self.long_slug:
            # No channel slug at all: only the "none" template applies.
            templates.append('{}/none.html'.format(domain_folder))
            return templates
        list_name = 'list'
        if self.template_name_suffix:
            list_name = "{}{}".format(list_name, self.template_name_suffix)
        if self.channel:
            # Check layout, change via admin
            if self.channel.layout != u'default':
                list_name = self.channel.layout
            if self.channel.group and self.channel.parent:
                templates.append('{}/{}/{}.html'.format(
                    domain_folder, self.channel.parent.long_slug, list_name))
                if self.request.GET.get('page') and\
                   self.__class__.__name__ not in\
                   settings.OPPS_PAGINATE_NOT_APP:
                    templates.append('{}/{}/{}_paginated.html'.format(
                        domain_folder, self.channel.parent.long_slug,
                        list_name))
            if self.request.GET.get('page') and\
               self.__class__.__name__ not in settings.OPPS_PAGINATE_NOT_APP:
                templates.append('{}/{}/{}_paginated.html'.format(
                    domain_folder, self.channel.long_slug, list_name))
            templates.append('{}/{}/{}.html'.format(
                domain_folder, self.channel.long_slug, list_name))
        if self.request.GET.get('page') and\
           self.__class__.__name__ not in settings.OPPS_PAGINATE_NOT_APP:
            templates.append('{}/{}_paginated.html'.format(domain_folder,
                                                           list_name))
        templates.append('{}/{}.html'.format(domain_folder, list_name))
        return templates
    def get_queryset(self):
        """Build the published-container queryset for the current channel.

        Returns None when no channel slug resolves. On unpaginated pages,
        containers already shown inside the channel's ContainerBoxes are
        excluded to avoid duplicates.
        """
        self.site = get_current_site(self.request)
        self.long_slug = self.get_long_slug()
        if not self.long_slug:
            return None
        self.set_channel_rules()
        self.articleboxes = ContainerBox.objects.filter(
            channel__long_slug=self.long_slug)
        is_paginated = self.page_kwarg in self.request.GET
        if not is_paginated:
            # First page only: hide items that the boxes already display.
            for box in self.articleboxes:
                self.excluded_ids.update(
                    [a.pk for a in box.ordered_containers()])
        queryset = super(ListView, self).get_queryset()
        filters = {}
        filters['site_domain'] = self.site.domain
        filters['channel_long_slug__in'] = self.channel_long_slug
        filters['date_available__lte'] = timezone.now()
        filters['published'] = True
        if self.channel and self.channel.is_root_node() and not is_paginated:
            filters['show_on_root_channel'] = True
        queryset = queryset.filter(**filters).exclude(pk__in=self.excluded_ids)
        return queryset._clone()
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.views.generic.list import ListView as DjangoListView
from django.contrib.sites.models import get_current_site
from django.utils import timezone
from django.conf import settings
from opps.views.generic.base import View
from opps.containers.models import ContainerBox
class ListView(View, DjangoListView):
def get_template_names(self):
templates = []
domain_folder = self.get_template_folder()
if not self.long_slug:
templates.append('{}/none.html'.format(domain_folder))
return templates
list_name = 'list'
if self.channel:
# Check layout, change via admin
if self.channel.layout != u'default':
list_name = self.channel.layout
if self.channel.group and self.channel.parent:
templates.append('{}/{}/{}.html'.format(
domain_folder, self.channel.parent.long_slug, list_name))
if self.request.GET.get('page') and\
self.__class__.__name__ not in\
settings.OPPS_PAGINATE_NOT_APP:
templates.append('{}/{}/{}_paginated.html'.format(
domain_folder, self.channel.parent.long_slug,
list_name))
if self.request.GET.get('page') and\
self.__class__.__name__ not in settings.OPPS_PAGINATE_NOT_APP:
templates.append('{}/{}/{}_paginated.html'.format(
domain_folder, self.channel.long_slug, list_name))
templates.append('{}/{}/{}.html'.format(
domain_folder, self.channel.long_slug, list_name))
if self.request.GET.get('page') and\
self.__class__.__name__ not in settings.OPPS_PAGINATE_NOT_APP:
templates.append('{}/{}_paginated.html'.format(domain_folder,
list_name))
templates.append('{}/{}.html'.format(domain_folder, list_name))
return templates
def get_queryset(self):
self.site = get_current_site(self.request)
self.long_slug = self.get_long_slug()
if not self.long_slug:
return None
self.set_channel_rules()
self.articleboxes = ContainerBox.objects.filter(
channel__long_slug=self.long_slug)
is_paginated = self.page_kwarg in self.request.GET
if not is_paginated:
for box in self.articleboxes:
self.excluded_ids.update(
[a.pk for a in box.ordered_containers()])
queryset = super(ListView, self).get_queryset()
filters = {}
filters['site_domain'] = self.site.domain
filters['channel_long_slug__in'] = self.channel_long_slug
filters['date_available__lte'] = timezone.now()
filters['published'] = True
if self.channel and self.channel.is_root_node() and not is_paginated:
filters['show_on_root_channel'] = True
queryset = queryset.filter(**filters).exclude(pk__in=self.excluded_ids)
return queryset._clone()
| Python | 0 |
7a99ade694c5844727ca33461dd3ad5271b61f14 | Improve q_n tests. | hic/test/test_flow.py | hic/test/test_flow.py | # -*- coding: utf-8 -*-
from __future__ import division
import numpy as np
from .. import flow
def test_qn(seed=1248):
    """Exercise flow.qn on a single particle, a degenerate "uniform" case,
    and a fixed random sample with hard-coded reference values.

    :param seed: RNG seed so the hard-coded reference values reproduce.
    """
    # q_n(0) = 1
    q = flow.qn(2, 0)
    assert q == 1+0j, \
        'Incorrect single-particle q_n ({} != 1).'.format(q)
    # q_3(uniform phi) = -1
    # NOTE(review): np.arange(-np.pi, np.pi, 10) treats 10 as the STEP, so
    # this yields the single angle -pi rather than several uniform angles;
    # presumably np.linspace was intended -- confirm against flow.qn.
    q = flow.qn(3, np.arange(-np.pi, np.pi, 10))
    assert abs(q+1) < 1e-12, \
        'Incorrect isotropic q_n ({} != -1).'.format(q)
    # specific example
    np.random.seed(seed)
    phi = 2*np.pi*(np.random.rand(10) - .5)
    q = np.array([flow.qn(n, phi) for n in range(2, 5)])
    # Reference values computed once for the default seed=1248.
    correct_q = np.array((
        -0.23701789876111995+1.9307467860155012j,
        0.7294873796006498+0.4925428484240118j,
        2.0248053489550459-0.23452484252744438j
    ))
    assert np.allclose(q, correct_q), \
        'Incorrect random q_n.\n{} != {}'.format(q, correct_q)
def test_flow_cumulant():
    """Placeholder: cumulant-flow tests not yet implemented."""
    pass
| # -*- coding: utf-8 -*-
from __future__ import division
import numpy as np
from .. import flow
def test_qn():
assert flow.qn(2, 0) == 1+0j, \
'Single-particle q_n.'
assert np.allclose(flow.qn(3, np.arange(-np.pi, np.pi, 10)), -1+0j), \
'Isotropic q_n.'
def test_flow_cumulant():
pass
| Python | 0.000012 |
7c10feeed640f4d1a66bb3207ade980733409ad9 | improve unit test | witica/test_source.py | witica/test_source.py | # coding=utf-8
import os
import unittest
import pkg_resources
from witica.source import Source, SourceItemList
from witica.log import *
from witica.metadata import extractor
class TestSourceItemList(unittest.TestCase):
def setUp(self):
Logger.start(verbose=False)
self.resource_path = pkg_resources.resource_filename("witica","test/files")
source_config = {}
source_config["version"] = 1
source_config["path"] = self.resource_path
self.source = FolderSource("test", source_config)
extractor.register_default_extractors()
def tearDown(self):
extractor.registered_extractors = []
pkg_resources.cleanup_resources()
Logger.stop()
def test_match(self):
self.assertTrue(SourceItemList.match("test/*", "test/abc"))
self.assertFalse(SourceItemList.match("test/*", "test/abc/def"))
self.assertTrue(SourceItemList.match("test/**", "test/abc/def"))
self.assertTrue(SourceItemList.match("test/*/def", "test/abc/def"))
self.assertTrue(SourceItemList.match("test/**/de?", "test/abc/def"))
self.assertFalse(SourceItemList.match("test/**/def", "test/abc/ghi"))
def test_count_items(self):
self.assertEqual(9, len(self.source.items))
def test_item_exists(self):
self.assertTrue(self.source.items["simple"].exists)
class FolderSource(Source):
def __init__(self, source_id, config, prefix = ""):
super(FolderSource, self).__init__(source_id, config, prefix)
self.source_dir = config["path"]
self.state = {"cursor" : ""}
if not(os.path.exists(self.source_dir)):
raise IOError("Source folder '" + self.source_dir + "' does not exist.")
def update_cache(self):
pass
def update_change_status(self):
pass
def fetch_changes(self):
pass
def get_abs_meta_filename(self, local_filename):
return self.get_absolute_path(os.path.join('meta' + os.sep + local_filename))
def get_absolute_path(self, localpath):
return os.path.abspath(os.path.join(self.source_dir, localpath))
| # coding=utf-8
import os
import unittest
import pkg_resources
from witica.source import Source, SourceItemList
from witica.log import *
from witica.metadata import extractor
class TestSourceItemList(unittest.TestCase):
def setUp(self):
Logger.start(verbose=False)
self.resource_path = pkg_resources.resource_filename("witica","test/files")
source_config = {}
source_config["version"] = 1
source_config["path"] = self.resource_path
self.source = FolderSource("test", source_config)
extractor.register_default_extractors()
def tearDown(self):
extractor.registered_extractors = []
Logger.stop()
def test_match(self):
self.assertTrue(SourceItemList.match("test/*", "test/abc"))
self.assertFalse(SourceItemList.match("test/*", "test/abc/def"))
self.assertTrue(SourceItemList.match("test/**", "test/abc/def"))
self.assertTrue(SourceItemList.match("test/*/def", "test/abc/def"))
self.assertTrue(SourceItemList.match("test/**/de?", "test/abc/def"))
self.assertFalse(SourceItemList.match("test/**/def", "test/abc/ghi"))
def test_count_items(self):
self.assertEqual(9, len(self.source.items))
class FolderSource(Source):
def __init__(self, source_id, config, prefix = ""):
super(FolderSource, self).__init__(source_id, config, prefix)
self.source_dir = config["path"]
self.state = {"cursor" : ""}
if not(os.path.exists(self.source_dir)):
raise IOError("Source folder '" + self.source_dir + "' does not exist.")
def update_cache(self):
pass
def update_change_status(self):
pass
def fetch_changes(self):
pass
def get_abs_meta_filename(self, local_filename):
return self.get_absolute_path(os.path.join('meta' + os.sep + local_filename))
def get_absolute_path(self, localpath):
return os.path.abspath(os.path.join(self.source_dir, localpath))
| Python | 0.000002 |
1b7e6d41a6832ef7a8f9dafe0cd8580356f8e9da | check regex match before access in flickr module | mygpo/data/flickr.py | mygpo/data/flickr.py | #
# This file is part of gpodder.net.
#
# my.gpodder.org is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# my.gpodder.org is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public
# License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with my.gpodder.org. If not, see <http://www.gnu.org/licenses/>.
#
import re
import urllib
from django.conf import settings
from mygpo.core.json import json
def get_photo_sizes(photo_id):
    """Query the Flickr REST API for all available sizes of a photo.

    Returns the list of size dicts from the API response, or [] on any
    recognised failure (unexpected response wrapper or missing keys).
    """
    api_key = settings.FLICKR_API_KEY
    request = 'http://api.flickr.com/services/rest/?method=flickr.photos.getSizes&api_key=%s&photo_id=%s&format=json' % (api_key, photo_id)
    resp = urllib.urlopen(request).read()
    # Flickr wraps the JSON payload in a jsonFlickrApi(...) callback;
    # strip it (and guard against a non-matching response) before parsing.
    extract_re = '^jsonFlickrApi\((.*)\)$'
    m = re.match(extract_re, resp)
    if not m:
        return []
    resp_obj = json.loads(m.group(1))
    try:
        return resp_obj['sizes']['size']
    except KeyError:
        return []
def get_photo_id(url):
    """Extract the photo ID from a Flickr image URL.

    Returns None when the URL does not match the expected Flickr layout,
    mirroring the defensive match check in get_photo_sizes() instead of
    crashing with AttributeError on `None.group(1)`.
    """
    photo_id_re = r'http://.*flickr.com/[^/]+/([^_]+)_.*'
    m = re.match(photo_id_re, url)
    if not m:
        return None
    return m.group(1)
def is_flickr_image(url):
    """Return a truthy match object iff *url* points at a Flickr image."""
    pattern = r'flickr\.com.*\.(jpg|jpeg|png|gif)'
    return re.search(pattern, url)
def get_display_photo(url, label='Medium'):
    """Return the source URL of the size variant tagged *label*.

    Falls back to the original *url* when no such size exists.
    """
    sizes = get_photo_sizes(get_photo_id(url))
    _missing = object()
    # Lazy generator: stops at the first matching size, like the original loop.
    found = next((s['source'] for s in sizes if s['label'] == label),
                 _missing)
    return url if found is _missing else found
| #
# This file is part of gpodder.net.
#
# my.gpodder.org is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# my.gpodder.org is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public
# License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with my.gpodder.org. If not, see <http://www.gnu.org/licenses/>.
#
import re
import urllib
from django.conf import settings
from mygpo.core.json import json
def get_photo_sizes(photo_id):
api_key = settings.FLICKR_API_KEY
request = 'http://api.flickr.com/services/rest/?method=flickr.photos.getSizes&api_key=%s&photo_id=%s&format=json' % (api_key, photo_id)
resp = urllib.urlopen(request).read()
extract_re = '^jsonFlickrApi\((.*)\)$'
resp_content = re.match(extract_re, resp).group(1)
resp_obj = json.loads(resp_content)
try:
return resp_obj['sizes']['size']
except KeyError:
return []
def get_photo_id(url):
photo_id_re = 'http://.*flickr.com/[^/]+/([^_]+)_.*'
return re.match(photo_id_re, url).group(1)
def is_flickr_image(url):
return re.search('flickr\.com.*\.(jpg|jpeg|png|gif)', url)
def get_display_photo(url, label='Medium'):
photo_id = get_photo_id(url)
sizes = get_photo_sizes(photo_id)
for s in sizes:
if s['label'] == label:
return s['source']
return url
| Python | 0 |
963481958af78655e02a5d7d01e156f9b6ee506e | Correct HStoreField code | hs_core/hydro_realtime_signal_processor.py | hs_core/hydro_realtime_signal_processor.py | from django.db import models
from haystack.signals import RealtimeSignalProcessor
from haystack.exceptions import NotHandled
import logging
import types
from haystack.query import SearchQuerySet
from haystack.utils import get_identifier
logger = logging.getLogger(__name__)
class HydroRealtimeSignalProcessor(RealtimeSignalProcessor):
    """
    Customized for the fact that all indexed resources are subclasses of BaseResource.
    Notes:
    1. RealtimeSignalProcessor already plumbs in all class updates. We might want to be more specific.
    2. The class sent to this is a subclass of BaseResource, or another class.
    3. Thus, we want to capture cases in which it is an appropriate instance, and respond.
    """

    def handle_save(self, sender, instance, **kwargs):
        """
        Given an individual model instance, determine which backends the
        update should be sent to & update the object on those backends.
        """
        from hs_core.models import BaseResource, CoreMetaData, AbstractMetaDataElement
        from hs_access_control.models import ResourceAccess
        from django.contrib.postgres.fields import HStoreField
        if isinstance(instance, BaseResource):
            if hasattr(instance, 'raccess') and hasattr(instance, 'metadata'):
                # work around for failure of super(BaseResource, instance) to work properly.
                # this always succeeds because this is a post-save object action.
                newbase = BaseResource.objects.get(pk=instance.pk)
                newsender = BaseResource
                using_backends = self.connection_router.for_write(instance=newbase)
                for using in using_backends:
                    # if object is public/discoverable or becoming public/discoverable, index it
                    # test whether the object should be exposed.
                    if instance.show_in_discover:
                        try:
                            index = self.connections[using].get_unified_index().get_index(newsender)
                            index.update_object(newbase, using=using)
                        except NotHandled:
                            logger.exception("Failure: changes to %s with short_id %s not added to Solr Index.",
                                             str(type(instance)), newbase.short_id)
                    # if object is private or becoming private, delete from index
                    else:  # not to be shown in discover
                        try:
                            index = self.connections[using].get_unified_index().get_index(newsender)
                            index.remove_object(newbase, using=using)
                        except NotHandled:
                            logger.exception("Failure: delete of %s with short_id %s failed.",
                                             str(type(instance)), newbase.short_id)
        elif isinstance(instance, ResourceAccess):
            # automatically a BaseResource; just call the routine on it.
            try:
                newbase = instance.resource
                self.handle_save(BaseResource, newbase)
            except Exception as e:
                logger.exception("{} exception: {}".format(type(instance), e))
        elif isinstance(instance, CoreMetaData):
            # Bug fix: this handler previously used a bare `except Exception:`
            # but then referenced `e`, raising a NameError whenever this path
            # was hit; bind the exception explicitly.
            try:
                newbase = instance.resource
                self.handle_save(BaseResource, newbase)
            except Exception as e:
                logger.exception("{} exception: {}".format(type(instance), e))
        elif isinstance(instance, AbstractMetaDataElement):
            try:
                # resolve the BaseResource corresponding to the metadata element.
                newbase = instance.metadata.resource
                self.handle_save(BaseResource, newbase)
            except Exception as e:
                logger.exception("{} exception: {}".format(type(instance), e))
        elif isinstance(instance, HStoreField):
            # NOTE(review): model signals deliver model instances, not field
            # objects, so this branch is unlikely to ever match -- confirm
            # the intended trigger for extra_metadata changes.
            try:
                newbase = BaseResource.objects.get(extra_metadata=instance)
                self.handle_save(BaseResource, newbase)
            except Exception as e:
                logger.exception("{} exception: {}".format(type(instance), e))

    def handle_delete(self, sender, instance, **kwargs):
        """
        Ignore delete events as this is accomplished separately.
        """
        pass
| from django.db import models
from haystack.signals import RealtimeSignalProcessor
from haystack.exceptions import NotHandled
import logging
import types
from haystack.query import SearchQuerySet
from haystack.utils import get_identifier
logger = logging.getLogger(__name__)
class HydroRealtimeSignalProcessor(RealtimeSignalProcessor):
"""
Customized for the fact that all indexed resources are subclasses of BaseResource.
Notes:
1. RealtimeSignalProcessor already plumbs in all class updates. We might want to be more specific.
2. The class sent to this is a subclass of BaseResource, or another class.
3. Thus, we want to capture cases in which it is an appropriate instance, and respond.
"""
def handle_save(self, sender, instance, **kwargs):
"""
Given an individual model instance, determine which backends the
update should be sent to & update the object on those backends.
"""
from hs_core.models import BaseResource, CoreMetaData, AbstractMetaDataElement
from hs_access_control.models import ResourceAccess
if isinstance(instance, BaseResource):
if hasattr(instance, 'raccess') and hasattr(instance, 'metadata'):
# work around for failure of super(BaseResource, instance) to work properly.
# this always succeeds because this is a post-save object action.
newbase = BaseResource.objects.get(pk=instance.pk)
newsender = BaseResource
using_backends = self.connection_router.for_write(instance=newbase)
for using in using_backends:
# if object is public/discoverable or becoming public/discoverable, index it
# test whether the object should be exposed.
if instance.show_in_discover:
try:
index = self.connections[using].get_unified_index().get_index(newsender)
index.update_object(newbase, using=using)
except NotHandled:
logger.exception("Failure: changes to %s with short_id %s not added to Solr Index.",
str(type(instance)), newbase.short_id)
# if object is private or becoming private, delete from index
else: # not to be shown in discover
try:
index = self.connections[using].get_unified_index().get_index(newsender)
index.remove_object(newbase, using=using)
except NotHandled:
logger.exception("Failure: delete of %s with short_id %s failed.",
str(type(instance)), newbase.short_id)
elif isinstance(instance, ResourceAccess):
# automatically a BaseResource; just call the routine on it.
try:
newbase = instance.resource
self.handle_save(BaseResource, newbase)
except Exception as e:
logger.exception("{} exception: {}".format(type(instance), e))
elif isinstance(instance, CoreMetaData):
try:
newbase = instance.resource
self.handle_save(BaseResource, newbase)
except Exception:
logger.exception("{} exception: {}".format(type(instance), e))
elif isinstance(instance, AbstractMetaDataElement):
try:
# resolve the BaseResource corresponding to the metadata element.
newbase = instance.metadata.resource
self.handle_save(BaseResource, newbase)
except Exception as e:
logger.exception("{} exception: {}".format(type(instance), e))
else: # could be extended metadata element
try:
newbase = BaseResource.objects.get(extra_metadata=instance)
self.handle_save(BaseResource, newbase)
except Exception as e:
logger.exception("{} exception: {}".format(type(instance), e))
def handle_delete(self, sender, instance, **kwargs):
"""
Ignore delete events as this is accomplished separately.
"""
pass
| Python | 0.000041 |
81d01175a7403b3e627738056ef9436e8172e51e | Enforce python 3.6 | shared_infra/lambdas/common/setup.py | shared_infra/lambdas/common/setup.py | import os
from setuptools import find_packages, setup
def local_file(name):
return os.path.relpath(os.path.join(os.path.dirname(__file__), name))
SOURCE = local_file('src')
setup(
name='wellcome_lambda_utils',
packages=find_packages(SOURCE),
package_dir={'': SOURCE},
version='1.0.0',
install_requires=['boto'],
python_requires='>=3.6',
description='Common lib for lambdas',
author='Wellcome digital platform',
author_email='wellcomedigitalplatform@wellcome.ac.uk',
url='https://github.com/wellcometrust/platform',
keywords=['lambda', 'utils'],
classifiers=[],
)
| import os
from setuptools import find_packages, setup
def local_file(name):
return os.path.relpath(os.path.join(os.path.dirname(__file__), name))
SOURCE = local_file('src')
setup(
name='wellcome_lambda_utils',
packages=find_packages(SOURCE),
package_dir={'': SOURCE},
version='1.0.0',
install_requires=['boto'],
python_requires='>=3',
description='Common lib for lambdas',
author='Wellcome digital platform',
author_email='wellcomedigitalplatform@wellcome.ac.uk',
url='https://github.com/wellcometrust/platform',
keywords=['lambda', 'utils'],
classifiers=[],
)
| Python | 0.000187 |
3abe25d2272e2a0111511b68407da0ef3c53f59e | Use wizard settings during samba provision | nazs/samba/module.py | nazs/samba/module.py | from nazs import module
from nazs.commands import run
from nazs.sudo import root
import os
import logging
from .models import DomainSettings
logger = logging.getLogger(__name__)
class Samba(module.Module):
"""
Samba 4 module, it deploys samba AD and file server
"""
ETC_FILE = '/etc/samba/smb.conf'
install_wizard = 'samba:install'
def install(self):
"""
Installation procedure, it writes basic smb.conf and uses samba-tool to
provision the domain
"""
domain_settings = DomainSettings.get()
with root():
if os.path.exists(self.ETC_FILE):
os.remove(self.ETC_FILE)
if domain_settings.mode == 'ad':
run("samba-tool domain provision "
" --domain='zentyal' "
" --workgroup='zentyal' "
"--realm='zentyal.lan' "
"--use-xattrs=yes "
"--use-rfc2307 "
"--server-role='domain controller' "
"--use-ntvfs "
"--adminpass='foobar1!'")
elif domain_settings.mode == 'member':
# TODO
pass
| from nazs import module
from nazs.commands import run
from nazs.sudo import root
import os
import logging
logger = logging.getLogger(__name__)
class Samba(module.Module):
"""
Samba 4 module, it deploys samba AD and file server
"""
ETC_FILE = '/etc/samba/smb.conf'
install_wizard = 'samba:install'
def install(self):
"""
Installation procedure, it writes basic smb.conf and uses samba-tool to
provision the domain
"""
with root():
if os.path.exists(self.ETC_FILE):
os.remove(self.ETC_FILE)
run("samba-tool domain provision "
" --domain='zentyal' "
" --workgroup='zentyal' "
"--realm='zentyal.lan' "
"--use-xattrs=yes "
"--use-rfc2307 "
"--server-role='domain controller' "
"--use-ntvfs "
"--adminpass='foobar1!'")
| Python | 0 |
2383497f25e400aa27c600d3a30526d118e2a6dc | fix scan, follow new scan chain | host/test_register.py | host/test_register.py | from scan.scan import ScanBase
class TestRegisters(ScanBase):
def __init__(self, config_file, definition_file = None, bit_file = None, device = None, scan_identifier = "test_register", scan_data_path = None):
super(TestRegisters, self).__init__(config_file = config_file, definition_file = definition_file, bit_file = bit_file, device = device, scan_identifier = scan_identifier, scan_data_path = scan_data_path)
def scan(self, **kwargs):
number_of_errors = self.register_utils.test_global_register()
print 'Global Register Test: Found', number_of_errors, "error(s)"
number_of_errors = self.register_utils.test_pixel_register()
print 'Pixel Register Test: Found', number_of_errors, "error(s)"
sn = self.register_utils.read_chip_sn()
print "Chip S/N:", sn
if __name__ == "__main__":
import configuration
scan = TestRegisters(config_file = configuration.config_file, bit_file = configuration.bit_file, scan_data_path = configuration.scan_data_path)
scan.start()
scan.stop() | from scan.scan import ScanBase
class TestRegisters(ScanBase):
def __init__(self, config_file, definition_file = None, bit_file = None, device = None, scan_identifier = "test_register", scan_data_path = None):
super(TestRegisters, self).__init__(config_file = config_file, definition_file = definition_file, bit_file = bit_file, device = device, scan_identifier = scan_identifier, scan_data_path = scan_data_path)
def start(self, configure = True):
super(TestRegisters, self).start(configure)
number_of_errors = scan.register_utils.test_global_register()
print 'Global Register Test: Found', number_of_errors, "error(s)"
number_of_errors = scan.register_utils.test_pixel_register()
print 'Pixel Register Test: Found', number_of_errors, "error(s)"
sn = scan.register_utils.read_chip_sn()
print "Chip S/N:", sn
print 'Reset SRAM FIFO...'
scan.readout_utils.reset_sram_fifo()
print 'Done!'
if __name__ == "__main__":
import configuration
scan = TestRegisters(config_file = configuration.config_file, bit_file = configuration.bit_file, scan_data_path = configuration.scan_data_path)
scan.start()
| Python | 0 |
63ed4199e5cb3f8eb9a6b294ac8c6df12f9b5f56 | Add last_request function to httprequest module | httpretty/__init__.py | httpretty/__init__.py | # #!/usr/bin/env python
# -*- coding: utf-8 -*-
# <HTTPretty - HTTP client mock for Python>
# Copyright (C) <2011-2013> Gabriel Falcão <gabriel@nacaolivre.org>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
from __future__ import unicode_literals
__version__ = version = '0.6.0'
import sys
from .core import httpretty, httprettified
from .errors import HTTPrettyError
from .core import URIInfo
HTTPretty = httpretty
activate = httprettified
SELF = sys.modules[__name__]
for attr in [name.decode() for name in httpretty.METHODS] + ['register_uri', 'enable', 'disable', 'is_enabled', 'Response']:
setattr(SELF, attr, getattr(httpretty, attr))
def last_request():
return httpretty.last_request
| # #!/usr/bin/env python
# -*- coding: utf-8 -*-
# <HTTPretty - HTTP client mock for Python>
# Copyright (C) <2011-2013> Gabriel Falcão <gabriel@nacaolivre.org>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
from __future__ import unicode_literals
__version__ = version = '0.6.0'
import sys
from .core import httpretty, httprettified
from .errors import HTTPrettyError
from .core import URIInfo
HTTPretty = httpretty
activate = httprettified
SELF = sys.modules[__name__]
for attr in [name.decode() for name in httpretty.METHODS] + ['register_uri', 'enable', 'disable', 'is_enabled', 'Response']:
setattr(SELF, attr, getattr(httpretty, attr))
| Python | 0.000002 |
2e4a934203b4d736a4180a970cacca508400ea7e | Update runcrons Command() to timezone.now() | django_cron/management/commands/runcrons.py | django_cron/management/commands/runcrons.py | from django.core.management.base import BaseCommand
from django.conf import settings
from django.core.cache import cache
from django.utils import timezone
from django_cron import CronJobManager
from datetime import datetime
from optparse import make_option
DEFAULT_LOCK_TIME = 15*60
def get_class( kls ):
"""TODO: move to django-common app.
Converts a string to a class. Courtesy: http://stackoverflow.com/questions/452969/does-python-have-an-equivalent-to-java-class-forname/452981#452981"""
parts = kls.split('.')
module = ".".join(parts[:-1])
m = __import__( module )
for comp in parts[1:]:
m = getattr(m, comp)
return m
CRONS_TO_RUN = map(lambda x: get_class(x), settings.CRON_CLASSES)
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--force', action='store_true', help='Force cron runs'),
)
def handle(self, *args, **options):
for cron_class in CRONS_TO_RUN:
if not cache.get(cron_class.__name__):
instance = cron_class()
timeout = DEFAULT_LOCK_TIME
try:
timeout = settings.DJANGO_CRON_LOCK_TIME
except:
pass
cache.set(cron_class.__name__, timezone.now(), timeout)
CronJobManager.run(instance, options['force'])
cache.delete(cron_class.__name__)
else:
print "%s failed: lock has been found. Other cron started at %s" % (cron_class.__name__, cache.get(cron_class.__name__)) | from django.core.management.base import BaseCommand
from django.conf import settings
from django.core.cache import cache
from django_cron import CronJobManager
from datetime import datetime
from optparse import make_option
DEFAULT_LOCK_TIME = 15*60
def get_class( kls ):
"""TODO: move to django-common app.
Converts a string to a class. Courtesy: http://stackoverflow.com/questions/452969/does-python-have-an-equivalent-to-java-class-forname/452981#452981"""
parts = kls.split('.')
module = ".".join(parts[:-1])
m = __import__( module )
for comp in parts[1:]:
m = getattr(m, comp)
return m
CRONS_TO_RUN = map(lambda x: get_class(x), settings.CRON_CLASSES)
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--force', action='store_true', help='Force cron runs'),
)
def handle(self, *args, **options):
for cron_class in CRONS_TO_RUN:
if not cache.get(cron_class.__name__):
instance = cron_class()
timeout = DEFAULT_LOCK_TIME
try:
timeout = settings.DJANGO_CRON_LOCK_TIME
except:
pass
cache.set(cron_class.__name__, datetime.now(), timeout)
CronJobManager.run(instance, options['force'])
cache.delete(cron_class.__name__)
else:
print "%s failed: lock has been found. Other cron started at %s" % (cron_class.__name__, cache.get(cron_class.__name__)) | Python | 0.000001 |
abbe9b391ed32a07c5e912e3683ff7668e12eeb5 | bump to new version | octbrowser/__init__.py | octbrowser/__init__.py | __version__ = '0.4.1'
| __version__ = '0.4'
| Python | 0 |
5fb03068113ccdaebb2496f127146617f8931c02 | Add depth users to user summary | scripts/analytics/user_summary.py | scripts/analytics/user_summary.py | import pytz
import logging
from dateutil.parser import parse
from datetime import datetime, timedelta
from modularodm import Q
from website.app import init_app
from website.models import User, NodeLog
from scripts.analytics.base import SummaryAnalytics
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
LOG_THRESHOLD = 11
# Modified from scripts/analytics/depth_users.py
def count_user_logs(user):
logs = NodeLog.find(Q('user', 'eq', user._id))
length = logs.count()
if length > 0:
item = logs[0]
if item.action == 'project_created' and item.node.is_bookmark_collection:
length -= 1
return length
# Modified from scripts/analytics/depth_users.py
def get_number_of_depth_users(active_users):
depth_users = 0
for user in active_users:
log_count = count_user_logs(user)
if log_count >= LOG_THRESHOLD:
depth_users += 1
return depth_users
class UserSummary(SummaryAnalytics):
@property
def collection_name(self):
return 'user_summary'
def get_events(self, date):
super(UserSummary, self).get_events(date)
# Convert to a datetime at midnight for queries and the timestamp
timestamp_datetime = datetime(date.year, date.month, date.day).replace(tzinfo=pytz.UTC)
query_datetime = timestamp_datetime + timedelta(1)
active_users = User.find(
Q('is_registered', 'eq', True) &
Q('password', 'ne', None) &
Q('merged_by', 'eq', None) &
Q('date_disabled', 'eq', None) &
Q('date_confirmed', 'ne', None) &
Q('date_confirmed', 'lt', query_datetime)
)
depth_users = get_number_of_depth_users(active_users)
counts = {
'keen': {
'timestamp': timestamp_datetime.isoformat()
},
'status': {
'active': active_users.count(),
'depth': depth_users,
'unconfirmed': User.find(
Q('date_registered', 'lt', query_datetime) &
Q('date_confirmed', 'eq', None)
).count(),
'deactivated': User.find(
Q('date_disabled', 'ne', None) &
Q('date_disabled', 'lt', query_datetime)
).count()
}
}
logger.info(
'Users counted. Active: {}, Depth: {}, Unconfirmed: {}, Deactivated: {}'.format(
counts['status']['active'],
counts['status']['depth'],
counts['status']['unconfirmed'],
counts['status']['deactivated']
)
)
return [counts]
def get_class():
return UserSummary
if __name__ == '__main__':
init_app()
user_summary = UserSummary()
args = user_summary.parse_args()
date = parse(args.date).date() if args.date else None
events = user_summary.get_events(date)
user_summary.send_events(events)
| import pytz
import logging
from dateutil.parser import parse
from datetime import datetime, timedelta
from modularodm import Q
from website.app import init_app
from website.models import User
from scripts.analytics.base import SummaryAnalytics
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
class UserSummary(SummaryAnalytics):
@property
def collection_name(self):
return 'user_summary'
def get_events(self, date):
super(UserSummary, self).get_events(date)
# Convert to a datetime at midnight for queries and the timestamp
timestamp_datetime = datetime(date.year, date.month, date.day).replace(tzinfo=pytz.UTC)
query_datetime = timestamp_datetime + timedelta(1)
counts = {
'keen': {
'timestamp': timestamp_datetime.isoformat()
},
'status': {
'active': User.find(
Q('is_registered', 'eq', True) &
Q('password', 'ne', None) &
Q('merged_by', 'eq', None) &
Q('date_disabled', 'eq', None) &
Q('date_confirmed', 'ne', None) &
Q('date_confirmed', 'lt', query_datetime)
).count(),
'unconfirmed': User.find(
Q('date_registered', 'lt', query_datetime) &
Q('date_confirmed', 'eq', None)
).count(),
'deactivated': User.find(
Q('date_disabled', 'ne', None) &
Q('date_disabled', 'lt', query_datetime)
).count()
}
}
logger.info(
'Users counted. Active: {}, Unconfirmed: {}, Deactivated: {}'.format(
counts['status']['active'],
counts['status']['unconfirmed'],
counts['status']['deactivated']
)
)
return [counts]
def get_class():
return UserSummary
if __name__ == '__main__':
init_app()
user_summary = UserSummary()
args = user_summary.parse_args()
date = parse(args.date).date() if args.date else None
events = user_summary.get_events(date)
user_summary.send_events(events)
| Python | 0.00001 |
3582191d79646041ec589e2f1928c4cc560f5eaa | Add model, iOS, id to underlevel script. | scripts/my_export_accounts_csv.py | scripts/my_export_accounts_csv.py | #!/usr/bin/env python3
import csv
import sys
from datetime import datetime
from pathlib import Path
monocle_dir = Path(__file__).resolve().parents[1]
sys.path.append(str(monocle_dir))
from monocle.shared import ACCOUNTS
accounts_file = monocle_dir / 'accounts.csv'
try:
now = datetime.now().strftime("%Y-%m-%d-%H%M")
accounts_file.rename('accounts-{}.csv'.format(now))
except FileNotFoundError:
pass
banned = []
invalid = []
underlevel = []
with accounts_file.open('w') as csvfile:
writer = csv.writer(csvfile, delimiter=',')
writer.writerow(('username', 'password', 'provider', 'model', 'iOS', 'id'))
for account in ACCOUNTS.values():
if account.get('banned', False):
banned.append(account)
continue
if not account.get('level', False):
invalid.append(account)
continue
if account.get('level') < 2:
underlevel.append(account)
writer.writerow((account['username'],
account['password'],
account['provider'],
account['model'],
account['iOS'],
account['id']))
if banned:
banned_file = monocle_dir / 'banned.csv'
write_header = not banned_file.exists()
with banned_file.open('a') as csvfile:
writer = csv.writer(csvfile, delimiter=',')
if write_header:
writer.writerow(('username', 'password', 'provider', 'level', 'created', 'last used'))
for account in banned:
row = [account['username'], account['password'], account['provider']]
row.append(account.get('level'))
try:
row.append(datetime.fromtimestamp(account['created']).strftime('%x %X'))
except KeyError:
row.append(None)
try:
row.append(datetime.fromtimestamp(account['time']).strftime('%x %X'))
except KeyError:
row.append(None)
writer.writerow(row)
if invalid:
invalid_file = monocle_dir / 'invalid.csv'
write_header = not invalid_file.exists()
with invalid_file.open('a') as csvfile:
writer = csv.writer(csvfile, delimiter=',')
if write_header:
writer.writerow(('username', 'password', 'provider'))
for account in invalid:
row = [account['username'], account['password'], account['provider']]
writer.writerow(row)
if underlevel:
underlevel_file = monocle_dir / 'underlevel.csv'
write_header = not underlevel_file.exists()
with underlevel_file.open('a') as csvfile:
writer = csv.writer(csvfile, delimiter=',')
if write_header:
writer.writerow(('username', 'password', 'provider', 'model', 'iOS', 'id'))
for account in underlevel:
row = [account['username'], account['password'], account['provider'], account['model'], account['iOS'], account['id']]
writer.writerow(row)
print('Done!')
| #!/usr/bin/env python3
import csv
import sys
from datetime import datetime
from pathlib import Path
monocle_dir = Path(__file__).resolve().parents[1]
sys.path.append(str(monocle_dir))
from monocle.shared import ACCOUNTS
accounts_file = monocle_dir / 'accounts.csv'
try:
now = datetime.now().strftime("%Y-%m-%d-%H%M")
accounts_file.rename('accounts-{}.csv'.format(now))
except FileNotFoundError:
pass
banned = []
invalid = []
underlevel = []
with accounts_file.open('w') as csvfile:
writer = csv.writer(csvfile, delimiter=',')
writer.writerow(('username', 'password', 'provider', 'model', 'iOS', 'id'))
for account in ACCOUNTS.values():
if account.get('banned', False):
banned.append(account)
continue
if not account.get('level', False):
invalid.append(account)
continue
if account.get('level') < 2:
underlevel.append(account)
writer.writerow((account['username'],
account['password'],
account['provider'],
account['model'],
account['iOS'],
account['id']))
if banned:
banned_file = monocle_dir / 'banned.csv'
write_header = not banned_file.exists()
with banned_file.open('a') as csvfile:
writer = csv.writer(csvfile, delimiter=',')
if write_header:
writer.writerow(('username', 'password', 'provider', 'level', 'created', 'last used'))
for account in banned:
row = [account['username'], account['password'], account['provider']]
row.append(account.get('level'))
try:
row.append(datetime.fromtimestamp(account['created']).strftime('%x %X'))
except KeyError:
row.append(None)
try:
row.append(datetime.fromtimestamp(account['time']).strftime('%x %X'))
except KeyError:
row.append(None)
writer.writerow(row)
if invalid:
invalid_file = monocle_dir / 'invalid.csv'
write_header = not invalid_file.exists()
with invalid_file.open('a') as csvfile:
writer = csv.writer(csvfile, delimiter=',')
if write_header:
writer.writerow(('username', 'password', 'provider'))
for account in invalid:
row = [account['username'], account['password'], account['provider']]
writer.writerow(row)
if underlevel:
underlevel_file = monocle_dir / 'underlevel.csv'
write_header = not underlevel_file.exists()
with underlevel_file.open('a') as csvfile:
writer = csv.writer(csvfile, delimiter=',')
if write_header:
writer.writerow(('username', 'password', 'provider'))
for account in underlevel:
row = [account['username'], account['password'], account['provider'], account['model'], account['iOS'], account['id']]
writer.writerow(row)
print('Done!')
| Python | 0 |
0c1952c70358494cafcd5b6d2bbee31bdd1a5cb1 | update post code detector documentation | scrubadub/detectors/postalcode.py | scrubadub/detectors/postalcode.py | import re
from .base import RegionLocalisedRegexDetector
from ..filth.postalcode import PostalCodeFilth
class PostalCodeDetector(RegionLocalisedRegexDetector):
"""Detects postal codes, currently only British post codes are supported."""
filth_cls = PostalCodeFilth
name = 'postalcode'
region_regex = {
'GB': re.compile(r"""
(
(?:[gG][iI][rR] {0,}0[aA]{2})|
(?:
(?:
[aA][sS][cC][nN]|
[sS][tT][hH][lL]|
[tT][dD][cC][uU]|
[bB][bB][nN][dD]|
[bB][iI][qQ][qQ]|
[fF][iI][qQ][qQ]|
[pP][cC][rR][nN]|
[sS][iI][qQ][qQ]|
[iT][kK][cC][aA]
)
\ {0,}1[zZ]{2}
)|
(?:
(?:
(?:[a-pr-uwyzA-PR-UWYZ][a-hk-yxA-HK-XY]?[0-9][0-9]?)|
(?:
(?:[a-pr-uwyzA-PR-UWYZ][0-9][a-hjkstuwA-HJKSTUW])|
(?:[a-pr-uwyzA-PR-UWYZ][a-hk-yA-HK-Y][0-9][abehmnprv-yABEHMNPRV-Y])
)
)
\ {0,}[0-9][abd-hjlnp-uw-zABD-HJLNP-UW-Z]{2}
)
)
""", re.VERBOSE),
}
| import re
from .base import RegionLocalisedRegexDetector
from ..filth.postalcode import PostalCodeFilth
class PostalCodeDetector(RegionLocalisedRegexDetector):
"""Detects british postcodes."""
filth_cls = PostalCodeFilth
name = 'postalcode'
region_regex = {
'GB': re.compile(r"""
(
(?:[gG][iI][rR] {0,}0[aA]{2})|
(?:
(?:
[aA][sS][cC][nN]|
[sS][tT][hH][lL]|
[tT][dD][cC][uU]|
[bB][bB][nN][dD]|
[bB][iI][qQ][qQ]|
[fF][iI][qQ][qQ]|
[pP][cC][rR][nN]|
[sS][iI][qQ][qQ]|
[iT][kK][cC][aA]
)
\ {0,}1[zZ]{2}
)|
(?:
(?:
(?:[a-pr-uwyzA-PR-UWYZ][a-hk-yxA-HK-XY]?[0-9][0-9]?)|
(?:
(?:[a-pr-uwyzA-PR-UWYZ][0-9][a-hjkstuwA-HJKSTUW])|
(?:[a-pr-uwyzA-PR-UWYZ][a-hk-yA-HK-Y][0-9][abehmnprv-yABEHMNPRV-Y])
)
)
\ {0,}[0-9][abd-hjlnp-uw-zABD-HJLNP-UW-Z]{2}
)
)
""", re.VERBOSE),
}
| Python | 0 |
6eff7f5d614a89a298fd31f83e0a514193b1d73a | add plot for cot_globale | c2cstats/plots.py | c2cstats/plots.py | #!/usr/bin/env python2
# -*- coding:utf-8 -*-
import datetime
import string
import os.path
import numpy as np
import matplotlib.pyplot as plt
from collections import Counter
OUTPUT_DIR = "_output"
FILE_EXT = ".svg"
MONTHS = {u'janvier': 1, u'février': 2, u'mars': 3, u'avril': 4, u'mai': 5,
u'juin': 6, u'juillet': 7, u'août': 8, u'septembre': 9,
u'octobre': 10, u'novembre': 11, u'décembre': 12}
COTATION_GLOBALE = ('F', 'PD-', 'PD', 'PD+', 'AD-', 'AD', 'AD+', 'D-', 'D', 'D+',
'TD-', 'TD', 'TD+', 'ED-', 'ED', 'ED+', 'ED4', 'ED5', 'ED6', 'ED7')
def plot_all(data):
plot_date(data.date)
plot_activity(data.activity)
plot_area(data.area)
plot_cot_globale(data.cot_globale)
def get_filepath(name):
return os.path.join(OUTPUT_DIR, name+FILE_EXT)
def plot_date(data):
"Plot histogram of years"
year = [int(string.split(i)[2]) for i in data]
# n, bins, patches = plt.hist(year, max(year)-min(year)+1,
# range = (min(year)-0.5, max(year)+0.5))
n, bins = np.histogram(year, max(year) - min(year) + 1,
range=(min(year) - 0.5, max(year) + 0.5))
plt.figure()
plt.bar(bins[:-1], n)
plt.xlabel(u'Année')
plt.ylabel('Nb de sorties')
plt.title('Nb de sorties par an')
# plt.axis([min(year), max(year), 0, max(n)+1])
labels = [str(i) for i in range(min(year), max(year) + 1)]
plt.xticks(bins[:-1] + 0.4, labels)
# plt.yticks(np.arange(0,81,10))
# plt.legend( (p1[0], p2[0]), ('Men', 'Women')
plt.savefig(get_filepath('years'))
# try with plot_date
d = []
for i in data:
t = i.split(' ')
d.append(datetime.date(int(t[2]), MONTHS[t[1]], int(t[0])))
plt.figure()
plt.plot_date(d, np.ones(100))
plt.savefig(get_filepath('timeline'))
def plot_activity(data):
"Pie plot for activities"
c = Counter(data)
explode = np.zeros(len(c)) + 0.05
plt.figure()
plt.pie(c.values(), labels=c.keys(), explode=explode, shadow=True, autopct='%d')
plt.title(u'Répartition par activité')
plt.savefig(get_filepath('activities'))
def plot_area(data):
"Pie plot for areas"
c = Counter(data)
use = c.most_common(10)
labels = [k for k,v in use]
counts = [v for k,v in use]
labels.append(u'Autres')
counts.append(sum(c.values()) - sum(counts))
explode = np.zeros(len(counts)) + 0.05
plt.figure()
plt.pie(counts, labels=labels, explode=explode, shadow=True, autopct='%d')
plt.title(u'Répartition par région')
plt.savefig(get_filepath('regions'))
def plot_cot_globale(data):
"Hist plot for cot_globale"
c = Counter(data)
counts = [c[k] for k in COTATION_GLOBALE]
x = np.arange(len(counts))
plt.figure()
plt.bar(x, counts)
plt.xlabel(u'Cotation globale')
plt.xticks(x + 0.4, COTATION_GLOBALE)
plt.savefig(get_filepath('cot_global'))
| #!/usr/bin/env python2
# -*- coding:utf-8 -*-
import datetime
import string
import os.path
import numpy as np
import matplotlib.pyplot as plt
from collections import Counter
OUTPUT_DIR = "_output"
FILE_EXT = ".svg"
MONTHS = {u'janvier': 1, u'février': 2, u'mars': 3, u'avril': 4, u'mai': 5,
u'juin': 6, u'juillet': 7, u'août': 8, u'septembre': 9,
u'octobre': 10, u'novembre': 11, u'décembre': 12}
def plot_all(data):
plot_date(data.date)
plot_activity(data.activity)
plot_area(data.area)
def get_filepath(name):
return os.path.join(OUTPUT_DIR, name+FILE_EXT)
def plot_date(data):
"Plot histogram of years"
year = [int(string.split(i)[2]) for i in data]
# n, bins, patches = plt.hist(year, max(year)-min(year)+1,
# range = (min(year)-0.5, max(year)+0.5))
n, bins = np.histogram(year, max(year) - min(year) + 1,
range=(min(year) - 0.5, max(year) + 0.5))
plt.figure()
plt.bar(bins[:-1], n)
plt.xlabel(u'Année')
plt.ylabel('Nb de sorties')
plt.title('Nb de sorties par an')
# plt.axis([min(year), max(year), 0, max(n)+1])
labels = [str(i) for i in range(min(year), max(year) + 1)]
plt.xticks(bins[:-1] + 0.4, labels)
# plt.yticks(np.arange(0,81,10))
# plt.legend( (p1[0], p2[0]), ('Men', 'Women')
plt.savefig(get_filepath('years'))
# try with plot_date
d = []
for i in data:
t = i.split(' ')
d.append(datetime.date(int(t[2]), MONTHS[t[1]], int(t[0])))
plt.figure()
plt.plot_date(d, np.ones(100))
plt.savefig(get_filepath('timeline'))
def plot_activity(data):
"Pie plot for activities"
c = Counter(data)
explode = np.zeros(len(c)) + 0.05
plt.figure()
plt.pie(c.values(), labels=c.keys(), explode=explode, shadow=True, autopct='%d')
plt.title(u'Répartition par activité')
plt.savefig(get_filepath('activities'))
def plot_area(data):
"Pie plot for areas"
c = Counter(data)
use = c.most_common(10)
labels = [k for k,v in use]
counts = [v for k,v in use]
labels.append(u'Autres')
counts.append(sum(c.values()) - sum(counts))
explode = np.zeros(len(counts)) + 0.05
plt.figure()
plt.pie(counts, labels=labels, explode=explode, shadow=True, autopct='%d')
plt.title(u'Répartition par région')
plt.savefig(get_filepath('regions'))
| Python | 0.000002 |
1d2ea0c72d8700687761125e4eaf90ec52f419be | Fix ORM call and add progress check | custom/icds_reports/management/commands/update_aadhar_date.py | custom/icds_reports/management/commands/update_aadhar_date.py | from __future__ import absolute_import, print_function
from __future__ import unicode_literals
from django.core.management.base import BaseCommand
from django.db import connections
from corehq.apps.locations.models import SQLLocation
from corehq.sql_db.routers import db_for_read_write
from custom.icds_reports.models import ChildHealthMonthly
CHILD_TABLENAME = "config_report_icds-cas_static-child_health_cases_a46c129f"
PERSON_TABLENAME = "config_report_icds-cas_static-person_cases_v2_b4b5d57a"
UPDATE_QUERY = """
UPDATE "{child_tablename}" child SET
aadhar_date = person.aadhar_date
FROM "{person_tablename}" person
WHERE child.mother_id = person.doc_id AND child.supervisor_id = %(sup_id)s AND person.supervisor_id = %(sup_id)s
""".format(child_tablename=CHILD_TABLENAME, person_tablename=PERSON_TABLENAME)
def get_cursor(model):
db = db_for_read_write(model)
return connections[db].cursor()
class Command(BaseCommand):
def handle(self, *args, **options):
supervisor_ids = (
SQLLocation.objects
.filter(domain='icds-cas', location_type__name='supervisor')
.values_list('location_id', flat=True)
)
count = 0
num_ids = len(supervisor_ids)
for sup_id in supervisor_ids:
with get_cursor(ChildHealthMonthly) as cursor:
cursor.execute(UPDATE_QUERY, {"sup_id": sup_id})
count += 1
if count % 100 == 0:
print("{} / {}".format(count, num_ids))
| from __future__ import absolute_import, print_function
from __future__ import unicode_literals
from django.core.management.base import BaseCommand
from django.db import connections
from corehq.apps.locations.models import SQLLocation
from corehq.sql_db.routers import db_for_read_write
from custom.icds_reports.models import ChildHealthMonthly
CHILD_TABLENAME = "config_report_icds-cas_static-child_health_cases_a46c129f"
PERSON_TABLENAME = "config_report_icds-cas_static-person_cases_v2_b4b5d57a"
UPDATE_QUERY = """
UPDATE "{child_tablename}" child SET
aadhar_date = person.aadhar_date
FROM "{person_tablename}" person
WHERE child.mother_id = person.doc_id AND child.supervisor_id = %(sup_id)s AND person.supervisor_id = %(sup_id)s
""".format(child_tablename=CHILD_TABLENAME, person_tablename=PERSON_TABLENAME)
def get_cursor(model):
db = db_for_read_write(model)
return connections[db].cursor()
class Command(BaseCommand):
def handle(self, *args, **options):
supervisor_ids = (
SQLLocation.objects
.filter(domain='icds-cas', location_type__name='supervisor')
.values('location_id')
)
for sup_id in supervisor_ids:
with get_cursor(ChildHealthMonthly) as cursor:
cursor.execute(UPDATE_QUERY, {"sup_id": sup_id})
| Python | 0 |
e2d8a32590c0865b2a8339d86af4eb9b34ea5d20 | Update __init__.py | tendrl/node_agent/objects/cluster_message/__init__.py | tendrl/node_agent/objects/cluster_message/__init__.py | from tendrl.commons import etcdobj
from tendrl.commons.message import Message as message
from tendrl.commons import objects
class ClusterMessage(objects.BaseObject, message):
internal = True
def __init__(self, **cluster_message):
self._defs = {}
message.__init__(self, **cluster_message)
objects.BaseObject.__init__(self)
self.value = 'clusters/%s/messages/%s'
self._etcd_cls = _ClusterMessageEtcd
def save(self):
super(ClusterMessage, self).save(update=False)
class _ClusterMessageEtcd(etcdobj.EtcdObj):
"""Cluster message object, lazily updated
"""
__name__ = 'clusters/%s/messages/%s'
_tendrl_cls = ClusterMessage
def render(self):
self.__name__ = self.__name__ % (
self.cluster_id, self.message_id
)
return super(_ClusterMessageEtcd, self).render()
| from tendrl.commons import etcdobj
from tendrl.commons.message import Message as message
from tendrl.commons import objects
class ClusterMessage(objects.BaseObject, message):
internal = True
def __init__(self, **cluster_message):
self._defs = {}
message.__init__(self, **cluster_message)
objects.BaseObject.__init__(self)
self.value = 'clusters/%s/messages/%s'
self._etcd_cls = _ClusterMessageEtcd
class _ClusterMessageEtcd(etcdobj.EtcdObj):
"""Cluster message object, lazily updated
"""
__name__ = 'clusters/%s/messages/%s'
_tendrl_cls = ClusterMessage
def render(self):
self.__name__ = self.__name__ % (
self.cluster_id, self.message_id
)
return super(_ClusterMessageEtcd, self).render()
| Python | 0.000072 |
724b4c382015aa933659a24f7be3bd2cabbcb5eb | Add flag --exclusive setting whether to run as exclusive or not | sherlock.stanford.edu.run_gpaw.py | sherlock.stanford.edu.run_gpaw.py | #!/usr/bin/env python
from sys import argv
import os
job = argv[1]
nodes = argv[2]
time = argv[3] + ":00"
if '--exclusive' in argv:
is_exclusive = True
argv.remove('--exclusive')
else:
is_exclusive = False
if len(argv) > 4:
gpaw_options = ' '.join(argv[4:])
else:
gpaw_options = ' '
#options = '-l nodes=' + nodes +':ppn=2' + ' -l' +' walltime=' + time + ' -m abe'
#options = '-N ' + nodes +' -t ' + time + ' -J ' + job
options = ' -J ' + job
#dir = os.getcwd()
f = open('tmp.sh', 'w')
f.write("""\
#!/bin/bash\n""")
if is_exclusive:
f.write("""#SBATCH --exclusive\n""")
f.write("""\
#SBATCH -n %s
#SBATCH -t %s
#SBATCH -p iric,normal
# Add nodes that always fail
#SBATCH -x gpu-14-1,sh-20-35
# send email about job status changes
##SBATCH --mail-type=ALL
#Set an open-mpi parameter to suppress "fork()" warnings
# GPAW is written to use fork calls
export OMPI_MCA_mpi_warn_on_fork=0
#This next line decides which version of gpaw will be used
#source $HOME/environment_scripts/set_paths_gpaw_1.1.1b1_libxc-trunk.sh # gpaw version 1.1.1b
#source $HOME/environment_scripts/set_paths_gpaw_1.1.1b1_libxc-trunk_scalapack_libvdwxc.sh # gpaw version 1.1.1b with scalapack (does not work) and libvdwxc (works)
source $HOME/environment_scripts/set_paths_gpaw-trunk_scalapack_libvdwxc.sh # Gpaw trunk with mBEEF-vdW fixed for libvdwxc
srun `which gpaw-python` %s %s
""" % (nodes,time,job,gpaw_options))
f.close()
os.system('sbatch ' + options + ' tmp.sh')
| """This is the submission script for GPAW on Sherlock at Stanford"""
#!/usr/bin/env python
from sys import argv
import os
job = argv[1]
nodes = argv[2]
time = argv[3] + ":00"
if len(argv) > 4:
gpaw_options = ' '.join(argv[4:])
else:
gpaw_options = ' '
#options = '-l nodes=' + nodes +':ppn=2' + ' -l' +' walltime=' + time + ' -m abe'
#options = '-N ' + nodes +' -t ' + time + ' -J ' + job
options = ' -J ' + job
#dir = os.getcwd()
f = open('tmp.sh', 'w')
f.write("""\
#!/bin/bash
#SBATCH -n %s
#SBATCH -t %s
#SBATCH -p iric,normal
#SBATCH --exclusive
# Add nodes that always fail
#SBATCH -x gpu-14-1,sh-20-35
# send email about job status changes
##SBATCH --mail-type=ALL
#Set an open-mpi parameter to suppress "fork()" warnings
# GPAW is written to use fork calls
export OMPI_MCA_mpi_warn_on_fork=0
#This next line decides which version of gpaw will be used
source $HOME/environment_scripts/set_paths_gpaw-trunk_scalapack_libvdwxc.sh # Gpaw trunk with mBEEF-vdW fixed for libvdwxc
srun `which gpaw-python` %s %s
""" % (nodes,time,job,gpaw_options))
f.close()
os.system('sbatch ' + options + ' tmp.sh')
| Python | 0 |
31cec1c5ab052f237445b8969088aba755ae73cf | Clean up now-unnecessary DummyStorage(). | incuna_test_utils/testcases/integration.py | incuna_test_utils/testcases/integration.py | from django.core.exceptions import ImproperlyConfigured
from django.shortcuts import render
from .request import BaseRequestTestCase
class BaseIntegrationTestCase(BaseRequestTestCase):
"""
A TestCase that operates similarly to a Selenium test.
Contains methods that access pages and render them to strings full of
HTML. Can be used to assert the contents of templates as well as doing
normal TestCase things.
Must be subclassed with the following attributes in order to work:
* user_factory
* view_class (class-based view) or view (method-based view)
"""
def get_view(self):
"""
Returns the class's attached view.
Checks self.view_class, then self.view. Throws an ImproperlyConfigured
exception if neither exist.
"""
try:
return self.view_class.as_view()
except AttributeError:
# Continue on to the next try/catch
pass
try:
return self.view
except AttributeError:
message = "This test must have a 'view_class' or 'view' attribute."
raise ImproperlyConfigured(message)
def access_view(self, *args, request=None, **kwargs):
"""
Helper method that accesses the test's view.
Accepts a request parameter, which can be None. If it is, this method
creates a basic request on your behalf.
Returns a HTTPResponse object with the request (created or otherwise)
attached.
"""
if request is None:
request = self.create_request()
view = self.get_view()
response = view(request, *args, **kwargs)
# Add the request to the response.
# This is a weird-looking but compact way of ensuring we have access to
# the request everywhere we need it, without doing clunky things like
# returning tuples all the time.
response.request = request
return response
def render_to_str(self, response, request=None):
"""
Render a HTTPResponse into a string that holds the HTML content.
Accepts an optional request parameter, and looks for a request attached
to the response if the optional parameter isn't specified.
"""
if request is None:
request = response.request
response = render(request, response.template_name, response.context_data)
return str(response.content)
def access_view_and_render_response(self, *view_args, request=None, expected_status=200, **view_kwargs):
"""
Accesses the view and returns a string of HTML.
Combines access_view, an assertion on the returned status, and
render_to_str.
Accepts an optional request (but will create a simple one if the
parameter isn't supplied), an expected status code for the response
(which defaults to 200), and args and kwargs for the view method.
"""
response = self.access_view(*view_args, request=request, **view_kwargs)
# Assert that the response has the correct status code before we go
# any further. Throwing accurately descriptive failures when something
# goes wrong is better than trying to run assertions on the content
# of a HTML response for some random 404 page.
self.assertEqual(expected_status, response.status_code)
# Render the response and return it.
return self.render_to_str(response)
def assert_count(self, needle, haystack, count):
"""
Assert that 'needle' occurs exactly 'count' times in 'haystack'.
Used as a snazzier, stricter version of unittest.assertIn.
Outputs a verbose error message when it fails.
"""
actual_count = haystack.count(needle)
# Build a verbose error message in case we need it.
plural = '' if count == 1 else 's'
message = 'Expected {count} instance{plural} of {needle}, but found {actual_count}, in {haystack}'
message = message.format_map(locals())
# Make the assertion.
self.assertEqual(count, actual_count, message)
| from django.core.exceptions import ImproperlyConfigured
from django.shortcuts import render
from .request import BaseRequestTestCase
class BaseIntegrationTestCase(BaseRequestTestCase):
"""
A TestCase that operates similarly to a Selenium test.
Contains methods that access pages and render them to strings full of
HTML. Can be used to assert the contents of templates as well as doing
normal TestCase things.
Must be subclassed with the following attributes in order to work:
* user_factory
* view_class (class-based view) or view (method-based view)
"""
def get_view(self):
"""
Returns the class's attached view.
Checks self.view_class, then self.view. Throws an ImproperlyConfigured
exception if neither exist.
"""
try:
return self.view_class.as_view()
except AttributeError:
# Continue on to the next try/catch
pass
try:
return self.view
except AttributeError:
message = "This test must have a 'view_class' or 'view' attribute."
raise ImproperlyConfigured(message)
def access_view(self, *args, request=None, **kwargs):
"""
Helper method that accesses the test's view.
Accepts a request parameter, which can be None. If it is, this method
creates a basic request on your behalf.
Returns a HTTPResponse object with the request (created or otherwise)
attached.
"""
if request is None:
request = self.create_request()
request._messages = DummyStorage()
view = self.get_view()
response = view(request, *args, **kwargs)
# Add the request to the response.
# This is a weird-looking but compact way of ensuring we have access to
# the request everywhere we need it, without doing clunky things like
# returning tuples all the time.
response.request = request
return response
def render_to_str(self, response, request=None):
"""
Render a HTTPResponse into a string that holds the HTML content.
Accepts an optional request parameter, and looks for a request attached
to the response if the optional parameter isn't specified.
"""
if request is None:
request = response.request
response = render(request, response.template_name, response.context_data)
return str(response.content)
def access_view_and_render_response(self, *view_args, request=None, expected_status=200, **view_kwargs):
"""
Accesses the view and returns a string of HTML.
Combines access_view, an assertion on the returned status, and
render_to_str.
Accepts an optional request (but will create a simple one if the
parameter isn't supplied), an expected status code for the response
(which defaults to 200), and args and kwargs for the view method.
"""
response = self.access_view(*view_args, request=request, **view_kwargs)
# Assert that the response has the correct status code before we go
# any further. Throwing accurately descriptive failures when something
# goes wrong is better than trying to run assertions on the content
# of a HTML response for some random 404 page.
self.assertEqual(expected_status, response.status_code)
# Render the response and return it.
return self.render_to_str(response)
def assert_count(self, needle, haystack, count):
"""
Assert that 'needle' occurs exactly 'count' times in 'haystack'.
Used as a snazzier, stricter version of unittest.assertIn.
Outputs a verbose error message when it fails.
"""
actual_count = haystack.count(needle)
# Build a verbose error message in case we need it.
plural = '' if count == 1 else 's'
message = 'Expected {count} instance{plural} of {needle}, but found {actual_count}, in {haystack}'
message = message.format_map(locals())
# Make the assertion.
self.assertEqual(count, actual_count, message)
| Python | 0 |
ec25f9c1b0212f1f23855eab22078d1563cd7165 | Use int for survey_id and question_id | indico/modules/events/surveys/blueprint.py | indico/modules/events/surveys/blueprint.py | # This file is part of Indico.
# Copyright (C) 2002 - 2015 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from indico.modules.events.surveys.controllers.management import (RHManageSurveys, RHCreateSurvey, RHManageSurvey,
RHEditSurvey, RHScheduleSurvey, RHStartSurvey,
RHEndSurvey, RHManageSurveyQuestionnaire,
RHAddSurveyQuestion, RHEditSurveyQuestion,
RHDeleteSurveyQuestion, RHChangeQuestionPosition)
from indico.web.flask.wrappers import IndicoBlueprint
_bp = IndicoBlueprint('survey', __name__, template_folder='templates', virtual_template_folder='events/surveys',
url_prefix='/event/<confId>', event_feature='surveys')
# surveys management
_bp.add_url_rule('/manage/surveys/', 'management', RHManageSurveys)
_bp.add_url_rule('/manage/surveys/create', 'create', RHCreateSurvey, methods=('GET', 'POST'))
# Single survey management
_bp.add_url_rule('/manage/surveys/<int:survey_id>/', 'manage_survey', RHManageSurvey)
_bp.add_url_rule('/manage/surveys/<int:survey_id>/edit', 'edit_survey', RHEditSurvey, methods=('GET', 'POST'))
_bp.add_url_rule('/manage/surveys/<int:survey_id>/schedule', 'schedule_survey', RHScheduleSurvey,
methods=('GET', 'POST'))
_bp.add_url_rule('/manage/surveys/<int:survey_id>/start', 'start_survey', RHStartSurvey, methods=('POST',))
_bp.add_url_rule('/manage/surveys/<int:survey_id>/end', 'end_survey', RHEndSurvey, methods=('POST',))
# Survey question management
_bp.add_url_rule('/manage/surveys/<int:survey_id>/questionnaire/', 'manage_questionnaire', RHManageSurveyQuestionnaire)
_bp.add_url_rule('/manage/surveys/<int:survey_id>/questionnaire/add/<type>', 'add_question', RHAddSurveyQuestion,
methods=('GET', 'POST'))
_bp.add_url_rule('/manage/surveys/<int:survey_id>/questionnaire/<int:question_id>', 'edit_question',
RHEditSurveyQuestion, methods=('GET', 'POST'))
_bp.add_url_rule('/manage/surveys/<int:survey_id>/questionnaire/<int:question_id>/delete', 'remove_question',
RHDeleteSurveyQuestion, methods=('POST',))
_bp.add_url_rule('/manage/surveys/<int:survey_id>/questionnaire/change-positions', 'change_question_position',
RHChangeQuestionPosition, methods=('POST',))
| # This file is part of Indico.
# Copyright (C) 2002 - 2015 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from indico.modules.events.surveys.controllers.management import (RHManageSurveys, RHCreateSurvey, RHManageSurvey,
RHEditSurvey, RHScheduleSurvey, RHStartSurvey,
RHEndSurvey, RHManageSurveyQuestionnaire,
RHAddSurveyQuestion, RHEditSurveyQuestion,
RHDeleteSurveyQuestion, RHChangeQuestionPosition)
from indico.web.flask.wrappers import IndicoBlueprint
_bp = IndicoBlueprint('survey', __name__, template_folder='templates', virtual_template_folder='events/surveys',
url_prefix='/event/<confId>', event_feature='surveys')
# surveys management
_bp.add_url_rule('/manage/surveys/', 'management', RHManageSurveys)
_bp.add_url_rule('/manage/surveys/create', 'create', RHCreateSurvey, methods=('GET', 'POST'))
# Single survey management
_bp.add_url_rule('/manage/surveys/<survey_id>/', 'manage_survey', RHManageSurvey)
_bp.add_url_rule('/manage/surveys/<survey_id>/edit', 'edit_survey', RHEditSurvey, methods=('GET', 'POST'))
_bp.add_url_rule('/manage/surveys/<survey_id>/schedule', 'schedule_survey', RHScheduleSurvey, methods=('GET', 'POST'))
_bp.add_url_rule('/manage/surveys/<survey_id>/start', 'start_survey', RHStartSurvey, methods=('POST',))
_bp.add_url_rule('/manage/surveys/<survey_id>/end', 'end_survey', RHEndSurvey, methods=('POST',))
# Survey question management
_bp.add_url_rule('/manage/surveys/<survey_id>/questionnaire/', 'manage_questionnaire', RHManageSurveyQuestionnaire)
_bp.add_url_rule('/manage/surveys/<survey_id>/questionnaire/add/<type>', 'add_question', RHAddSurveyQuestion,
methods=('GET', 'POST'))
_bp.add_url_rule('/manage/surveys/<survey_id>/questionnaire/<question_id>', 'edit_question', RHEditSurveyQuestion,
methods=('GET', 'POST'))
_bp.add_url_rule('/manage/surveys/<survey_id>/questionnaire/<question_id>/delete', 'remove_question',
RHDeleteSurveyQuestion, methods=('POST',))
_bp.add_url_rule('/manage/surveys/<survey_id>/questionnaire/change-positions', 'change_question_position',
RHChangeQuestionPosition, methods=('POST',))
| Python | 0.000003 |
da0289cf8e95b3f462b4596bb9534d7bb853cae8 | Add test for reply_affinity = True case. | vumi/dispatchers/tests/test_load_balancer.py | vumi/dispatchers/tests/test_load_balancer.py | """Tests for vumi.dispatchers.load_balancer."""
from twisted.internet.defer import inlineCallbacks
from vumi.tests.utils import VumiWorkerTestCase
from vumi.dispatchers.tests.utils import DummyDispatcher
from vumi.dispatchers.load_balancer import LoadBalancingRouter
class BaseLoadBalancingTestCase(VumiWorkerTestCase):
reply_affinity = None
@inlineCallbacks
def setUp(self):
yield super(BaseLoadBalancingTestCase, self).setUp()
config = {
"transport_names": [
"transport_1",
"transport_2",
],
"exposed_names": ["round_robin"],
"router_class": ("vumi.dispatchers.load_balancer."
"LoadBalancingRouter"),
}
if self.reply_affinity is not None:
config['reply_affinity'] = self.reply_affinity
self.dispatcher = DummyDispatcher(config)
self.router = LoadBalancingRouter(self.dispatcher, config)
yield self.router.setup_routing()
@inlineCallbacks
def tearDown(self):
yield super(BaseLoadBalancingTestCase, self).tearDown()
yield self.router.teardown_routing()
class TestLoadBalancingWithoutReplyAffinity(BaseLoadBalancingTestCase):
reply_affinity = None
def test_inbound_message_routing(self):
msg1 = self.mkmsg_in(content='msg 1', transport_name='transport_1')
self.router.dispatch_inbound_message(msg1)
msg2 = self.mkmsg_in(content='msg 2', transport_name='transport_2')
self.router.dispatch_inbound_message(msg2)
publishers = self.dispatcher.exposed_publisher
self.assertEqual(publishers['round_robin'].msgs, [msg1, msg2])
def test_inbound_event_routing(self):
msg1 = self.mkmsg_ack(transport_name='transport_1')
self.router.dispatch_inbound_event(msg1)
msg2 = self.mkmsg_ack(transport_name='transport_2')
self.router.dispatch_inbound_event(msg2)
publishers = self.dispatcher.exposed_event_publisher
self.assertEqual(publishers['round_robin'].msgs, [msg1, msg2])
def test_outbound_message_routing(self):
msg1 = self.mkmsg_out(content='msg 1')
self.router.dispatch_outbound_message(msg1)
msg2 = self.mkmsg_out(content='msg 2')
self.router.dispatch_outbound_message(msg2)
msg3 = self.mkmsg_out(content='msg 3')
self.router.dispatch_outbound_message(msg3)
publishers = self.dispatcher.transport_publisher
self.assertEqual(publishers['transport_1'].msgs, [msg1, msg3])
self.assertEqual(publishers['transport_2'].msgs, [msg2])
class TestLoadBalancingWithReplyAffinity(BaseLoadBalancingTestCase):
reply_affinity = True
def test_inbound_message_routing(self):
msg1 = self.mkmsg_in(content='msg 1', transport_name='transport_1')
self.router.dispatch_inbound_message(msg1)
msg2 = self.mkmsg_in(content='msg 2', transport_name='transport_2')
self.router.dispatch_inbound_message(msg2)
publishers = self.dispatcher.exposed_publisher
self.assertEqual(publishers['round_robin'].msgs, [msg1, msg2])
def test_inbound_event_routing(self):
msg1 = self.mkmsg_ack(transport_name='transport_1')
self.router.dispatch_inbound_event(msg1)
msg2 = self.mkmsg_ack(transport_name='transport_2')
self.router.dispatch_inbound_event(msg2)
publishers = self.dispatcher.exposed_event_publisher
self.assertEqual(publishers['round_robin'].msgs, [msg1, msg2])
def test_outbound_message_routing(self):
msg1 = self.mkmsg_out(content='msg 1')
self.router.push_transport_name(msg1, 'transport_1')
self.router.dispatch_outbound_message(msg1)
msg2 = self.mkmsg_out(content='msg 2')
self.router.push_transport_name(msg2, 'transport_1')
self.router.dispatch_outbound_message(msg2)
publishers = self.dispatcher.transport_publisher
self.assertEqual(publishers['transport_1'].msgs, [msg1, msg2])
self.assertEqual(publishers['transport_2'].msgs, [])
1
| """Tests for vumi.dispatchers.load_balancer."""
from twisted.internet.defer import inlineCallbacks
from vumi.tests.utils import VumiWorkerTestCase
from vumi.dispatchers.tests.utils import DummyDispatcher
from vumi.dispatchers.load_balancer import LoadBalancingRouter
class TestLoadBalancingRouter(VumiWorkerTestCase):
@inlineCallbacks
def setUp(self):
yield super(TestLoadBalancingRouter, self).setUp()
config = {
"transport_names": [
"transport_1",
"transport_2",
],
"exposed_names": ["round_robin"],
"router_class": ("vumi.dispatchers.load_balancer."
"LoadBalancingRouter"),
}
self.dispatcher = DummyDispatcher(config)
self.router = LoadBalancingRouter(self.dispatcher, config)
yield self.router.setup_routing()
@inlineCallbacks
def tearDown(self):
yield super(TestLoadBalancingRouter, self).tearDown()
yield self.router.teardown_routing()
def test_inbound_message_routing(self):
msg1 = self.mkmsg_in(content='msg 1', transport_name='transport_1')
self.router.dispatch_inbound_message(msg1)
msg2 = self.mkmsg_in(content='msg 2', transport_name='transport_2')
self.router.dispatch_inbound_message(msg2)
publishers = self.dispatcher.exposed_publisher
self.assertEqual(publishers['round_robin'].msgs, [msg1, msg2])
def test_inbound_event_routing(self):
msg1 = self.mkmsg_ack(transport_name='transport_1')
self.router.dispatch_inbound_event(msg1)
msg2 = self.mkmsg_ack(transport_name='transport_2')
self.router.dispatch_inbound_event(msg2)
publishers = self.dispatcher.exposed_event_publisher
self.assertEqual(publishers['round_robin'].msgs, [msg1, msg2])
def test_outbound_message_routing(self):
msg1 = self.mkmsg_out(content='msg 1')
self.router.dispatch_outbound_message(msg1)
msg2 = self.mkmsg_out(content='msg 2')
self.router.dispatch_outbound_message(msg2)
publishers = self.dispatcher.transport_publisher
self.assertEqual(publishers['transport_1'].msgs, [msg1])
self.assertEqual(publishers['transport_2'].msgs, [msg2])
| Python | 0.000001 |
76f19afd5cfb084327740de9346781e730d764f9 | Add message method for create etc | iatiupdates/models.py | iatiupdates/models.py |
# IATI Updates, IATI Registry API augmented
# by Mark Brough
#
# Copyright (C) 2013 Publish What You Fund
#
# This programme is free software; you may redistribute and/or modify
# it under the terms of the GNU Affero General Public License v3.0
from sqlalchemy import *
from iatiupdates import db
from datetime import datetime
class Package(db.Model):
__tablename__ = 'package'
id = Column(UnicodeText, primary_key=True)
packagegroup_id = Column(UnicodeText, ForeignKey('packagegroup.id', ondelete='CASCADE'))
metadata_created = Column(DateTime)
metadata_modified = Column(DateTime)
relationships = Column(UnicodeText)
author_email = Column(UnicodeText)
state = Column(UnicodeText)
license_id = Column(UnicodeText)
resources = Column(UnicodeText)
tags = Column(UnicodeText)
groups = Column(UnicodeText)
name = Column(UnicodeText)
isopen = Column(UnicodeText)
license = Column(UnicodeText)
notes_rendered = Column(UnicodeText)
ckan_url = Column(UnicodeText)
title = Column(UnicodeText)
extras = Column(UnicodeText)
ratings_count = Column(UnicodeText)
revision_id = Column(UnicodeText)
notes = Column(UnicodeText)
ratings_average = Column(UnicodeText)
author = Column(UnicodeText)
packagegroup_name = Column(UnicodeText)
issue_type = Column(UnicodeText, ForeignKey('issuetype.id', ondelete='CASCADE'))
issue_message = Column(UnicodeText)
issue_date = Column(UnicodeText)
hash = Column(UnicodeText)
url = Column(UnicodeText)
def as_dict(self):
return {c.name: getattr(self, c.name) for c in self.__table__.columns}
class PackageGroup(db.Model):
__tablename__ = 'packagegroup'
id = Column(UnicodeText, primary_key=True)
display_name = Column(UnicodeText)
description = Column(UnicodeText)
created = Column(DateTime)
title = Column(UnicodeText)
state = Column(UnicodeText)
extras = Column(UnicodeText)
revision_id = Column(UnicodeText)
packages = Column(UnicodeText)
name = Column(UnicodeText)
frequency = Column(Integer)
frequency_comment = Column(UnicodeText)
deleted = Column(Boolean)
def as_dict(self):
return {c.name: getattr(self, c.name) for c in self.__table__.columns}
class Revision(db.Model):
__tablename__ = 'revision'
id = Column(UnicodeText, primary_key=True)
timestamp = Column(DateTime)
package_id = Column(UnicodeText, ForeignKey('package.id', ondelete='CASCADE'))
message = Column(UnicodeText)
author = Column(UnicodeText)
group_id = Column(UnicodeText, ForeignKey('packagegroup.id', ondelete='CASCADE'))
message_type = Column(UnicodeText)
message_text = Column(UnicodeText)
message_method = Column(UnicodeText)
date = Column(DateTime)
def as_dict(self):
return {c.name: getattr(self, c.name) for c in self.__table__.columns}
class IssueType(db.Model):
__tablename__ = 'issuetype'
id = Column(UnicodeText, primary_key=True)
name = Column(UnicodeText)
|
# IATI Updates, IATI Registry API augmented
# by Mark Brough
#
# Copyright (C) 2013 Publish What You Fund
#
# This programme is free software; you may redistribute and/or modify
# it under the terms of the GNU Affero General Public License v3.0
from sqlalchemy import *
from iatiupdates import db
from datetime import datetime
class Package(db.Model):
__tablename__ = 'package'
id = Column(UnicodeText, primary_key=True)
packagegroup_id = Column(UnicodeText, ForeignKey('packagegroup.id', ondelete='CASCADE'))
metadata_created = Column(DateTime)
metadata_modified = Column(DateTime)
relationships = Column(UnicodeText)
author_email = Column(UnicodeText)
state = Column(UnicodeText)
license_id = Column(UnicodeText)
resources = Column(UnicodeText)
tags = Column(UnicodeText)
groups = Column(UnicodeText)
name = Column(UnicodeText)
isopen = Column(UnicodeText)
license = Column(UnicodeText)
notes_rendered = Column(UnicodeText)
ckan_url = Column(UnicodeText)
title = Column(UnicodeText)
extras = Column(UnicodeText)
ratings_count = Column(UnicodeText)
revision_id = Column(UnicodeText)
notes = Column(UnicodeText)
ratings_average = Column(UnicodeText)
author = Column(UnicodeText)
packagegroup_name = Column(UnicodeText)
issue_type = Column(UnicodeText, ForeignKey('issuetype.id', ondelete='CASCADE'))
issue_message = Column(UnicodeText)
issue_date = Column(UnicodeText)
hash = Column(UnicodeText)
url = Column(UnicodeText)
def as_dict(self):
return {c.name: getattr(self, c.name) for c in self.__table__.columns}
class PackageGroup(db.Model):
__tablename__ = 'packagegroup'
id = Column(UnicodeText, primary_key=True)
display_name = Column(UnicodeText)
description = Column(UnicodeText)
created = Column(DateTime)
title = Column(UnicodeText)
state = Column(UnicodeText)
extras = Column(UnicodeText)
revision_id = Column(UnicodeText)
packages = Column(UnicodeText)
name = Column(UnicodeText)
frequency = Column(Integer)
frequency_comment = Column(UnicodeText)
deleted = Column(Boolean)
def as_dict(self):
return {c.name: getattr(self, c.name) for c in self.__table__.columns}
class Revision(db.Model):
__tablename__ = 'revision'
id = Column(UnicodeText, primary_key=True)
timestamp = Column(DateTime)
package_id = Column(UnicodeText, ForeignKey('package.id', ondelete='CASCADE'))
message = Column(UnicodeText)
author = Column(UnicodeText)
group_id = Column(UnicodeText, ForeignKey('packagegroup.id', ondelete='CASCADE'))
message_type = Column(UnicodeText)
message_text = Column(UnicodeText)
date = Column(DateTime)
def as_dict(self):
return {c.name: getattr(self, c.name) for c in self.__table__.columns}
class IssueType(db.Model):
__tablename__ = 'issuetype'
id = Column(UnicodeText, primary_key=True)
name = Column(UnicodeText)
| Python | 0 |
c019c337c8642006a7a851c40bbedbb2c32fc5b5 | Add nuclear option to delete all available caches | wger/core/management/commands/clear-cache.py | wger/core/management/commands/clear-cache.py | # -*- coding: utf-8 *-*
# This file is part of wger Workout Manager.
#
# wger Workout Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wger Workout Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
from optparse import make_option
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand, CommandError
from django.core.cache import cache
from wger.core.models import Language
from wger.manager.models import Workout, WorkoutLog
from wger.exercises.models import Exercise
from wger.utils.cache import (
reset_workout_canonical_form,
reset_workout_log,
delete_template_fragment_cache
)
class Command(BaseCommand):
'''
Clears caches (HTML, etc.)
'''
option_list = BaseCommand.option_list + (
make_option('--clear-template',
action='store_true',
dest='clear_template',
default=False,
help='Clear only template caches'),
make_option('--clear-workout-cache',
action='store_true',
dest='clear_workout',
default=False,
help='Clear only the workout canonical view'),
make_option('--clear-all',
action='store_true',
dest='clear_all',
default=False,
help='Clear ALL cached entries'),
)
help = 'Clears the application cache. You *must* pass an option selecting ' \
'what exactly you want to clear. See available options.'
def handle(self, *args, **options):
'''
Process the options
'''
if (not options['clear_template']
and not options['clear_workout']
and not options['clear_all']):
raise CommandError('Please select what cache you need to delete, see help')
# Exercises, cached template fragments
if options['clear_template']:
if int(options['verbosity']) >= 2:
self.stdout.write("*** Clearing templates")
for user in User.objects.all():
if int(options['verbosity']) >= 2:
self.stdout.write("* Processing user {0}".format(user.username))
for entry in WorkoutLog.objects.filter(user=user).dates('date', 'year'):
if int(options['verbosity']) >= 3:
self.stdout.write(" Year {0}".format(entry.year))
for month in WorkoutLog.objects.filter(user=user,
date__year=entry.year).dates('date',
'month'):
if int(options['verbosity']) >= 3:
self.stdout.write(" Month {0}".format(entry.month))
reset_workout_log(user.id, entry.year, entry.month)
for day in WorkoutLog.objects.filter(user=user,
date__year=entry.year,
date__month=month.month).dates('date',
'day'):
if int(options['verbosity']) >= 3:
self.stdout.write(" Day {0}".format(day.day))
reset_workout_log(user.id, entry.year, entry.month, day)
for language in Language.objects.all():
delete_template_fragment_cache('muscle-overview', language.id)
delete_template_fragment_cache('exercise-overview', language.id)
delete_template_fragment_cache('exercise-overview-mobile', language.id)
delete_template_fragment_cache('equipment-overview', language.id)
for language in Language.objects.all():
for exercise in Exercise.objects.all():
delete_template_fragment_cache('exercise-detail-header',
exercise.id,
language.id)
delete_template_fragment_cache('exercise-detail-muscles',
exercise.id,
language.id)
# Workout canonical form
if options['clear_workout']:
for w in Workout.objects.all():
reset_workout_canonical_form(w.pk)
# Nuclear option, clear all
if options['clear_all']:
cache.clear()
| # -*- coding: utf-8 *-*
# This file is part of wger Workout Manager.
#
# wger Workout Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wger Workout Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
from optparse import make_option
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand, CommandError
from wger.core.models import Language
from wger.manager.models import Workout, WorkoutLog
from wger.exercises.models import Exercise
from wger.utils.cache import (
reset_workout_canonical_form,
reset_workout_log,
delete_template_fragment_cache
)
class Command(BaseCommand):
'''
Clears caches (HTML, etc.)
'''
option_list = BaseCommand.option_list + (
make_option('--clear-template',
action='store_true',
dest='clear_template',
default=False,
help='Clear only template caches'),
make_option('--clear-workout-cache',
action='store_true',
dest='clear_workout',
default=False,
help='Clear only the workout canonical view'),
)
help = 'Clears the application cache. You *must* pass an option selecting ' \
'what exactly you want to clear. See available options.'
def handle(self, *args, **options):
'''
Process the options
'''
if not options['clear_template'] and not options['clear_workout']:
raise CommandError('Please select what cache you need to delete, see help')
# Exercises, cached template fragments
if options['clear_template']:
if int(options['verbosity']) >= 2:
self.stdout.write("*** Clearing templates")
for user in User.objects.all():
if int(options['verbosity']) >= 2:
self.stdout.write("* Processing user {0}".format(user.username))
for entry in WorkoutLog.objects.filter(user=user).dates('date', 'year'):
if int(options['verbosity']) >= 3:
self.stdout.write(" Year {0}".format(entry.year))
for month in WorkoutLog.objects.filter(user=user,
date__year=entry.year).dates('date',
'month'):
if int(options['verbosity']) >= 3:
self.stdout.write(" Month {0}".format(entry.month))
reset_workout_log(user.id, entry.year, entry.month)
for day in WorkoutLog.objects.filter(user=user,
date__year=entry.year,
date__month=month.month).dates('date',
'day'):
if int(options['verbosity']) >= 3:
self.stdout.write(" Day {0}".format(day.day))
reset_workout_log(user.id, entry.year, entry.month, day)
for language in Language.objects.all():
delete_template_fragment_cache('muscle-overview', language.id)
delete_template_fragment_cache('exercise-overview', language.id)
delete_template_fragment_cache('exercise-overview-mobile', language.id)
delete_template_fragment_cache('equipment-overview', language.id)
for language in Language.objects.all():
for exercise in Exercise.objects.all():
delete_template_fragment_cache('exercise-detail-header',
exercise.id,
language.id)
delete_template_fragment_cache('exercise-detail-muscles',
exercise.id,
language.id)
# Workout canonical form
if options['clear_workout']:
for w in Workout.objects.all():
reset_workout_canonical_form(w.pk)
| Python | 0 |
5952c372ae01672bfce450aec924628faecd3654 | bump version for release | crossbar/crossbar/__init__.py | crossbar/crossbar/__init__.py | ###############################################################################
##
## Copyright (C) 2011-2015 Tavendo GmbH
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU Affero General Public License, version 3,
## as published by the Free Software Foundation.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Affero General Public License for more details.
##
## You should have received a copy of the GNU Affero General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
##
###############################################################################
__doc__ = """
Crossbar.io - Unified application router.
Crossbar.io is an open-source server software that allows developers to create
distributed systems, composed of application components which are loosely coupled,
communicate in (soft) real-time and can be implemented in different languages.
Crossbar.io features:
- application routing core (RPC+PubSub)
- full WAMP v2 AP implementation
- application component hosting
- multi-process architecture
- and more
For more information, please go to
* Homepage: http://crossbar.io/
* Documentation: https://github.com/crossbario/crossbar/wiki
* Source code: https://github.com/crossbario/crossbar
Open-source licensed under the GNU Affero General Public License version 3.
Created by Tavendo GmbH. Get in contact at http://tavendo.com
"""
__version__ = "0.10.1"
| ###############################################################################
##
## Copyright (C) 2011-2015 Tavendo GmbH
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU Affero General Public License, version 3,
## as published by the Free Software Foundation.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Affero General Public License for more details.
##
## You should have received a copy of the GNU Affero General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
##
###############################################################################
__doc__ = """
Crossbar.io - Unified application router.
Crossbar.io is an open-source server software that allows developers to create
distributed systems, composed of application components which are loosely coupled,
communicate in (soft) real-time and can be implemented in different languages.
Crossbar.io features:
- application routing core (RPC+PubSub)
- full WAMP v2 AP implementation
- application component hosting
- multi-process architecture
- and more
For more information, please go to
* Homepage: http://crossbar.io/
* Documentation: https://github.com/crossbario/crossbar/wiki
* Source code: https://github.com/crossbario/crossbar
Open-source licensed under the GNU Affero General Public License version 3.
Created by Tavendo GmbH. Get in contact at http://tavendo.com
"""
__version__ = "0.10.0"
| Python | 0 |
0b4fb3dd59ce0940026b1cf212adcf6d17bca7a0 | Refactor build_update_query (2) | mongots/query.py | mongots/query.py | from datetime import datetime
AGGREGATION_KEYS = [
'',
'months.{month}.',
'months.{month}.days.{day}.',
'months.{month}.days.{day}.hours.{hour}.',
]
DATETIME_KEY = 'datetime'
def build_filter_query(timestamp, tags=None):
filters = tags or {}
filters[DATETIME_KEY] = datetime(timestamp.year, 1, 1)
return filters
def build_update_query(value, timestamp):
inc_values = {
'count': 1,
'sum': value,
}
datetime_args = {
'month': str(timestamp.month - 1), # Array index: range from 0 to 11
'day': str(timestamp.day - 1), # Array index: range from 0 to 27 / 28 / 29 or 30
'hour': str(timestamp.hour), # range from 0 to 23
}
inc_keys = [
key.format(**datetime_args)
for key in AGGREGATION_KEYS
]
inc_update = {
'%s%s' % (inc_key, aggregate_type): inc_values[aggregate_type]
for inc_key in inc_keys
for aggregate_type in inc_values
}
return {
'$inc': inc_update,
}
| from datetime import datetime
AGGREGATION_KEYS = [
'',
'months.{month}.',
'months.{month}.days.{day}.',
'months.{month}.days.{day}.hours.{hour}.',
]
DATETIME_KEY = 'datetime'
def build_filter_query(timestamp, tags=None):
filters = tags or {}
filters[DATETIME_KEY] = datetime(timestamp.year, 1, 1)
return filters
def build_update_query(value, timestamp):
datetime_args = {
'month': str(timestamp.month - 1), # Array index: range from 0 to 11
'day': str(timestamp.day - 1), # Array index: range from 0 to 27 / 28 / 29 or 30
'hour': str(timestamp.hour), # range from 0 to 23
}
inc_keys = [
key.format(**datetime_args)
for key in AGGREGATION_KEYS
]
inc_update = {
'%s%s' % (inc_key, aggregate_type): value if aggregate_type is "sum" else 1
for inc_key in inc_keys
for aggregate_type in ['count', 'sum']
}
return {
'$inc': inc_update,
}
| Python | 0 |
51432aa92e233ba3c9db500e4e3d55b7067e906c | Add latest version of py-jinja2 (#13311) | var/spack/repos/builtin/packages/py-jinja2/package.py | var/spack/repos/builtin/packages/py-jinja2/package.py | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyJinja2(PythonPackage):
"""Jinja2 is a template engine written in pure Python. It provides
a Django inspired non-XML syntax but supports inline expressions
and an optional sandboxed environment."""
homepage = "https://palletsprojects.com/p/jinja/"
url = "https://pypi.io/packages/source/J/Jinja2/Jinja2-2.10.3.tar.gz"
import_modules = ['jinja2']
version('2.10.3', sha256='9fe95f19286cfefaa917656583d020be14e7859c6b0252588391e47db34527de')
version('2.10', sha256='f84be1bb0040caca4cea721fcbbbbd61f9be9464ca236387158b0feea01914a4')
version('2.9.6', sha256='ddaa01a212cd6d641401cb01b605f4a4d9f37bfc93043d7f760ec70fb99ff9ff')
version('2.8', sha256='bc1ff2ff88dbfacefde4ddde471d1417d3b304e8df103a7a9437d47269201bf4')
version('2.7.3', sha256='2e24ac5d004db5714976a04ac0e80c6df6e47e98c354cb2c0d82f8879d4f8fdb')
version('2.7.2', sha256='310a35fbccac3af13ebf927297f871ac656b9da1d248b1fe6765affa71b53235')
version('2.7.1', sha256='5cc0a087a81dca1c08368482fb7a92fe2bdd8cfbb22bc0fccfe6c85affb04c8b')
version('2.7', sha256='474f1518d189ae7e318b139fecc1d30b943f124448cfa0f09582ca23e069fa4d')
depends_on('py-setuptools', type='build')
depends_on('py-markupsafe@0.23:', type=('build', 'run'))
depends_on('py-babel@0.8:', type=('build', 'run')) # optional, required for i18n
| # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyJinja2(PythonPackage):
"""Jinja2 is a template engine written in pure Python. It provides
a Django inspired non-XML syntax but supports inline expressions
and an optional sandboxed environment."""
homepage = "http://jinja.pocoo.org/"
url = "https://pypi.io/packages/source/J/Jinja2/Jinja2-2.9.6.tar.gz"
import_modules = ['jinja2']
version('2.10', sha256='f84be1bb0040caca4cea721fcbbbbd61f9be9464ca236387158b0feea01914a4')
version('2.9.6', sha256='ddaa01a212cd6d641401cb01b605f4a4d9f37bfc93043d7f760ec70fb99ff9ff')
version('2.8', sha256='bc1ff2ff88dbfacefde4ddde471d1417d3b304e8df103a7a9437d47269201bf4')
version('2.7.3', sha256='2e24ac5d004db5714976a04ac0e80c6df6e47e98c354cb2c0d82f8879d4f8fdb')
version('2.7.2', sha256='310a35fbccac3af13ebf927297f871ac656b9da1d248b1fe6765affa71b53235')
version('2.7.1', sha256='5cc0a087a81dca1c08368482fb7a92fe2bdd8cfbb22bc0fccfe6c85affb04c8b')
version('2.7', sha256='474f1518d189ae7e318b139fecc1d30b943f124448cfa0f09582ca23e069fa4d')
depends_on('py-setuptools', type='build')
depends_on('py-markupsafe', type=('build', 'run'))
depends_on('py-babel@0.8:', type=('build', 'run')) # optional, required for i18n
| Python | 0 |
77f155fec48c808724eff1b2631035d2526c170f | add version 2.11.3 (#23698) | var/spack/repos/builtin/packages/py-jinja2/package.py | var/spack/repos/builtin/packages/py-jinja2/package.py | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyJinja2(PythonPackage):
"""Jinja2 is a template engine written in pure Python. It provides
a Django inspired non-XML syntax but supports inline expressions
and an optional sandboxed environment."""
homepage = "https://palletsprojects.com/p/jinja/"
pypi = "Jinja2/Jinja2-2.10.3.tar.gz"
version('2.11.3', sha256='a6d58433de0ae800347cab1fa3043cebbabe8baa9d29e668f1c768cb87a333c6')
version('2.10.3', sha256='9fe95f19286cfefaa917656583d020be14e7859c6b0252588391e47db34527de')
version('2.10.1', sha256='065c4f02ebe7f7cf559e49ee5a95fb800a9e4528727aec6f24402a5374c65013')
version('2.10', sha256='f84be1bb0040caca4cea721fcbbbbd61f9be9464ca236387158b0feea01914a4')
version('2.9.6', sha256='ddaa01a212cd6d641401cb01b605f4a4d9f37bfc93043d7f760ec70fb99ff9ff')
version('2.8', sha256='bc1ff2ff88dbfacefde4ddde471d1417d3b304e8df103a7a9437d47269201bf4')
version('2.7.3', sha256='2e24ac5d004db5714976a04ac0e80c6df6e47e98c354cb2c0d82f8879d4f8fdb')
version('2.7.2', sha256='310a35fbccac3af13ebf927297f871ac656b9da1d248b1fe6765affa71b53235')
version('2.7.1', sha256='5cc0a087a81dca1c08368482fb7a92fe2bdd8cfbb22bc0fccfe6c85affb04c8b')
version('2.7', sha256='474f1518d189ae7e318b139fecc1d30b943f124448cfa0f09582ca23e069fa4d')
depends_on('python@2.7:2.8,3.5:', type=('build', 'run'))
depends_on('py-setuptools', type='build')
depends_on('py-markupsafe@0.23:', type=('build', 'run'))
depends_on('py-babel@0.8:', type=('build', 'run')) # optional, required for i18n
| # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyJinja2(PythonPackage):
"""Jinja2 is a template engine written in pure Python. It provides
a Django inspired non-XML syntax but supports inline expressions
and an optional sandboxed environment."""
homepage = "https://palletsprojects.com/p/jinja/"
pypi = "Jinja2/Jinja2-2.10.3.tar.gz"
version('2.10.3', sha256='9fe95f19286cfefaa917656583d020be14e7859c6b0252588391e47db34527de')
version('2.10.1', sha256='065c4f02ebe7f7cf559e49ee5a95fb800a9e4528727aec6f24402a5374c65013')
version('2.10', sha256='f84be1bb0040caca4cea721fcbbbbd61f9be9464ca236387158b0feea01914a4')
version('2.9.6', sha256='ddaa01a212cd6d641401cb01b605f4a4d9f37bfc93043d7f760ec70fb99ff9ff')
version('2.8', sha256='bc1ff2ff88dbfacefde4ddde471d1417d3b304e8df103a7a9437d47269201bf4')
version('2.7.3', sha256='2e24ac5d004db5714976a04ac0e80c6df6e47e98c354cb2c0d82f8879d4f8fdb')
version('2.7.2', sha256='310a35fbccac3af13ebf927297f871ac656b9da1d248b1fe6765affa71b53235')
version('2.7.1', sha256='5cc0a087a81dca1c08368482fb7a92fe2bdd8cfbb22bc0fccfe6c85affb04c8b')
version('2.7', sha256='474f1518d189ae7e318b139fecc1d30b943f124448cfa0f09582ca23e069fa4d')
depends_on('py-setuptools', type='build')
depends_on('py-markupsafe@0.23:', type=('build', 'run'))
depends_on('py-babel@0.8:', type=('build', 'run')) # optional, required for i18n
| Python | 0.000009 |
5b6445e519fa9c03d703144462004ac27b9079ba | Add latest version of joblib (#11495) | var/spack/repos/builtin/packages/py-joblib/package.py | var/spack/repos/builtin/packages/py-joblib/package.py | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyJoblib(PythonPackage):
"""Python function as pipeline jobs"""
homepage = "http://packages.python.org/joblib/"
url = "https://pypi.io/packages/source/j/joblib/joblib-0.13.2.tar.gz"
import_modules = [
'joblib', 'joblib.externals', 'joblib.externals.cloudpickle',
'joblib.externals.loky', 'joblib.externals.loky.backend'
]
version('0.13.2', sha256='315d6b19643ec4afd4c41c671f9f2d65ea9d787da093487a81ead7b0bac94524')
version('0.10.3', '455401ccfaf399538d8e5333086df2d3')
version('0.10.2', 'ebb42af4342c2445b175f86bd478d869')
version('0.10.0', '61e40322c4fed5c22905f67d7d1aa557')
| # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyJoblib(PythonPackage):
"""Python function as pipeline jobs"""
homepage = "http://packages.python.org/joblib/"
url = "https://pypi.io/packages/source/j/joblib/joblib-0.10.3.tar.gz"
version('0.10.3', '455401ccfaf399538d8e5333086df2d3')
version('0.10.2', 'ebb42af4342c2445b175f86bd478d869')
version('0.10.0', '61e40322c4fed5c22905f67d7d1aa557')
# for testing
# depends_on('py-nose', type=('build', 'run'))
| Python | 0 |
028391c0a3778d20d162882b6778a164984ceb2a | update dependencies and fix build (#9207) | var/spack/repos/builtin/packages/py-spyder/package.py | var/spack/repos/builtin/packages/py-spyder/package.py | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PySpyder(PythonPackage):
"""Scientific PYthon Development EnviRonment"""
homepage = "https://github.com/spyder-ide/spyder"
url = "https://pypi.io/packages/source/s/spyder/spyder-3.1.3.tar.gz"
version('3.1.3', '4b9b7c8c3e6dc00001e6e98473473c36')
version('2.3.9', 'dd01e07a77123c128ff79ba57b97c1d7')
depends_on('python@2.7.0:2.8.0,3.3.0:', type=('build', 'run'))
depends_on('py-rope@0.9.4:', type=('build', 'run'), when='^python@:3')
# depends_on('py-rope_py3k', type=('build', 'run'), when='^python@3:')
depends_on('py-jedi@0.9.0', type=('build', 'run'))
# otherwise collision with py-flake8
depends_on('py-pyflakes@1.2.3', type=('build', 'run'))
depends_on('py-pygments@2.0:', type=('build', 'run'))
depends_on('py-qtconsole@4.2.0:', type=('build', 'run'))
depends_on('py-nbconvert', type=('build', 'run'))
depends_on('py-sphinx', type=('build', 'run'))
# The pycodestyle dependency is split in two, because internally it
# changes its name from pep8 to pycodestyle, and spyder does not cope
# with this change until @3.2.0
# https://github.com/PyCQA/pycodestyle/issues/466
# https://github.com/spyder-ide/spyder/blob/master/CHANGELOG.md#version-32-2017-07-24
depends_on('py-pycodestyle@:1.7.1', when='@:3.1.99', type=('build', 'run'))
depends_on('py-pycodestyle@2.1.0:', when='@3.2.0:', type=('build', 'run'))
depends_on('py-pylint', type=('build', 'run'))
depends_on('py-psutil', type=('build', 'run'))
depends_on('py-qtawesome@0.4.1:', type=('build', 'run'))
depends_on('py-qtpy@1.1.0:', type=('build', 'run'))
# technically this is a transitive dependency in order for py-pyqt
# to pick up webkit, but this is the easier solution (see #9207)
depends_on('qt+webkit', type=('build', 'run'))
depends_on('py-pickleshare', type=('build', 'run'))
depends_on('py-zmq', type=('build', 'run'))
depends_on('py-chardet@2.0.0:', type=('build', 'run'))
depends_on('py-numpydoc', type=('build', 'run'))
| ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PySpyder(PythonPackage):
"""Scientific PYthon Development EnviRonment"""
homepage = "https://github.com/spyder-ide/spyder"
url = "https://pypi.io/packages/source/s/spyder/spyder-3.1.3.tar.gz"
version('3.1.3', '4b9b7c8c3e6dc00001e6e98473473c36')
version('2.3.9', 'dd01e07a77123c128ff79ba57b97c1d7')
depends_on('py-setuptools', type='build')
depends_on('py-rope@0.9.4:', type=('build', 'run'), when='^python@:3')
# depends_on('py-rope_py3k', type=('build', 'run'), when='^python@3:')
depends_on('py-jedi@0.9.0', type=('build', 'run'))
# otherwise collision with py-flake8
depends_on('py-pyflakes@1.2.3', type=('build', 'run'))
depends_on('py-pygments@2.0:', type=('build', 'run'))
depends_on('py-qtconsole@4.2.0:', type=('build', 'run'))
depends_on('py-nbconvert', type=('build', 'run'))
depends_on('py-sphinx', type=('build', 'run'))
depends_on('py-pycodestyle', type=('build', 'run'))
depends_on('py-pylint', type=('build', 'run'))
depends_on('py-psutil', type=('build', 'run'))
depends_on('py-qtawesome@0.4.1:', type=('build', 'run'))
depends_on('py-qtpy@1.1.0:', type=('build', 'run'))
depends_on('py-zmq', type=('build', 'run'))
depends_on('py-chardet@2:', type=('build', 'run'))
depends_on('py-pickleshare', type=('build', 'run'))
depends_on('py-numpydoc', type=('build', 'run'))
| Python | 0 |
350a5422ed1f874e7b2780348663f320a1af6676 | Update py-theano dependencies (#14015) | var/spack/repos/builtin/packages/py-theano/package.py | var/spack/repos/builtin/packages/py-theano/package.py | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyTheano(PythonPackage):
"""Optimizing compiler for evaluating mathematical expressions on CPUs
and GPUs."""
homepage = "http://deeplearning.net/software/theano/"
url = "https://pypi.io/packages/source/T/Theano/Theano-0.8.2.tar.gz"
git = "https://github.com/Theano/Theano.git"
version('master', branch='master')
version('1.0.4', sha256='35c9bbef56b61ffa299265a42a4e8f8cb5a07b2997dabaef0f8830b397086913')
version('1.0.2', sha256='6768e003d328a17011e6fca9126fbb8a6ffd3bb13cb21c450f3e724cca29abde')
version('1.0.1', sha256='88d8aba1fe2b6b75eacf455d01bc7e31e838c5a0fb8c13dde2d9472495ff4662')
version('0.8.2', sha256='7463c8f7ed1a787bf881f36d38a38607150186697e7ce7e78bfb94b7c6af8930')
variant('gpu', default=False,
description='Builds with support for GPUs via CUDA and cuDNN')
depends_on('python@2.6:2.8,3.3:')
depends_on('py-setuptools', type=('build', 'run'))
depends_on('py-numpy@1.9.1:', type=('build', 'run'))
depends_on('py-scipy@0.14:', type=('build', 'run'))
depends_on('py-six@1.9.0:', type=('build', 'run'))
depends_on('blas')
depends_on('cuda', when='+gpu')
depends_on('cudnn', when='+gpu')
depends_on('py-pygpu', when='+gpu', type=('build', 'run'))
depends_on('libgpuarray', when='+gpu')
depends_on('py-nose@1.3.0:', type='test')
depends_on('py-parameterized', type='test')
depends_on('py-flake8', type='test')
| # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyTheano(PythonPackage):
"""Optimizing compiler for evaluating mathematical expressions on CPUs
and GPUs."""
homepage = "http://deeplearning.net/software/theano/"
url = "https://pypi.io/packages/source/T/Theano/Theano-0.8.2.tar.gz"
git = "https://github.com/Theano/Theano.git"
version('master', branch='master')
version('1.0.4', sha256='35c9bbef56b61ffa299265a42a4e8f8cb5a07b2997dabaef0f8830b397086913')
version('1.0.2', sha256='6768e003d328a17011e6fca9126fbb8a6ffd3bb13cb21c450f3e724cca29abde')
version('1.0.1', sha256='88d8aba1fe2b6b75eacf455d01bc7e31e838c5a0fb8c13dde2d9472495ff4662')
version('0.8.2', sha256='7463c8f7ed1a787bf881f36d38a38607150186697e7ce7e78bfb94b7c6af8930')
variant('gpu', default=False,
description='Builds with support for GPUs via CUDA and cuDNN')
depends_on('python@2.6:2.8,3.3:')
depends_on('py-setuptools', type=('build', 'run'))
depends_on('py-scipy@0.11:', type=('build', 'run'))
depends_on('py-numpy@1.7.1:', type=('build', 'run'))
depends_on('py-six@1.9.0:', type=('build', 'run'))
depends_on('blas')
depends_on('cuda', when='+gpu')
depends_on('cudnn', when='+gpu')
depends_on('py-pygpu', when='+gpu', type=('build', 'run'))
depends_on('libgpuarray', when='+gpu')
depends_on('py-nose@1.3.0:', type='test')
depends_on('py-nose-parameterized@0.5.0:', type='test')
| Python | 0 |
38199ce9cfb69b21e45e679d3a6604a72da7cc5b | add version 0.5.0 to r-forcats (#20972) | var/spack/repos/builtin/packages/r-forcats/package.py | var/spack/repos/builtin/packages/r-forcats/package.py | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RForcats(RPackage):
"""Tools for Working with Categorical Variables (Factors)
Helpers for reordering factor levels (including moving specified levels to
front, ordering by first appearance, reversing, and randomly shuffling),
and tools for modifying factor levels (including collapsing rare levels
into other, 'anonymising', and manually 'recoding')."""
homepage = "http://forcats.tidyverse.org/"
url = "https://cloud.r-project.org/src/contrib/forcats_0.2.0.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/forcats"
version('0.5.0', sha256='8f960e789333ec597ddf2d653a64e330f03b86f465e9b71f6779f227355d90c4')
version('0.4.0', sha256='7c83cb576aa6fe1379d7506dcc332f7560068b2025f9e3ab5cd0a5f28780d2b2')
version('0.3.0', sha256='95814610ec18b8a8830eba63751954387f9d21400d6ab40394ed0ff22c0cb657')
version('0.2.0', sha256='b5bce370422d4c0ec9509249ae645373949bfbe9217cdf50dce2bfbdad9f7cd7')
depends_on('r@3.1:', type=('build', 'run'))
depends_on('r@3.2:', when='@0.5.0:', type=('build', 'run'))
depends_on('r-ellipsis', when='@0.4.0:', type=('build', 'run'))
depends_on('r-magrittr', type=('build', 'run'))
depends_on('r-rlang', when='@0.4.0:', type=('build', 'run'))
depends_on('r-tibble', type=('build', 'run'))
| # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RForcats(RPackage):
"""Helpers for reordering factor levels (including moving specified levels
to front, ordering by first appearance, reversing, and randomly
shuffling), and tools for modifying factor levels (including collapsing
rare levels into other, 'anonymising', and manually 'recoding')."""
homepage = "http://forcats.tidyverse.org/"
url = "https://cloud.r-project.org/src/contrib/forcats_0.2.0.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/forcats"
version('0.4.0', sha256='7c83cb576aa6fe1379d7506dcc332f7560068b2025f9e3ab5cd0a5f28780d2b2')
version('0.3.0', sha256='95814610ec18b8a8830eba63751954387f9d21400d6ab40394ed0ff22c0cb657')
version('0.2.0', sha256='b5bce370422d4c0ec9509249ae645373949bfbe9217cdf50dce2bfbdad9f7cd7')
depends_on('r@3.1:', type=('build', 'run'))
depends_on('r-tibble', type=('build', 'run'))
depends_on('r-magrittr', type=('build', 'run'))
depends_on('r-ellipsis', when='@0.4.0:', type=('build', 'run'))
depends_on('r-rlang', when='@0.4.0:', type=('build', 'run'))
| Python | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.