commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13
values | lang stringclasses 23
values |
|---|---|---|---|---|---|---|---|---|
0ac28421fe8ed4234db13ebdbb95c700191ba042 | Remove spaces from hash tag links | devfort/bugle,devfort/bugle,simonw/bugle_project,devfort/bugle,simonw/bugle_project | bugle_project/bugle/templatetags/bugle.py | bugle_project/bugle/templatetags/bugle.py | from django import template
from django.contrib.auth.models import User
from django.utils.safestring import mark_safe
import re
import urllib
register = template.Library()
username_re = re.compile('@[0-9a-zA-Z]+')
hashtag_re = re.compile('(^|\s)(#\S+)')
@register.filter
def buglise(s):
s = unicode(s)
usernames = set(User.objects.values_list('username', flat=True))
def replace_username(match):
username = match.group(0)[1:]
if username.lower() == 'all':
return '<strong>@all</strong>'
if username in usernames:
return '<a href="/%s/">@%s</a>' % (username, username)
else:
return '@' + username
s = username_re.sub(replace_username, s)
s = hashtag_re.sub(
lambda m: '%s<a href="/search/?q=%s">%s</a>' % (
m.group(1),
urllib.quote(m.group(2)),
m.group(2),
),
s
)
return mark_safe(s)
| from django import template
from django.contrib.auth.models import User
from django.utils.safestring import mark_safe
import re
import urllib
register = template.Library()
username_re = re.compile('@[0-9a-zA-Z]+')
hashtag_re = re.compile('(?:^|\s)(#\S+)')
@register.filter
def buglise(s):
s = unicode(s)
usernames = set(User.objects.values_list('username', flat=True))
def replace_username(match):
username = match.group(0)[1:]
if username.lower() == 'all':
return '<strong>@all</strong>'
if username in usernames:
return '<a href="/%s/">@%s</a>' % (username, username)
else:
return '@' + username
s = username_re.sub(replace_username, s)
s = hashtag_re.sub(
lambda m: '<a href="/search/?q=%s">%s</a>' % (
urllib.quote(m.group(0)),
m.group(0),
),
s
)
return mark_safe(s)
| bsd-2-clause | Python |
a9ad46f143ad9223409149c7b22abafaef7f6d21 | use admin.register decorator | geometalab/osmaxx,geometalab/osmaxx,geometalab/osmaxx-frontend,geometalab/osmaxx,geometalab/osmaxx-frontend,geometalab/osmaxx-frontend,geometalab/osmaxx,geometalab/drf-utm-zone-info,geometalab/drf-utm-zone-info,geometalab/osmaxx-frontend | web_frontend/osmaxx/excerptexport/admin.py | web_frontend/osmaxx/excerptexport/admin.py | from django.contrib import admin
from django.utils.safestring import mark_safe
from osmaxx.excerptexport.models import BBoxBoundingGeometry, OsmosisPolygonFilterBoundingGeometry
from osmaxx.excerptexport.models import Excerpt, ExtractionOrder, OutputFile
class BBoxBoundingGeometryAdmin(admin.ModelAdmin):
list_display = ('north', 'east', 'south', 'west')
fields = (list_display,)
readonly_fields = list_display
admin.site.register(BBoxBoundingGeometry, BBoxBoundingGeometryAdmin)
@admin.register(Excerpt)
class ExcerptAdmin(admin.ModelAdmin):
list_display = ['name', 'is_public', 'is_active', 'owner', 'bounding_geometry']
fields = ('name', ('bounding_geometry', 'bounding_geometry_subclass_instance_edit_link'))
readonly_fields = ('bounding_geometry_subclass_instance_edit_link',)
def bounding_geometry_subclass_instance_edit_link(self, excerpt):
admin_link = excerpt.bounding_geometry.subclass_instance.get_admin_url()
return mark_safe(
'<a href="{}">'
'<img src="/static/admin/img/icon_changelink.gif" alt="Change" height="10" width="10"></img> Edit {} {}'
'</a>'.format(
admin_link,
type(excerpt.bounding_geometry.subclass_instance).__name__,
excerpt.bounding_geometry.subclass_instance.id,
),
)
bounding_geometry_subclass_instance_edit_link.short_description = 'Boundary'
@admin.register(ExtractionOrder)
class ExtractionOrderAdmin(admin.ModelAdmin):
readonly_fields = ('process_id', '_extraction_configuration', 'progress_url')
admin.site.register(OutputFile)
admin.site.register(OsmosisPolygonFilterBoundingGeometry)
| from django.contrib import admin
from django.utils.safestring import mark_safe
from osmaxx.excerptexport.models import BBoxBoundingGeometry, OsmosisPolygonFilterBoundingGeometry
from osmaxx.excerptexport.models import Excerpt, ExtractionOrder, OutputFile
class BBoxBoundingGeometryAdmin(admin.ModelAdmin):
list_display = ('north', 'east', 'south', 'west')
fields = (list_display,)
readonly_fields = list_display
admin.site.register(BBoxBoundingGeometry, BBoxBoundingGeometryAdmin)
class ExcerptAdmin(admin.ModelAdmin):
list_display = ['name', 'is_public', 'is_active', 'owner', 'bounding_geometry']
fields = ('name', ('bounding_geometry', 'bounding_geometry_subclass_instance_edit_link'))
readonly_fields = ('bounding_geometry_subclass_instance_edit_link',)
def bounding_geometry_subclass_instance_edit_link(self, excerpt):
admin_link = excerpt.bounding_geometry.subclass_instance.get_admin_url()
return mark_safe(
'<a href="{}">'
'<img src="/static/admin/img/icon_changelink.gif" alt="Change" height="10" width="10"></img> Edit {} {}'
'</a>'.format(
admin_link,
type(excerpt.bounding_geometry.subclass_instance).__name__,
excerpt.bounding_geometry.subclass_instance.id,
),
)
bounding_geometry_subclass_instance_edit_link.short_description = 'Boundary'
admin.site.register(Excerpt, ExcerptAdmin)
class ExtractionOrderAdmin(admin.ModelAdmin):
readonly_fields = ('process_id', '_extraction_configuration', 'progress_url')
admin.site.register(ExtractionOrder, ExtractionOrderAdmin)
admin.site.register(OutputFile)
admin.site.register(OsmosisPolygonFilterBoundingGeometry)
| mit | Python |
b612ad22834e9755412c9f565f317519518656bf | Fix simplechrome schedulers. | eunchong/build,eunchong/build,eunchong/build,eunchong/build | masters/master.chromium.chromiumos/master_chromiumos_simplechrome_cfg.py | masters/master.chromium.chromiumos/master_chromiumos_simplechrome_cfg.py | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from master.factory import annotator_factory
from buildbot.schedulers.basic import SingleBranchScheduler
m_annotator = annotator_factory.AnnotatorFactory()
builders = [{
'name': 'ChromiumOS %s Compile' % (board,),
'factory': m_annotator.BaseFactory('chromium'),
'notify_on_missing': True,
'category': '4simplechrome',
} for board in ('x86-generic', 'amd64-generic', 'daisy')]
def Update(_config, active_master, c):
c['schedulers'].append(SingleBranchScheduler(
name='chromium_simplechrome',
branch='master',
treeStableTimer=60,
builderNames=[b['name'] for b in builders]
))
c['builders'] += builders
| # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from master.factory import annotator_factory
from buildbot.schedulers.basic import SingleBranchScheduler
m_annotator = annotator_factory.AnnotatorFactory()
builders = [{
'name': 'ChromiumOS %s Compile' % (board,),
'factory': m_annotator.BaseFactory('chromium'),
'notify_on_missing': True,
'category': '4simplechrome',
} for board in ('x86-generic', 'amd64-generic', 'daisy')]
def Update(_config, active_master, c):
c['schedulers'].extend(SingleBranchScheduler(
name='chromium_simplechrome',
branch='master',
treeStableTimer=60,
builderNames=[b['name'] for b in builders]))
c['builders'] += builders
| bsd-3-clause | Python |
a86626bb92d35d268051fc78f163609c8639c02f | Add test to check that plotly_ssl_verification setting gets set. | plotly/plotly.py,ee-in/python-api,ee-in/python-api,plotly/python-api,plotly/plotly.py,plotly/python-api,plotly/plotly.py,plotly/python-api,ee-in/python-api | plotly/tests/test_core/test_plotly/test_credentials.py | plotly/tests/test_core/test_plotly/test_credentials.py | from unittest import TestCase
import plotly.plotly.plotly as py
import plotly.tools as tls
def test_get_credentials():
if 'username' in py._credentials:
del py._credentials['username']
if 'api_key' in py._credentials:
del py._credentials['api_key']
creds = py.get_credentials()
file_creds = tls.get_credentials_file()
print(creds)
print(file_creds)
assert creds == file_creds
def test_sign_in():
un = 'anyone'
ak = 'something'
# TODO, add this!
# si = ['this', 'and-this']
py.sign_in(un, ak)
creds = py.get_credentials()
assert creds['username'] == un
assert creds['api_key'] == ak
# TODO, and check it!
# assert creds['stream_ids'] == si
class TestSignIn(TestCase):
def test_get_config(self):
plotly_domain = 'test domain'
plotly_streaming_domain = 'test streaming domain'
config1 = py.get_config()
py._config['plotly_domain'] = plotly_domain
config2 = py.get_config()
py._config['plotly_streaming_domain'] = plotly_streaming_domain
config3 = py.get_config()
self.assertEqual(config2['plotly_domain'], plotly_domain)
self.assertNotEqual(
config2['plotly_streaming_domain'], plotly_streaming_domain
)
self.assertEqual(
config3['plotly_streaming_domain'], plotly_streaming_domain
)
def test_sign_in_with_config(self):
username = 'place holder'
api_key = 'place holder'
plotly_domain = 'test domain'
plotly_streaming_domain = 'test streaming domain'
plotly_ssl_verification = False
py.sign_in(
username,
api_key,
plotly_domain=plotly_domain,
plotly_streaming_domain=plotly_streaming_domain,
plotly_ssl_verification=plotly_ssl_verification
)
config = py.get_config()
self.assertEqual(config['plotly_domain'], plotly_domain)
self.assertEqual(
config['plotly_streaming_domain'], plotly_streaming_domain
)
self.assertEqual(
config['plotly_ssl_verification'], plotly_ssl_verification
)
| from unittest import TestCase
import plotly.plotly.plotly as py
import plotly.tools as tls
def test_get_credentials():
if 'username' in py._credentials:
del py._credentials['username']
if 'api_key' in py._credentials:
del py._credentials['api_key']
creds = py.get_credentials()
file_creds = tls.get_credentials_file()
print(creds)
print(file_creds)
assert creds == file_creds
def test_sign_in():
un = 'anyone'
ak = 'something'
# TODO, add this!
# si = ['this', 'and-this']
py.sign_in(un, ak)
creds = py.get_credentials()
assert creds['username'] == un
assert creds['api_key'] == ak
# TODO, and check it!
# assert creds['stream_ids'] == si
class TestSignIn(TestCase):
def test_get_config(self):
plotly_domain = 'test domain'
plotly_streaming_domain = 'test streaming domain'
config1 = py.get_config()
py._config['plotly_domain'] = plotly_domain
config2 = py.get_config()
py._config['plotly_streaming_domain'] = plotly_streaming_domain
config3 = py.get_config()
self.assertEqual(config2['plotly_domain'], plotly_domain)
self.assertNotEqual(
config2['plotly_streaming_domain'], plotly_streaming_domain
)
self.assertEqual(
config3['plotly_streaming_domain'], plotly_streaming_domain
)
def test_sign_in_with_config(self):
username = 'place holder'
api_key = 'place holder'
plotly_domain = 'test domain'
plotly_streaming_domain = 'test streaming domain'
py.sign_in(
username,
api_key,
plotly_domain=plotly_domain,
plotly_streaming_domain=plotly_streaming_domain
)
config = py.get_config()
self.assertEqual(config['plotly_domain'], plotly_domain)
self.assertEqual(
config['plotly_streaming_domain'], plotly_streaming_domain
)
| mit | Python |
fdc9f7fabab770cf31a0351108eac18e1a5a4289 | Store SMART values | eReuse/DeviceHub,eReuse/DeviceHub | ereuse_devicehub/resources/event/device/test_hard_drive/settings.py | ereuse_devicehub/resources/event/device/test_hard_drive/settings.py | from ereuse_devicehub.resources.event.device.settings import EventSubSettingsOneDevice, \
EventWithOneDevice, parent_materialized
class TestHardDrive(EventWithOneDevice):
"""
We decided to take these specific SMART values because of
https://www.backblaze.com/blog/hard-drive-smart-stats/.
"""
type = {
'type': 'string',
# 'allowed': ['Short Offline', 'Extended Offline']
}
status = {
'type': 'string',
'required': True
}
lifetime = {
'type': 'integer',
}
firstError = {
'type': 'integer',
'nullable': True
}
snapshot = {
'type': 'objectid',
'data_relation': {
'resource': 'events',
'field': '_id',
'embeddable': True
}
}
error = {
'type': 'boolean',
'required': True
}
passedLifetime = {
'type': 'integer'
}
parent = parent_materialized
reallocatedSectorCount = {
'type': 'integer'
}
powerCycleCount = {
'type': 'integer'
}
reportedUncorrectableErrors = {
'type': 'integer'
}
CommandTimeout = {
'type': 'integer'
}
CurrentPendingSectorCount = {
'type': 'integer'
}
OfflineUncorrectable = {
'type': 'integer'
}
RemainingLifetimePercentage = {
'type': 'integer'
}
class TestHardDriveSettings(EventSubSettingsOneDevice):
_schema = TestHardDrive
fa = 'fa-flask'
short_description = 'A test of the health of the hard drive'
item_methods = ['GET']
| from ereuse_devicehub.resources.event.device.settings import parent_materialized, EventWithOneDevice, \
EventSubSettingsOneDevice
class TestHardDrive(EventWithOneDevice):
type = {
'type': 'string',
# 'allowed': ['Short Offline', 'Extended Offline']
}
status = {
'type': 'string',
'required': True
}
lifetime = {
'type': 'integer',
}
firstError = {
'type': 'integer',
'nullable': True
}
snapshot = {
'type': 'objectid',
'data_relation': {
'resource': 'events',
'field': '_id',
'embeddable': True
}
}
error = {
'type': 'boolean',
'required': True
}
passedLifetime = {
'type': 'integer'
}
parent = parent_materialized
class TestHardDriveSettings(EventSubSettingsOneDevice):
_schema = TestHardDrive
fa = 'fa-flask'
short_description = 'A test of the health of the hard drive'
item_methods = ['GET']
| agpl-3.0 | Python |
7834a0182b96cc372d50fc58eefcbdaeceb71e3d | Update move_credit_limit_to_customer_credit_limit.py | gsnbng/erpnext,gsnbng/erpnext,gsnbng/erpnext,gsnbng/erpnext | erpnext/patches/v12_0/move_credit_limit_to_customer_credit_limit.py | erpnext/patches/v12_0/move_credit_limit_to_customer_credit_limit.py | # Copyright (c) 2019, Frappe and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
def execute():
''' Move credit limit and bypass credit limit to the child table of customer credit limit '''
frappe.reload_doc("Selling", "doctype", "Customer Credit Limit")
frappe.reload_doc("Selling", "doctype", "Customer")
if frappe.db.a_row_exists("Customer Credit Limit"):
return
move_credit_limit_to_child_table()
def move_credit_limit_to_child_table():
''' maps data from old field to the new field in the child table '''
fields=""
if frappe.db.has_column('Customer', 'bypass_credit_limit_check_at_sales_order'):
fields = ", bypass_credit_limit_check_at_sales_order"
credit_limit_record = frappe.db.sql(''' SELECT
name, credit_limit
{0}
FROM `tabCustomer`'''.format(fields), as_dict=1) #nosec
companies = frappe.get_all("Company", 'name')
for record in credit_limit_record:
customer = frappe.get_doc("Customer", record.name)
for company in companies:
customer.append("credit_limit_reference", {
'credit_limit': record.credit_limit,
'bypass_credit_limit_check': record.bypass_credit_limit_check_at_sales_order,
'company': company.name
})
customer.db_insert()
| # Copyright (c) 2019, Frappe and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
def execute():
''' Move credit limit and bypass credit limit to the child table of customer credit limit '''
frappe.reload_doc("Selling", "doctype", "Customer Credit Limit")
frappe.reload_doc("Selling", "doctype", "Customer")
if frappe.db.a_row_exists("Customer Credit Limit"):
return
move_credit_limit_to_child_table()
def move_credit_limit_to_child_table():
''' maps data from old field to the new field in the child table '''
fields=""
if frappe.db.has_column('Customer', 'bypass_credit_limit_check_at_sales_order'):
fields = ", bypass_credit_limit_check_at_sales_order"
credit_limit_record = frappe.db.sql(''' SELECT
name, credit_limit
{0}
FROM `tabCustomer`'''.format(fields), as_dict=1) #nosec
companies = frappe.get_all("Company", 'name')
for record in credit_limit_record:
customer = frappe.get_doc("Customer", record.name)
for company in companies:
customer.append("credit_limit_reference", {
'credit_limit': record.credit_limit,
'bypass_credit_limit_check': record.bypass_credit_limit_check_at_sales_order,
'company': company.name
})
customer.save() | agpl-3.0 | Python |
e97310e315e95eef0901f0238a1e9cce012f54a0 | Store ticket quantity on ticket bundle creation | m-ober/byceps,homeworkprod/byceps,m-ober/byceps,homeworkprod/byceps,m-ober/byceps,homeworkprod/byceps | byceps/services/ticket/models/ticket_bundle.py | byceps/services/ticket/models/ticket_bundle.py | # -*- coding: utf-8 -*-
"""
byceps.services.ticket.models.ticket_bundle
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2016 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from datetime import datetime
from ....database import db, generate_uuid
from ....util.instances import ReprBuilder
from ...seating.models.category import Category
from ...user.models.user import User
class TicketBundle(db.Model):
"""A set of tickets of the same category and with with a common
owner, seat manager, and user manager, respectively.
"""
__tablename__ = 'ticket_bundles'
id = db.Column(db.Uuid, default=generate_uuid, primary_key=True)
created_at = db.Column(db.DateTime, default=datetime.utcnow, nullable=False)
ticket_category_id = db.Column(db.Uuid, db.ForeignKey('seat_categories.id'), index=True, nullable=False)
ticket_category = db.relationship(Category)
ticket_quantity = db.Column(db.Integer, nullable=False)
owned_by_id = db.Column(db.Uuid, db.ForeignKey('users.id'), index=True, nullable=False)
owned_by = db.relationship(User, foreign_keys=[owned_by_id])
seats_managed_by_id = db.Column(db.Uuid, db.ForeignKey('users.id'), index=True, nullable=True)
seats_managed_by = db.relationship(User, foreign_keys=[seats_managed_by_id])
users_managed_by_id = db.Column(db.Uuid, db.ForeignKey('users.id'), index=True, nullable=True)
users_managed_by = db.relationship(User, foreign_keys=[users_managed_by_id])
def __init__(self, ticket_category_id, ticket_quantity, owned_by):
self.ticket_category_id = ticket_category_id
self.ticket_quantity = ticket_quantity
self.owned_by = owned_by
def __repr__(self):
return ReprBuilder(self) \
.add('id', str(self.id)) \
.add('party', self.category.party_id) \
.add('category', self.category.title) \
.add_with_lookup('ticket_quantity') \
.build()
| # -*- coding: utf-8 -*-
"""
byceps.services.ticket.models.ticket_bundle
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2016 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from datetime import datetime
from ....database import db, generate_uuid
from ....util.instances import ReprBuilder
from ...seating.models.category import Category
from ...user.models.user import User
class TicketBundle(db.Model):
"""A set of tickets of the same category and with with a common
owner, seat manager, and user manager, respectively.
"""
__tablename__ = 'ticket_bundles'
id = db.Column(db.Uuid, default=generate_uuid, primary_key=True)
created_at = db.Column(db.DateTime, default=datetime.utcnow, nullable=False)
ticket_category_id = db.Column(db.Uuid, db.ForeignKey('seat_categories.id'), index=True, nullable=False)
ticket_category = db.relationship(Category)
ticket_quantity = db.Column(db.Integer, nullable=False)
owned_by_id = db.Column(db.Uuid, db.ForeignKey('users.id'), index=True, nullable=False)
owned_by = db.relationship(User, foreign_keys=[owned_by_id])
seats_managed_by_id = db.Column(db.Uuid, db.ForeignKey('users.id'), index=True, nullable=True)
seats_managed_by = db.relationship(User, foreign_keys=[seats_managed_by_id])
users_managed_by_id = db.Column(db.Uuid, db.ForeignKey('users.id'), index=True, nullable=True)
users_managed_by = db.relationship(User, foreign_keys=[users_managed_by_id])
def __init__(self, ticket_category_id, owned_by):
self.ticket_category_id = ticket_category_id
self.owned_by = owned_by
def __repr__(self):
return ReprBuilder(self) \
.add('id', str(self.id)) \
.add('party', self.category.party_id) \
.add('category', self.category.title) \
.add_with_lookup('ticket_quantity') \
.build()
| bsd-3-clause | Python |
ac1a10bb90ef8c81a853b78bf7acd40772966fe5 | Use page-size not pageSize instead of limit (#430) | guardian/alerta,guardian/alerta,guardian/alerta,guardian/alerta | alerta/utils/paging.py | alerta/utils/paging.py |
from flask import current_app
from alerta.exceptions import ApiError
class Page(object):
def __init__(self, page=1, page_size=None, items=0):
self.page = page
self.page_size = page_size or current_app.config['DEFAULT_PAGE_SIZE']
self.items = items
if items and self.page > self.pages or self.page < 1:
raise ApiError("page out of range: 1-%s" % self.pages, 416)
@staticmethod
def from_params(params, items):
# page, page-size, limit (deprecated)
page = params.get('page', 1, int)
limit = params.get('limit', 0, int)
page_size = params.get('page-size', limit, int)
return Page(page, page_size, items)
@property
def pages(self):
return ((self.items - 1) // self.page_size) + 1
@property
def has_more(self):
return self.page < self.pages
|
from flask import current_app
from alerta.exceptions import ApiError
class Page(object):
def __init__(self, page=1, page_size=None, items=0):
self.page = page
self.page_size = page_size or current_app.config['DEFAULT_PAGE_SIZE']
self.items = items
if items and self.page > self.pages or self.page < 1:
raise ApiError("page out of range: 1-%s" % self.pages, 416)
@staticmethod
def from_params(params, items):
# page, page-size, limit (deprecated)
page = params.get('page', 1, int)
limit = params.get('limit', 0, int)
page_size = params.get('pageSize', limit, int)
return Page(page, page_size, items)
@property
def pages(self):
return ((self.items - 1) // self.page_size) + 1
@property
def has_more(self):
return self.page < self.pages
| apache-2.0 | Python |
fd12d4131dcf2277afd277c991a42fa475a1c097 | Tidy up pir-status.py | ilkkajylha/office-iot,ilkkajylha/office-iot | pir-status.py | pir-status.py | #!/usr/bin/env python
"""
Python source code - This python script reads pir state from arduino and makes it available via http(s). WIP
"""
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
import serial, argparse, os, time
# Parse arguments
parser = argparse.ArgumentParser()
# example: parser.add_argument('-', '--', help="default:" , default='')
parser.add_argument('-b', '--baud', help="Set baud, default: 9600" , default='9600')
parser.add_argument('-o', '--os', help="I have funny os, I want to se it manually")
parser.add_argument('-d', '--demo', help="Runs in demo mode, does not read data from arduino but instead uses random data")
parser.add_argument('-c', '--com', help="Set com port to use. *nix uses /dev/ttyACM0 and Windows uses COM5 for example. default:", default='COM5')
args = parser.parse_args()
globals().update(vars(parser.parse_args()))
device = str(args.com)
baud = int(args.baud)
ser = serial.Serial(device, baud)
print device, baud, ser
status = []
timer = 0
def readStatus():
status = []
timer = 0
while True:
pirstatus = ser.read(1)
if pirstatus.isdigit():
status.append(int(pirstatus))
timer+=1
print(timer)
if timer == 30:
timer = 0
if max(status) == 1:
print(type(max(status)))
else:
print(type(max(status)))
timer = 0
status = []
readStatus()
| #!/usr/bin/env python
"""
Python source code - This python script reads pir state from arduino and makes it available via http(s). WIP
"""
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
import serial, argparse, os, time
# Parse arguments
parser = argparse.ArgumentParser()
# example: parser.add_argument('-', '--', help="default:" , default='')
parser.add_argument('-b', '--baud', help="Set baud, default: 9600" , default='9600')
parser.add_argument('-o', '--os', help="I have funny os, I want to se it manually")
parser.add_argument('-d', '--demo', help="Runs in demo mode, does not read data from arduino but instead uses random data")
parser.add_argument('-c', '--com', help="Set com port to use. *nix uses /dev/ttyACM0 and Windows uses COM5 for example. default:", default='COM5')
args = parser.parse_args()
globals().update(vars(parser.parse_args()))
device = str(args.com)
baud = int(args.baud)
ser = serial.Serial(device, baud)
print device, baud, ser
status = []
timer = 0
#while True:
# print(ser.read(1))
def readStatus():
status = []
timer = 0
while True:
pirstatus = ser.read(1)
if pirstatus.isdigit():
status.append(int(pirstatus))
timer+=1
print(timer)
if timer == 30:
timer = 0
if max(status) == 1:
#print("room has had movement in past 500 time units")
#print(max(status))
print(type(max(status)))
#print(status)
else:
#print("room has NOT had movement in past 500 time units")
#print(status)
#print(max(status))
print(type(max(status)))
timer = 0
status = []
readStatus()
| mit | Python |
65a3b6725ab03ab585ab66195c27368d349bfaa9 | Add invited to event creation | DavidJFelix/hatchit,DavidJFelix/hatchit,DavidJFelix/hatchit | src/event_manager/views/api.py | src/event_manager/views/api.py | from django.shortcuts import render
import json
from django.http import HttpResponse
from event_manager.models import Suggestion, Event, Invite
from django.contrib.auth.models import User
def api_get(request, type="e"):
if type == "e":
# data = Event.objects.values()
# return render(request, 'api.html', {'data': data})
invites = Invite.objects.select_related().filter(user__id=1)
event_objects = [invite.event for invite in invites]
events = [
{
"id": event.id,
"owner_name": event.owner.username,
"description": event.description,
"location_id": event.location_id,
"start_time": str(event.start_time),
"end_time": str(event.end_time),
} for event in event_objects
]
return HttpResponse(json.dumps(events), content_type="application/json")
elif type == "u":
Users = User.objects.all()
data = [
{
"id": user.id,
"username": user.username,
"first_name": user.first_name,
"last_name": user.last_name,
"email": user.email
} for user in Users
]
return HttpResponse(json.dumps(data), content_type="application/json")
elif type == "s":
Suggestions = Suggestion.objects.all()
data = [
{
"id": suggestion.id,
"username": suggestion.user_id,
"first_name": suggestion.response,
"last_name": str(suggestion.time)
} for suggestion in Suggestions
]
return HttpResponse(json.dumps(data), content_type="application/json")
else:
return render(request, 'api.html', {'data': ""})
def new(request):
# event_description = request.POST["description"]
# event_owner_id = request.POST["owner_id"]
# event_location_id = request.POST["location_id"]
event_description = "Felix"
event_owner_id = 1
event_location_id = 1
event_start_time = datetime.now()
event = Event(
description = event_description,
owner_id = event_owner_id,
location_id = event_location_id,
start_time = event_start_time
)
event_id = event.id
event.save()
invite_event_id = event_id
invite_user_id = 1
# invite_rsvp = "NONE"
invite = Invite(
event_id = invite_event_id,
user_id = 1,
rsvp = event.NONE
)
invite.save()
return HttpResponse("ASDF", content_type="application/json") | from django.shortcuts import render
import json
from django.http import HttpResponse
from event_manager.models import Suggestion, Event, Invite
from django.contrib.auth.models import User
def api_get(request, type="e"):
if type == "e":
# data = Event.objects.values()
# return render(request, 'api.html', {'data': data})
invites = Invite.objects.select_related().filter(user__id=1)
event_objects = [invite.event for invite in invites]
events = [
{
"id": event.id,
"owner_name": event.owner.username,
"description": event.description,
"location_id": event.location_id,
"start_time": str(event.start_time),
"end_time": str(event.end_time),
} for event in event_objects
]
return HttpResponse(json.dumps(events), content_type="application/json")
elif type == "u":
Users = User.objects.all()
data = [
{
"id": user.id,
"username": user.username,
"first_name": user.first_name,
"last_name": user.last_name,
"email": user.email
} for user in Users
]
return HttpResponse(json.dumps(data), content_type="application/json")
elif type == "s":
Suggestions = Suggestion.objects.all()
data = [
{
"id": suggestion.id,
"username": suggestion.user_id,
"first_name": suggestion.response,
"last_name": str(suggestion.time)
} for suggestion in Suggestions
]
return HttpResponse(json.dumps(data), content_type="application/json")
else:
return render(request, 'api.html', {'data': ""})
| agpl-3.0 | Python |
b89a93faae8237561181f3d22b164de5b6dc728c | Remove unused imports. (#4083) | keras-team/keras,keras-team/keras,dolaameng/keras | examples/imdb_cnn_lstm.py | examples/imdb_cnn_lstm.py | '''Train a recurrent convolutional network on the IMDB sentiment
classification task.
Gets to 0.8498 test accuracy after 2 epochs. 41s/epoch on K520 GPU.
'''
from __future__ import print_function
import numpy as np
np.random.seed(1337) # for reproducibility
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.layers import Embedding
from keras.layers import LSTM
from keras.layers import Convolution1D, MaxPooling1D
from keras.datasets import imdb
# Embedding
max_features = 20000
maxlen = 100
embedding_size = 128
# Convolution
filter_length = 5
nb_filter = 64
pool_length = 4
# LSTM
lstm_output_size = 70
# Training
batch_size = 30
nb_epoch = 2
'''
Note:
batch_size is highly sensitive.
Only 2 epochs are needed as the dataset is very small.
'''
print('Loading data...')
(X_train, y_train), (X_test, y_test) = imdb.load_data(nb_words=max_features)
print(len(X_train), 'train sequences')
print(len(X_test), 'test sequences')
print('Pad sequences (samples x time)')
X_train = sequence.pad_sequences(X_train, maxlen=maxlen)
X_test = sequence.pad_sequences(X_test, maxlen=maxlen)
print('X_train shape:', X_train.shape)
print('X_test shape:', X_test.shape)
print('Build model...')
model = Sequential()
model.add(Embedding(max_features, embedding_size, input_length=maxlen))
model.add(Dropout(0.25))
model.add(Convolution1D(nb_filter=nb_filter,
filter_length=filter_length,
border_mode='valid',
activation='relu',
subsample_length=1))
model.add(MaxPooling1D(pool_length=pool_length))
model.add(LSTM(lstm_output_size))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
print('Train...')
model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch,
validation_data=(X_test, y_test))
score, acc = model.evaluate(X_test, y_test, batch_size=batch_size)
print('Test score:', score)
print('Test accuracy:', acc)
| '''Train a recurrent convolutional network on the IMDB sentiment
classification task.
Gets to 0.8498 test accuracy after 2 epochs. 41s/epoch on K520 GPU.
'''
from __future__ import print_function
import numpy as np
np.random.seed(1337) # for reproducibility
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.layers import Embedding
from keras.layers import LSTM, GRU, SimpleRNN
from keras.layers import Convolution1D, MaxPooling1D
from keras.datasets import imdb
# Embedding
max_features = 20000
maxlen = 100
embedding_size = 128
# Convolution
filter_length = 5
nb_filter = 64
pool_length = 4
# LSTM
lstm_output_size = 70
# Training
batch_size = 30
nb_epoch = 2
'''
Note:
batch_size is highly sensitive.
Only 2 epochs are needed as the dataset is very small.
'''
print('Loading data...')
(X_train, y_train), (X_test, y_test) = imdb.load_data(nb_words=max_features)
print(len(X_train), 'train sequences')
print(len(X_test), 'test sequences')
print('Pad sequences (samples x time)')
X_train = sequence.pad_sequences(X_train, maxlen=maxlen)
X_test = sequence.pad_sequences(X_test, maxlen=maxlen)
print('X_train shape:', X_train.shape)
print('X_test shape:', X_test.shape)
print('Build model...')
model = Sequential()
model.add(Embedding(max_features, embedding_size, input_length=maxlen))
model.add(Dropout(0.25))
model.add(Convolution1D(nb_filter=nb_filter,
filter_length=filter_length,
border_mode='valid',
activation='relu',
subsample_length=1))
model.add(MaxPooling1D(pool_length=pool_length))
model.add(LSTM(lstm_output_size))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
print('Train...')
model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch,
validation_data=(X_test, y_test))
score, acc = model.evaluate(X_test, y_test, batch_size=batch_size)
print('Test score:', score)
print('Test accuracy:', acc)
| apache-2.0 | Python |
6d5961628cdfa1b54e2290edc73e112ee5ca236f | Update sliv.py | RigFox/VK_chatrename,RigFox/VK_chatrename | sliv.py | sliv.py | # -*- coding: utf-8 -*-
from setting.setting import VK_API_TOKEN
from setting.setting import VK_CHAT_ID
import requests
import datetime
import logging
# Mirror all DEBUG-level output to log/log.txt and the console.
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
fh = logging.FileHandler('log/log.txt')
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
logger.addHandler(fh)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
logger.addHandler(ch)
# Days remaining until the 1st of September.
fSeptember = "01.09.2016"
fSeptemberDate = datetime.datetime.strptime(fSeptember, "%d.%m.%Y")
nowDate = datetime.datetime.now()
diffDate = fSeptemberDate - nowDate
# Pick the grammatically correct Russian plural for the day count.
dayStr = "день"
if diffDate.days // 10 == 1:
    dayStr = "дней"
elif (diffDate.days % 10 == 0) or (diffDate.days % 10 >= 5):
    dayStr = "дней"
elif (diffDate.days % 10 in [2,3,4]):
    dayStr = "дня"
# Bug fix: dayStr was computed above but never used -- the chat title
# always hard-coded "дней" regardless of the day count.
# NOTE(review): the progress formula divides by (92 - days) and raises
# ZeroDivisionError when exactly 92 days remain -- confirm intended range.
pmProgress = str(round(2 + (92 / (92 - diffDate.days)), 6)) + " ПМиИ. " + str(diffDate.days) + " " + dayStr + " до 1 сентября"
payload = {"chat_id": VK_CHAT_ID, "title": pmProgress, "access_token": VK_API_TOKEN}
r = requests.get('https://api.vk.com/method/messages.editChat', params=payload, timeout=10)
logger.debug("VK API return: " + r.text)
| # -*- coding: utf-8 -*-
from setting.setting import VK_API_TOKEN
from setting.setting import VK_CHAT_ID
import requests
import datetime
import logging
# Mirror all DEBUG-level output to log/log.txt and the console.
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
fh = logging.FileHandler('log/log.txt')
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
logger.addHandler(fh)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
logger.addHandler(ch)
# Days remaining until the 1st of September.
fSeptember = "01.09.2016"
fSeptemberDate = datetime.datetime.strptime(fSeptember, "%d.%m.%Y")
nowDate = datetime.datetime.now()
diffDate = fSeptemberDate - nowDate
# NOTE(review): divides by diffDate.days -- raises ZeroDivisionError on the
# target day itself; under Python 2 `92 / days` is integer division -- confirm.
pmProgress = str(round(2 + (92 / diffDate.days), 6)) + " ПМиИ. " + str(diffDate.days) + " дней до 1 сентября"
payload = {"chat_id": VK_CHAT_ID, "title": pmProgress, "access_token": VK_API_TOKEN}
r = requests.get('https://api.vk.com/method/messages.editChat', params=payload, timeout=10)
logger.debug("VK API return: " + r.text)
| apache-2.0 | Python |
d4b00b7d18dc4ea142ba32bbcf3811d27572a1e5 | Test Linux | compmonk/playlister | playlister.py | playlister.py | #!/usr/bin/python
#20170913:232732
import argparse
import os
import os.path as osp
import sys
import subprocess
import re
from json import loads
from urllib import quote
class bcolors:
    """ANSI escape sequences for coloured terminal output."""
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    INFO = '\033[92m'
    WARNING = '\033[93m'
    ERROR = '\033[91m'
    RESET = '\033[0m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
def duration(file, precision = 3):
    """Return the duration of *file* as an integer in 10**-precision seconds.

    Runs ffprobe on the file and parses its JSON output; the duration is
    rounded to *precision* decimals and scaled to an integer.
    """
    # Pass the arguments as a list with shell=False: the previous version
    # built a shell command string and "quoted" the filename with
    # re.escape, which escapes for regexes, not for the shell -- a
    # filename containing shell metacharacters could break the command or
    # inject arbitrary shell code.
    result = subprocess.Popen(
        ['ffprobe', '-print_format', 'json', '-show_format', file],
        stdout = subprocess.PIPE,
        stderr = subprocess.STDOUT)
    output = ''.join(result.stdout.readlines())
    # ffprobe prints banner noise around the JSON -- cut out the object.
    meta = loads(output[output.find('{') : output.rfind('}') + 1])
    duration = float(meta['format']['duration'])
    duration = round(duration, precision) * 10 ** precision
    return int(duration)
def error(message):
    """Print *message* to stderr (in red) and abort the program."""
    print >> sys.stderr, bcolors.ERROR + "[ERROR] "+ bcolors.RESET + "{}".format(message)
    # Bug fix: exit with a non-zero status so shells and callers can detect
    # the failure (previously exited with 0, i.e. success).
    exit(1)
def validate(directory):
    """Abort via error() unless *directory* exists on disk."""
    if osp.exists(directory):
        return
    error("Directory {} does not exist".format(directory))
def info(message):
    """Print an informational message when --verbose was given.

    Reads the module-level *verbose* flag set by the argument parser.
    """
    if verbose:
        print bcolors.INFO + "[INFO] " + bcolors.RESET + "{}".format(message)
# Extensions recognised as video files.
video_extensions = ['.mp4']
def is_video(file):
    """Return True when *file* has a recognised video extension."""
    _, extension = osp.splitext(file)
    return extension in video_extensions
# Command-line entry point: scan a directory tree for videos and write an
# XSPF playlist (VLC format) into that directory.
parser = argparse.ArgumentParser(description = "Create VLC playlist")
parser.add_argument("-d", "--directory", default = os.getcwd(), help = "directory containing videos")
parser.add_argument("-n", "--name", help = "name of the playlist")
parser.add_argument("-v", "--verbose", default = False, action="store_true", help="verbose")
args = parser.parse_args()
directory = args.directory.rstrip('/')
# Default playlist name: the directory's own basename.
name = args.name if args.name else osp.basename(directory)
verbose = args.verbose
info("Entering directory {}".format(directory))
validate(directory)
# Collect every file under the directory as a path relative to it.
files = [osp.relpath(osp.join(dirpath, file), directory) for (dirpath, dirnames, filenames) in os.walk(directory) for file in filenames]
info("Finding video files")
# NOTE(review): filter(...) returning a list and the bare `.sort()` call
# rely on Python 2 semantics (under Python 3 filter yields an iterator).
videos = filter(is_video, files)
videos.sort()
number_of_videos = len(videos)
info("{} video/s found".format(number_of_videos))
if number_of_videos == 0:
    error("No videos found")
try:
    info("Creating playlist {}".format(name + '.xspf'))
    playlist = open(directory + '/' + name + '.xspf', 'w')
except IOError:
    error("Permission denied")
info("Writing into playlist")
# Emit the XSPF document by hand: header, one <track> per video (location
# is URL-quoted and relative), then the VLC ordering extension.
playlist.write('<?xml version="1.0" encoding="UTF-8"?>\n')
playlist.write('<playlist xmlns="http://xspf.org/ns/0/" xmlns:vlc="http://www.videolan.org/vlc/playlist/ns/0/" version="1">\n')
playlist.write('\t<title>Playlist</title>\n')
playlist.write('\t<trackList>\n')
for i in range(number_of_videos):
    info("Video {} of {} : {}".format(i + 1, number_of_videos, videos[i]))
    playlist.write('\t\t<track>\n')
    playlist.write('\t\t\t<location>./{}</location>\n'.format(quote(videos[i])))
    playlist.write('\t\t\t<duration>{}</duration>\n'.format(duration(osp.abspath(directory + os.sep + videos[i]))))
    playlist.write('\t\t\t<extension application="http://www.videolan.org/vlc/playlist/0">\n')
    playlist.write('\t\t\t\t<vlc:id>{}</vlc:id>\n'.format(i))
    playlist.write('\t\t\t</extension>\n')
    playlist.write('\t\t</track>\n')
playlist.write('\t</trackList>\n')
playlist.write('\t<extension application="http://www.videolan.org/vlc/playlist/0">\n')
for i in range(number_of_videos):
    playlist.write('\t\t\t<vlc:item tid="{}"/>\n'.format(i))
playlist.write('\t</extension>\n')
playlist.write('</playlist>\n')
playlist.close()
| mit | Python | |
a508196c65b6d20df4efe3c973827e0579298245 | Fix serializing message logs when publishing from stores. | waartaa/ircb | ircb/models/logs.py | ircb/models/logs.py | # -*- coding: utf-8 -*-
import datetime
import sqlalchemy as sa
from ircb.models.lib import Base
class BaseLog(object):
    """Mixin with the columns shared by all IRC log tables."""
    id = sa.Column(sa.Integer, primary_key=True)
    hostname = sa.Column(sa.String(100), nullable=False)
    roomname = sa.Column(sa.String(255), nullable=False)
    message = sa.Column(sa.String(2048), default='')
    event = sa.Column(sa.String(20), nullable=False)
    timestamp = sa.Column(sa.TIMESTAMP(timezone=True))
    mask = sa.Column(sa.String(100), default='')
    user_id = sa.Column(sa.Integer)
    # timestamps
    created = sa.Column(sa.DateTime, default=datetime.datetime.utcnow)
    last_updated = sa.Column(sa.DateTime,
                             default=datetime.datetime.utcnow)
    def to_dict(self, serializable=False):
        """Return the row as a dict with datetime columns as POSIX floats.

        NOTE(review): the *serializable* flag is accepted but ignored --
        timestamps are always converted; confirm that is intended.
        """
        d = super().to_dict()
        d['timestamp'] = self.timestamp.timestamp()
        d['created'] = self.created.timestamp()
        d['last_updated'] = self.last_updated.timestamp()
        return d
class MessageLog(BaseLog, Base):
    """
    Network/Channel/PM messages

    BaseLog precedes Base so its to_dict() takes MRO precedence.
    """
    __tablename__ = 'message_logs'
    from_nickname = sa.Column(sa.String(20))
    from_user_id = sa.Column(sa.Integer, nullable=True, default=None)
class ActivityLog(BaseLog, Base):
    """
    Channel activity(join, part, quit) logs

    BaseLog precedes Base so its to_dict() takes MRO precedence.
    """
    __tablename__ = 'activity_logs'
| # -*- coding: utf-8 -*-
import datetime
import sqlalchemy as sa
from ircb.models.lib import Base
class BaseLog(object):
    """Mixin with the columns shared by all IRC log tables."""
    id = sa.Column(sa.Integer, primary_key=True)
    hostname = sa.Column(sa.String(100), nullable=False)
    roomname = sa.Column(sa.String(255), nullable=False)
    message = sa.Column(sa.String(2048), default='')
    event = sa.Column(sa.String(20), nullable=False)
    timestamp = sa.Column(sa.TIMESTAMP(timezone=True))
    mask = sa.Column(sa.String(100), default='')
    user_id = sa.Column(sa.Integer)
    # timestamps
    created = sa.Column(sa.DateTime, default=datetime.datetime.utcnow)
    last_updated = sa.Column(sa.DateTime,
                             default=datetime.datetime.utcnow)
    def to_dict(self, serializable=False):
        """Return the row as a dict; with serializable=True the datetime
        columns are replaced by POSIX timestamp floats."""
        d = super().to_dict()
        if serializable:
            d['timestamp'] = self.timestamp.timestamp()
            d['created'] = self.created.timestamp()
            d['last_updated'] = self.last_updated.timestamp()
        # Bug fix: the dict was built and (optionally) converted but never
        # returned, so to_dict() always yielded None.
        return d
class MessageLog(BaseLog, Base):
    """
    Network/Channel/PM messages
    """
    # Bug fix: bases were (Base, BaseLog); with Base first in the MRO the
    # shared BaseLog column/serialization overrides never took precedence.
    __tablename__ = 'message_logs'
    from_nickname = sa.Column(sa.String(20))
    from_user_id = sa.Column(sa.Integer, nullable=True, default=None)
class ActivityLog(BaseLog, Base):
    """
    Channel activity(join, part, quit) logs
    """
    # Bug fix: bases were (Base, BaseLog); with Base first in the MRO the
    # shared BaseLog column/serialization overrides never took precedence.
    __tablename__ = 'activity_logs'
| mit | Python |
25a61363ba84a46422ef1a8cec317619d69b60a2 | Fix typo | kaichogami/sympy_gamma,bolshoibooze/sympy_gamma,bolshoibooze/sympy_gamma,kaichogami/sympy_gamma,iScienceLuvr/sympy_gamma,kaichogami/sympy_gamma,github4ry/sympy_gamma,debugger22/sympy_gamma,github4ry/sympy_gamma,iScienceLuvr/sympy_gamma,debugger22/sympy_gamma,github4ry/sympy_gamma,bolshoibooze/sympy_gamma,iScienceLuvr/sympy_gamma | app/templatetags/extra_tags.py | app/templatetags/extra_tags.py | from django import template
import urllib
register = template.Library()
@register.tag(name='make_query')
def do_make_query(parser, token):
    """Compile {% make_query %}: requires exactly one argument."""
    bits = token.split_contents()
    if len(bits) != 2:
        raise template.TemplateSyntaxError(
            "%r tag requires a single argument" % token.contents.split()[0])
    tag_name, query = bits
    return QueryNode(query)
class QueryNode(template.Node):
    """Render a /input/?i=... URL for a literal or variable query."""
    def __init__(self, query):
        # A quoted token is kept as the raw literal (quotes included);
        # anything else is resolved as a template variable at render time.
        if query[0] == query[-1] and query[0] in ('"', "'"):
            self.query = query
        else:
            self.query = template.Variable(query)
    def render(self, context):
        # `unicode` check => Python 2 module; literals carry their quotes,
        # hence the [1:-1] slice before URL-quoting.
        if isinstance(self.query, unicode):
            return "/input/?i=" + urllib.quote(self.query[1:-1])
        else:
            return "/input/?i=" + urllib.quote(self.query.resolve(context))
@register.tag(name='make_query_link')
def do_make_query(parser, token):
    """Compile {% make_query_link %}: requires exactly one argument."""
    try:
        tag_name, query = token.split_contents()
    except ValueError:
        raise template.TemplateSyntaxError(
            "%r tag requires a single argument" % token.contents.split()[0])
    return QueryLinkNode(query)
class QueryLinkNode(template.Node):
    """Render an <a> element linking to /input/?i=... for a query."""
    def __init__(self, query):
        # A quoted token is kept as the raw literal (quotes included);
        # anything else is resolved as a template variable at render time.
        if query[0] == query[-1] and query[0] in ('"', "'"):
            self.query = query
        else:
            self.query = template.Variable(query)
    def render(self, context):
        if isinstance(self.query, unicode) or isinstance(self.query, str):
            q = self.query[1:-1]  # strip the surrounding quotes
        else:
            q = self.query.resolve(context)
        # NOTE(review): q is interpolated into HTML without escaping -- the
        # href part is urllib.quote()d but the link text is not; confirm the
        # inputs are trusted or escape with django.utils.html.escape.
        link = '<a href="/input/?i={0}">{1}</a>'.format(urllib.quote(q), q)
        return link
| from django import template
import urllib
register = template.Library()
@register.tag(name='make_query')
def do_make_query(parser, token):
    """Compile {% make_query %}: requires exactly one argument."""
    try:
        tag_name, query = token.split_contents()
    except ValueError:
        raise template.TemplateSyntaxError(
            "%r tag requires a single argument" % token.contents.split()[0])
    return QueryNode(query)
class QueryNode(template.Node):
    """Render a /input/?i=... URL for a literal or variable query."""
    def __init__(self, query):
        # Quoted token => raw literal (quotes kept); otherwise a variable.
        if query[0] == query[-1] and query[0] in ('"', "'"):
            self.query = query
        else:
            self.query = template.Variable(query)
    def render(self, context):
        # `unicode` check => Python 2 module; [1:-1] strips the quotes.
        if isinstance(self.query, unicode):
            return "/input/?i=" + urllib.quote(self.query[1:-1])
        else:
            return "/input/?i=" + urllib.quote(self.query.resolve(context))
@register.tag(name='make_query_link')
def do_make_query(parser, token):
    """Compile {% make_query_link %}: requires exactly one argument."""
    try:
        tag_name, query = token.split_contents()
    except ValueError:
        raise template.TemplateSyntaxError(
            "%r tag requires a single argument" % token.contents.split()[0])
    return QueryLinkNode(query)
class QueryLinkNode(template.Node):
    """Render an <a> element linking to /input/?i=... for a query."""
    def __init__(self, query):
        # Quoted token => raw literal (quotes kept); otherwise a variable.
        if query[0] == query[-1] and query[0] in ('"', "'"):
            self.query = query
        else:
            self.query = template.Variable(query)
    def render(self, context):
        if isinstance(self.query, unicode) or isinstance(self.query, str):
            q = self.query[1:-1]  # strip the surrounding quotes
        else:
            # Bug fix: was self.query.resolve(self.context) -- the node has
            # no `context` attribute; render() receives the context here.
            q = self.query.resolve(context)
        link = '<a href="/input/?i={0}">{1}</a>'.format(urllib.quote(q), q)
        return link
| bsd-3-clause | Python |
7a78525bb8cc6176dfbe348e5f95373c1d70628f | Add the checkRecaptcha( req, secret, simple=True ) function | kensonman/webframe,kensonman/webframe,kensonman/webframe | functions.py | functions.py | #-*- coding: utf-8 -*-
def getClientIP( req ):
    '''
    Return the client IP address for *req*.

    Prefers the first entry of the X-Forwarded-For header (set by
    proxies / load balancers); falls back to REMOTE_ADDR.
    @param req The request;
    '''
    forwarded = req.META.get('HTTP_X_FORWARDED_FOR')
    if forwarded:
        return forwarded.split(',')[0]
    return req.META.get('REMOTE_ADDR')
def getBool( val, defVal=False, trueOpts=['YES', 'Y', '1', 'TRUE', 'T'] ):
    '''
    Parse *val* as a boolean.
    @param val The value to be parsed as bool
    @param defVal The value returned when val is falsy (e.g. None)
    @param trueOpts The accepted spellings of TRUE (upper-cased)
    '''
    if not val:
        return defVal
    return str(val).upper() in trueOpts
def checkRecaptcha( req, secret, simple=True ):
    '''
    Verify the Google reCaptcha answer submitted with the request.
    @param req The request;
    @param secret The secret retrieved from Google reCaptcha registration;
    @param simple Return the simple boolean value of verification if True, otherwise, return the JSON value of verification;
    '''
    import requests
    apiurl='https://www.google.com/recaptcha/api/siteverify'
    fieldname='g-recaptcha-response'
    answer=req.POST.get(fieldname, None)
    clientIP=getClientIP( req )
    rst=requests.post(apiurl, data={'secret': secret, 'response':answer, 'remoteip': clientIP}).json()
    if simple:
        return getBool(rst.get('success', 'False'))
    # Bug fix: previously returned r.json(), but no name `r` exists here
    # (NameError); `rst` already holds the decoded JSON verification result.
    return rst
| #-*- coding: utf-8 -*-
def getClientIP( req ):
    '''
    Return the client IP address: the first X-Forwarded-For entry when a
    proxy set one, otherwise REMOTE_ADDR.
    '''
    xForwardedFor=req.META.get('HTTP_X_FORWARDED_FOR')
    if xForwardedFor:
        # X-Forwarded-For is a comma-separated chain; the first hop is
        # the original client.
        ip=xForwardedFor.split(',')[0]
    else:
        ip=req.META.get('REMOTE_ADDR')
    return ip
def getBool( val, trueOpts=['YES', 'Y', '1', 'TRUE', 'T'] ):
    '''
    Parse *val* as a boolean: truthy input is matched case-insensitively
    against *trueOpts*; falsy input (None, '', 0) returns False.
    '''
    # NOTE: the mutable default list is never mutated here, so it is benign.
    if val:
        return str(val).upper() in trueOpts
    return False
| apache-2.0 | Python |
7682bff9451a3a5f471ef8f28b905d3ae2b2c92a | Use layout.scalar in test | arsenovic/clifford,arsenovic/clifford | clifford/test/test_multivector_inverse.py | clifford/test/test_multivector_inverse.py | import numpy as np
import pytest
import clifford as cf
class TestClosedForm:
@pytest.mark.parametrize('p, q', [
(p, total_dims - p)
for total_dims in [1, 2, 3, 4, 5]
for p in range(total_dims + 1)
])
def test_hitzer_inverse(self, p, q):
Ntests = 100
layout, blades = cf.Cl(p, q)
for i in range(Ntests):
mv = layout.randomMV()
mv_inv = mv.hitzer_inverse()
np.testing.assert_almost_equal((mv * mv_inv).value,
layout.scalar.value)
| import numpy as np
import pytest
import clifford as cf
class TestClosedForm:
    """Tests for the closed-form (Hitzer) multivector inverse."""
    @pytest.mark.parametrize('p, q', [
        (p, total_dims - p)
        for total_dims in [1, 2, 3, 4, 5]
        for p in range(total_dims + 1)
    ])
    def test_hitzer_inverse(self, p, q):
        # For every signature Cl(p, q) up to 5 total dimensions, check that
        # mv * mv.hitzer_inverse() is numerically the scalar 1
        # (expressed here as 1.0 + 0*e1 to obtain a multivector).
        Ntests = 100
        layout, blades = cf.Cl(p, q)
        for i in range(Ntests):
            mv = layout.randomMV()
            mv_inv = mv.hitzer_inverse()
            np.testing.assert_almost_equal((mv * mv_inv).value,
                                           (1.0 + 0*blades["e1"]).value)
| bsd-3-clause | Python |
02bcb1d8e3766b672688db6d8885fce9e198f526 | Remove statsd code | ministryofjustice/cla_backend,ministryofjustice/cla_backend,ministryofjustice/cla_backend,ministryofjustice/cla_backend | cla_backend/apps/core/signals.py | cla_backend/apps/core/signals.py | import logging
logger = logging.getLogger(__name__)
def log_user_created(sender, instance, created, **kwargs):
    """Signal handler: log newly created users with their permission flags.

    Presumably connected to post_save (it receives `created`) -- confirm
    against the signal registration.  `unicode` => Python 2 module.
    """
    if created:
        logger.info(
            "User created",
            extra={
                "USERNAME": instance.username,
                "IS_STAFF": unicode(instance.is_staff),
                "IS_ACTIVE": unicode(instance.is_active),
                "IS_SUPERUSER": unicode(instance.is_superuser),
            },
        )
def log_user_modified(sender, instance, **kwargs):
    """Signal handler: log a change to an existing user.

    Looks the user up first and returns silently when no row exists yet,
    so creations are not double-logged here.  Presumably connected to
    pre_save -- confirm against the signal registration.
    """
    try:
        sender.objects.get(pk=instance.pk)
    except sender.DoesNotExist:
        return
    logger.info(
        "User modified",
        extra={
            "USERNAME": instance.username,
            "IS_STAFF": unicode(instance.is_staff),
            "IS_ACTIVE": unicode(instance.is_active),
            "IS_SUPERUSER": unicode(instance.is_superuser),
        },
    )
| import logging
from django_statsd.clients import statsd
logger = logging.getLogger(__name__)
def log_user_created(sender, instance, created, **kwargs):
    """Signal handler: bump the statsd counter and log newly created users.

    Presumably connected to post_save (it receives `created`) -- confirm.
    """
    if created:
        statsd.incr("user.created")
        logger.info(
            "User created",
            extra={
                "USERNAME": instance.username,
                "IS_STAFF": unicode(instance.is_staff),
                "IS_ACTIVE": unicode(instance.is_active),
                "IS_SUPERUSER": unicode(instance.is_superuser),
            },
        )
def log_user_modified(sender, instance, **kwargs):
    """Signal handler: bump the statsd counter and log a user modification.

    Returns silently when the row does not exist yet (i.e. a creation).
    """
    try:
        sender.objects.get(pk=instance.pk)
    except sender.DoesNotExist:
        return
    statsd.incr("user.modified")
    logger.info(
        "User modified",
        extra={
            "USERNAME": instance.username,
            "IS_STAFF": unicode(instance.is_staff),
            "IS_ACTIVE": unicode(instance.is_active),
            "IS_SUPERUSER": unicode(instance.is_superuser),
        },
    )
| mit | Python |
db0163e8af75ba22c6ec8d9f582027583e7c482f | use post_config_hook | ultrabug/py3status,tobes/py3status,ultrabug/py3status,tobes/py3status,guiniol/py3status,vvoland/py3status,alexoneill/py3status,guiniol/py3status,ultrabug/py3status,Andrwe/py3status,valdur55/py3status,Andrwe/py3status,valdur55/py3status,valdur55/py3status | py3status/modules/getjson.py | py3status/modules/getjson.py | # -*- coding: utf-8 -*-
"""
Display JSON data fetched from a URL.
This module gets the given `url` configuration parameter and assumes the
response is a JSON object. The keys of the JSON object are used as the format
placeholders. The format placeholders are replaced by the value. Objects that
are nested can be accessed by using the `delimiter` configuration parameter
in between.
Examples:
```
# Straightforward key replacement
url = 'http://ip-api.com/json'
format = '{lat}, {lon}'
# Access child objects
url = 'http://api.icndb.com/jokes/random'
format = '{value-joke}'
# Access title from 0th element of articles list
url = 'https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey={KEY}'
format = '{articles-0-title}'
# Access if top-level object is a list
url = 'https://jsonplaceholder.typicode.com/posts/1/comments'
format = '{0-name}'
```
Configuration parameters:
cache_timeout: refresh interval for this module (default 30)
delimiter: the delimiter between parent and child objects (default '-')
format: display format for this module (default None)
timeout: time to wait for a response, in seconds (default 5)
url: specify URL to fetch JSON from (default None)
Format placeholders:
Placeholders will be replaced by the JSON keys.
Placeholders for objects with sub-objects are flattened using 'delimiter'
in between (eg. {'parent': {'child': 'value'}} will use placeholder
{parent-child}).
Placeholders for list elements have 'delimiter' followed by the index
(eg. {'parent': ['this', 'that']) will use placeholders {parent-0}
for 'this' and {parent-1} for 'that'.
@author vicyap
SAMPLE OUTPUT
{'full_text': 'Github: Everything operating normally'}
"""
STRING_ERROR = 'missing url'
class Py3status:
    """
    py3status module: fetch a JSON document from `url` and render it
    through the `format` template (keys flattened with `delimiter`).
    """
    # available configuration parameters
    cache_timeout = 30
    delimiter = '-'
    format = None
    timeout = 5
    url = None
    def post_config_hook(self):
        # Fail fast at configuration time when no URL was supplied.
        if not self.url:
            raise Exception(STRING_ERROR)
    def getjson(self):
        """
        Fetch and flatten the JSON document, then return the py3status
        response dict; on request failure the text is left empty.
        """
        try:
            json_data = self.py3.request(self.url, timeout=self.timeout).json()
            json_data = self.py3.flatten_dict(json_data, self.delimiter, True)
        except self.py3.RequestException:
            json_data = None
        if json_data:
            full_text = self.py3.safe_format(self.format, json_data)
        else:
            full_text = ''
        return {
            'cached_until': self.py3.time_in(self.cache_timeout),
            'full_text': full_text
        }
if __name__ == "__main__":
"""
Run module in test mode.
"""
from py3status.module_test import module_test
module_test(Py3status)
| # -*- coding: utf-8 -*-
"""
Display JSON data fetched from a URL.
This module gets the given `url` configuration parameter and assumes the
response is a JSON object. The keys of the JSON object are used as the format
placeholders. The format placeholders are replaced by the value. Objects that
are nested can be accessed by using the `delimiter` configuration parameter
in between.
Examples:
```
# Straightforward key replacement
url = 'http://ip-api.com/json'
format = '{lat}, {lon}'
# Access child objects
url = 'http://api.icndb.com/jokes/random'
format = '{value-joke}'
# Access title from 0th element of articles list
url = 'https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey={KEY}'
format = '{articles-0-title}'
# Access if top-level object is a list
url = 'https://jsonplaceholder.typicode.com/posts/1/comments'
format = '{0-name}'
```
Configuration parameters:
cache_timeout: refresh interval for this module (default 30)
delimiter: the delimiter between parent and child objects (default '-')
format: display format for this module (default None)
timeout: time to wait for a response, in seconds (default 5)
url: specify URL to fetch JSON from (default None)
Format placeholders:
Placeholders will be replaced by the JSON keys.
Placeholders for objects with sub-objects are flattened using 'delimiter'
in between (eg. {'parent': {'child': 'value'}} will use placeholder
{parent-child}).
Placeholders for list elements have 'delimiter' followed by the index
(eg. {'parent': ['this', 'that']) will use placeholders {parent-0}
for 'this' and {parent-1} for 'that'.
@author vicyap
SAMPLE OUTPUT
{'full_text': 'Github: Everything operating normally'}
"""
class Py3status:
    """
    py3status module: fetch a JSON document from `url` and render it
    through the `format` template (keys flattened with `delimiter`).

    NOTE(review): `url` defaults to None and is never validated here;
    an unset URL only surfaces as a request failure at refresh time.
    """
    # available configuration parameters
    cache_timeout = 30
    delimiter = '-'
    format = None
    timeout = 5
    url = None
    def getjson(self):
        """
        Fetch and flatten the JSON document, then return the py3status
        response dict; on request failure the text is left empty.
        """
        try:
            json_data = self.py3.request(self.url, timeout=self.timeout).json()
            json_data = self.py3.flatten_dict(json_data, self.delimiter, True)
        except self.py3.RequestException:
            json_data = None
        if json_data:
            full_text = self.py3.safe_format(self.format, json_data)
        else:
            full_text = ''
        return {
            'cached_until': self.py3.time_in(self.cache_timeout),
            'full_text': full_text
        }
if __name__ == "__main__":
"""
Run module in test mode.
"""
from py3status.module_test import module_test
module_test(Py3status)
| bsd-3-clause | Python |
7d74e93ad25fe3ada5afa877ed09a0894e01252f | Remove unused import | ministryofjustice/cla_backend,ministryofjustice/cla_backend,ministryofjustice/cla_backend,ministryofjustice/cla_backend | cla_backend/apps/status/views.py | cla_backend/apps/status/views.py | from django.db import connection, DatabaseError
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from rest_framework.renderers import JSONRenderer
from cla_common.smoketest import smoketest
class JSONResponse(HttpResponse):
    """HttpResponse that renders *data* as JSON via DRF's JSONRenderer."""
    def __init__(self, data, **kwargs):
        content = JSONRenderer().render(data)
        kwargs['content_type'] = 'application/json'
        super(JSONResponse, self).__init__(content, **kwargs)
@csrf_exempt
def status(request):
    """Health-check endpoint: verify that the database answers SELECT 1.

    Returns {'db': {'ready': bool, 'message': str}}.  On a database error
    it now reports ready=False with HTTP 503 instead of implicitly
    returning None (which Django treats as an internal server error).
    """
    if request.method == 'GET':
        message = ''
        c = None
        try:
            c = connection.cursor()
            c.execute('SELECT 1')
            row = c.fetchone()
            db_ready = row[0] == 1
            return JSONResponse({
                'db': {
                    'ready': db_ready,
                    'message': message
                }
            })
        except DatabaseError as e:
            # Bug fix: previously only recorded the message and fell
            # through, so the view returned None on failure.
            return JSONResponse({
                'db': {
                    'ready': False,
                    'message': str(e)
                }
            }, status=503)
        finally:
            if c:
                c.close()
@csrf_exempt
def smoketests(request):
    """
    Run smoke tests and return results as JSON datastructure
    """
    # Imported lazily -- presumably to avoid pulling test dependencies in
    # at module import time; confirm before moving to the top of the file.
    from cla_backend.apps.status.tests.smoketests import SmokeTests
    return JSONResponse(smoketest(SmokeTests))
| import os
from django.db import connection, DatabaseError
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from rest_framework.renderers import JSONRenderer
from cla_common.smoketest import smoketest
class JSONResponse(HttpResponse):
    """HttpResponse that renders *data* as JSON via DRF's JSONRenderer."""
    def __init__(self, data, **kwargs):
        content = JSONRenderer().render(data)
        kwargs['content_type'] = 'application/json'
        super(JSONResponse, self).__init__(content, **kwargs)
@csrf_exempt
def status(request):
    """Health-check endpoint: verify that the database answers SELECT 1.

    NOTE(review): when a DatabaseError is raised (or the method is not
    GET) the view falls through and returns None, which Django rejects
    with a 500 -- the error message stored below is never reported.
    """
    if request.method == 'GET':
        message = ''
        c = None
        try:
            c = connection.cursor()
            c.execute('SELECT 1')
            row = c.fetchone()
            db_ready = row[0] == 1
            return JSONResponse({
                'db': {
                    'ready': db_ready,
                    'message': message
                }
            })
        except DatabaseError as e:
            message = str(e)
        finally:
            if c:
                c.close()
@csrf_exempt
def smoketests(request):
    """
    Run smoke tests and return results as JSON datastructure
    """
    # Imported lazily -- presumably to avoid pulling test dependencies in
    # at module import time; confirm before moving to the top of the file.
    from cla_backend.apps.status.tests.smoketests import SmokeTests
    return JSONResponse(smoketest(SmokeTests))
| mit | Python |
f2f74f53fe8f5b4ac4cd728c4181b3a66b4e873d | Change problem description so it actually makes sense | josienb/project_euler,josienb/project_euler | euler009.py | euler009.py | # Project Euler
# 9 - Special Pythagorean triplet
# A Pythagorean triplet is a set of three natural numbers, a < b < c, for which,
# a^2 + b^2 = c^2
#
# For example, 3^2 + 4^2 = 9 + 16 = 25 = 5^2.
#
# There exists exactly one Pythagorean triplet for which a + b + c = 1000.
# Find the product abc.
| # euler 009
# A Pythagorean triplet is a set of three natural numbers, a < b < c, for which,
# a^2 + b^2 = c^2
#
# For example, 3^2 + 4^2 = 9 + 16 = 25 = 5^2.
#
# There exists exactly one Pythagorean triplet for which a + b + c = 1000.
# Find the product abc.
| mit | Python |
cdd4989674791c57b872a0188d9478c2af278073 | Add mean prediction | johnmartinsson/bird-species-classification,johnmartinsson/bird-species-classification | evaluate.py | evaluate.py | import numpy as np
from functools import reduce
from bird.models.cuberun import CubeRun
from bird import utils
from bird import loader
nb_classes = 20
input_shape = (257, 512)
image_shape = input_shape
batch_size=32
def evaluate(model, data_filepath, file2labels_filepath):
    """Evaluate *model* on the test set and print top-1..top-5 hit counts.

    Each test item consists of several segments of one recording; the
    per-segment predictions are averaged before ranking the classes.
    """
    (X_tests, Y_tests) = loader.load_test_data(data_filepath, file2labels_filepath,
                                               nb_classes=nb_classes)
    top_1 = 0
    top_2 = 0
    top_3 = 0
    top_4 = 0
    top_5 = 0
    print("| Predicted | Ground Truth |")
    print("|-----------|--------------|")
    for X_test, Y_test in zip(X_tests, Y_tests):
        # add a trailing channel dimension -- assumes the net expects
        # (segments, rows, cols, 1); TODO confirm against the model input
        X_test = X_test.reshape(X_test.shape[0], X_test.shape[1], X_test.shape[2], 1)
        Y_preds = model.predict(X_test)
        # mean prediction across all segments of the same recording
        Y_mean = np.mean(Y_preds, axis=0)
        y_pred = np.argmax(Y_mean)  # NOTE(review): unused
        y_preds = np.argsort(Y_mean)[::-1]  # classes ranked best-first
        y_true = np.argmax(Y_test[0])
        if y_true in y_preds[:1]:
            top_1+=1
        if y_true in y_preds[:2]:
            top_2+=1
        if y_true in y_preds[:3]:
            top_3+=1
        if y_true in y_preds[:4]:
            top_4+=1
        if y_true in y_preds[:5]:
            top_5+=1
        print("| ", y_preds[:5], " | ", y_true, " |")
    print("Top 1:", top_1)
    print("Top 2:", top_2)
    print("Top 3:", top_3)
    print("Top 4:", top_4)
    print("Top 5:", top_5)
    print("Total predictiosn: ", len(X_tests))
def to_str(x):
    """Join the items of *x* as strings, each preceded by a comma.

    Note the result keeps a leading comma (e.g. ",1,2"); an empty
    sequence yields the empty string.
    """
    if len(x) == 0:
        return ""
    return "".join("," + str(item) for item in x)
def binary_to_id(Y):
    """Return the indices of *Y* whose value equals 1 (one-hot -> ids)."""
    return [index for index, flag in enumerate(Y) if flag == 1]
# Script entry: build the CubeRun model, restore trained weights, and
# evaluate it on the preprocessed BirdCLEF validation split.
model = CubeRun(nb_classes, input_shape)
model.load_weights("./weights/2016_11_30_14:13:35_cuberun.h5")
model.compile(loss="categorical_crossentropy", optimizer="adadelta")
evaluate(model, "./datasets/birdClef2016Subset_preprocessed/valid",
         "datasets/birdClef2016Subset_preprocessed/valid/file2labels.csv")
| from models.cuberun import CubeRun
import numpy as np
import utils
import loader
nb_classes = 19
input_shape = (257, 509, 1)
(cols, rows, chs) = input_shape
image_shape = (cols, rows)
batch_size=32
def evaluate(model, data_filepath, file2labels_filepath):
    """Predict multi-label outputs for the whole dataset and print a
    predicted-vs-ground-truth table (labels as index lists)."""
    model.compile(loss='binary_crossentropy', optimizer='adam',
                  metrics=['fbeta_score'])
    (X_test, Y_test, filenames) = loader.load_all_data(data_filepath, file2labels_filepath,
                                                       nb_classes=nb_classes,
                                                       image_shape=image_shape)
    print("Predicting ...")
    Y = model.predict(X_test, batch_size=batch_size, verbose=1)
    # threshold sigmoid outputs at 0.5
    Y = np.round(Y)
    print("| Predicted | Ground Truth |")
    print("|-----------|--------------|")
    for (y, gt) in zip(Y, Y_test):
        print("| ", binary_to_id(y), " | ", binary_to_id(gt), " |")
def binary_to_id(Y):
    """Return the indices of *Y* whose value equals 1 (one-hot -> ids)."""
    return [index for index, flag in enumerate(Y) if flag == 1]
| mit | Python |
26b440a0289901207745b2300d3833e0028f047a | use for-loop instead of while-loop | tijko/Project-Euler,tijko/Project-Euler,tijko/Project-Euler,tijko/Project-Euler,tijko/Project-Euler,tijko/Project-Euler,tijko/Project-Euler,tijko/Project-Euler | py_solutions_1-10/Euler_8.py | py_solutions_1-10/Euler_8.py | # find the greatest product of 5 consecutive digits in this 1000 digit number
import timeit
start = timeit.default_timer()
big_num = """73167176531330624919225119674426574742355349194934
96983520312774506326239578318016984801869478851843
85861560789112949495459501737958331952853208805511
12540698747158523863050715693290963295227443043557
66896648950445244523161731856403098711121722383113
62229893423380308135336276614282806444486645238749
30358907296290491560440772390713810515859307960866
70172427121883998797908792274921901699720888093776
65727333001053367881220235421809751254540594752243
52584907711670556013604839586446706324415722155397
53697817977846174064955149290862569321978468622482
83972241375657056057490261407972968652414535100474
82166370484403199890008895243450658541227588666881
16427171479924442928230863465674813919123162824586
17866458359124566529476545682848912883142607690042
24219022671055626321111109370544217506941658960408
07198403850962455444362981230987879927244284909188
84580156166097919133875499200524063689912560717606
05886116467109405077541002256983155200055935729725
71636269561882670428252483600823257530420752963450"""
def euler_8(n):
    """Return the largest product of 5 consecutive digits in the digit
    string *n*.  All whitespace (including newlines) in *n* is ignored."""
    n = ''.join(n.split())
    high = 0
    # Windows of width 5 start at indices 0 .. len(n) - 5, so the exclusive
    # upper bound is len(n) - 4.  Bug fix: the previous bound (len(n) - 5)
    # skipped the final window.  `range` (not xrange) and an explicit
    # product loop keep this portable across Python 2 and 3.
    for start in range(len(n) - 4):
        total = 1
        for digit in n[start:start + 5]:
            total *= int(digit)
        if total > high:
            high = total
    return high
print "Answer: %s" % euler_8(big_num)
stop = timeit.default_timer()
print "Time: %f" % (stop - start)
| # find the greatest product of 5 consecutive digits in this 1000 digit number
import timeit
start = timeit.default_timer()
big_num = """73167176531330624919225119674426574742355349194934
96983520312774506326239578318016984801869478851843
85861560789112949495459501737958331952853208805511
12540698747158523863050715693290963295227443043557
66896648950445244523161731856403098711121722383113
62229893423380308135336276614282806444486645238749
30358907296290491560440772390713810515859307960866
70172427121883998797908792274921901699720888093776
65727333001053367881220235421809751254540594752243
52584907711670556013604839586446706324415722155397
53697817977846174064955149290862569321978468622482
83972241375657056057490261407972968652414535100474
82166370484403199890008895243450658541227588666881
16427171479924442928230863465674813919123162824586
17866458359124566529476545682848912883142607690042
24219022671055626321111109370544217506941658960408
07198403850962455444362981230987879927244284909188
84580156166097919133875499200524063689912560717606
05886116467109405077541002256983155200055935729725
71636269561882670428252483600823257530420752963450"""
def euler_8(n):
    """Return the largest product of 5 consecutive digits in the digit
    string *n* (newlines and spaces are stripped first)."""
    n = n.replace('\n', '')
    n = ''.join([i for i in n if i != ' '])
    position = 0
    seq = ''  # NOTE(review): tracked but never used outside the loop
    high = 0
    # NOTE(review): the bound lets `position` run past the end of the
    # string; the out-of-range slices are empty (product stays 1), which
    # is harmless here since some real window always exceeds 1.
    while (position - 5) <= len(n):
        total = 1
        for i in n[position:position + 5]:
            total = total * int(i)
        if total > high:
            high = total
            seq = n[position:position + 5]
        position += 1
    return high
stop = timeit.default_timer()
print "Time: %f" % (stop - start)
| mit | Python |
70592355c4977934291bcdfc54bc93d84b262b94 | Add basic support. | ryan-roemer/django-cloud-browser,ryan-roemer/django-cloud-browser,UrbanDaddy/django-cloud-browser,ryan-roemer/django-cloud-browser,UrbanDaddy/django-cloud-browser | fabfile.py | fabfile.py | """Fabric file."""
from fabric.api import cd, local
###############################################################################
# Constants
###############################################################################
MOD = "cloud_browser"
PROJ = "cloud_browser_project"
CHECK_INCLUDES = (
"fabfile.py",
MOD,
PROJ,
)
PYLINT_CFG = "dev/pylint.cfg"
###############################################################################
# Quality
###############################################################################
def pylint(rcfile=PYLINT_CFG):
    """Run pylint style checker over all paths in CHECK_INCLUDES.

    :param rcfile: PyLint configuration file.
    """
    # Have a spurious DeprecationWarning in pylint.
    local("python -W ignore::DeprecationWarning `which pylint` --rcfile=%s %s" %
          (rcfile, " ".join(CHECK_INCLUDES)), capture=False)
def pep8():
    """Run the pep8 style checker over all paths in CHECK_INCLUDES."""
    targets = " ".join(CHECK_INCLUDES)
    local("pep8 -r %s" % targets, capture=False)
###############################################################################
# Django Targets
###############################################################################
def run_server(addr="127.0.0.1:8000"):
    """Run Django dev. server.

    :param addr: host:port to bind the development server to.
    """
    with cd(PROJ):
        local(
            "python manage.py runserver --pythonpath='..' %s" % addr, capture=False)
| """Fabric file."""
from fabric.api import cd, local
###############################################################################
# Constants
###############################################################################
MOD = "cloud_browser"
PROJ = "cloud_browser_project"
PYLINT_INCLUDES = (
"fabfile.py",
MOD,
PROJ,
)
PYLINT_CFG = "dev/pylint.cfg"
###############################################################################
# Quality
###############################################################################
def pylint(rcfile=PYLINT_CFG):
"""Run pylint style checker.
:param rcfile: PyLint configuration file.
"""
# Have a spurious DeprecationWarning in pylint.
local("python -W ignore::DeprecationWarning `which pylint` --rcfile=%s %s" %
(rcfile, " ".join(PYLINT_INCLUDES)), capture=False)
###############################################################################
# Django Targets
###############################################################################
def run_server(addr="127.0.0.1:8000"):
"""Run Django dev. server."""
with cd(PROJ):
local(
"python manage.py runserver --pythonpath='..' %s" % addr, capture=False)
| mit | Python |
96b92ca853f2d937b81cfb1522fe201fa5c593c3 | Bump version to 0.2.3 | xrmx/django-skebby | django_skebby/__init__.py | django_skebby/__init__.py | __version__ = '0.2.3'
| __version__ = '0.2.2'
| bsd-3-clause | Python |
d0be18d4ca82771082c442c5f419704806ebd412 | Fix python3 incompatibility in table test | dwillmer/blaze,jcrist/blaze,maxalbert/blaze,scls19fr/blaze,cowlicks/blaze,nkhuyu/blaze,jdmcbr/blaze,alexmojaki/blaze,cowlicks/blaze,ChinaQuants/blaze,jdmcbr/blaze,cpcloud/blaze,caseyclements/blaze,mrocklin/blaze,alexmojaki/blaze,maxalbert/blaze,jcrist/blaze,dwillmer/blaze,ContinuumIO/blaze,ChinaQuants/blaze,LiaoPan/blaze,nkhuyu/blaze,xlhtc007/blaze,cpcloud/blaze,xlhtc007/blaze,LiaoPan/blaze,caseyclements/blaze,mrocklin/blaze,scls19fr/blaze,ContinuumIO/blaze | blaze/api/tests/test_table.py | blaze/api/tests/test_table.py | from blaze.api.table import Table, compute, table_repr
from blaze.data.python import Python
from blaze.compute.core import compute
from blaze.compute.python import compute
from datashape import dshape
import pandas as pd
data = (('Alice', 100),
('Bob', 200))
t = Table(data, columns=['name', 'amount'])
def test_resources():
assert t.resources() == {t: t.data}
def test_compute():
assert compute(t) == data
def test_compute():
assert list(compute(t['amount'] + 1)) == [101, 201]
def test_create_with_schema():
t = Table(data, schema='{name: string, amount: float32}')
assert t.schema == dshape('{name: string, amount: float32}')
def test_create_with_raw_data():
t = Table(data, columns=['name', 'amount'])
assert t.schema == dshape('{name: string, amount: int64}')
assert t.name
assert t.data == data
def test_create_with_data_descriptor():
schema='{name: string, amount: int64}'
ddesc = Python(data, schema=schema)
t = Table(ddesc)
assert t.schema == dshape(schema)
assert t.name
assert t.data == ddesc
def test_repr():
result = table_repr(t['name'])
print(result)
assert isinstance(result, str)
assert 'Alice' in result
assert 'Bob' in result
assert '...' not in result
result = table_repr(t['amount'] + 1)
print(result)
assert '101' in result
t2 = Table(tuple((i, i**2) for i in range(100)), columns=['x', 'y'])
result = table_repr(t2)
print(result)
assert len(result.split('\n')) < 20
assert '...' in result
def test_mutable_backed_repr():
mutable_backed_table = Table([[0]], columns=['col1'])
repr(mutable_backed_table)
def test_dataframe_backed_repr():
df = pd.DataFrame(data=[0], columns=['col1'])
dataframe_backed_table = Table(df)
repr(dataframe_backed_table)
| from blaze.api.table import Table, compute, table_repr
from blaze.data.python import Python
from blaze.compute.core import compute
from blaze.compute.python import compute
from datashape import dshape
import pandas as pd
data = (('Alice', 100),
('Bob', 200))
t = Table(data, columns=['name', 'amount'])
def test_resources():
assert t.resources() == {t: t.data}
def test_compute():
assert compute(t) == data
def test_compute():
assert list(compute(t['amount'] + 1)) == [101, 201]
def test_create_with_schema():
t = Table(data, schema='{name: string, amount: float32}')
assert t.schema == dshape('{name: string, amount: float32}')
def test_create_with_raw_data():
t = Table(data, columns=['name', 'amount'])
assert t.schema == dshape('{name: string, amount: int64}')
assert t.name
assert t.data == data
def test_create_with_data_descriptor():
schema='{name: string, amount: int64}'
ddesc = Python(data, schema=schema)
t = Table(ddesc)
assert t.schema == dshape(schema)
assert t.name
assert t.data == ddesc
def test_repr():
result = table_repr(t['name'])
print(result)
assert isinstance(result, str)
assert 'Alice' in result
assert 'Bob' in result
assert '...' not in result
result = table_repr(t['amount'] + 1)
print(result)
assert '101' in result
t2 = Table(tuple((i, i**2) for i in range(100)), columns=['x', 'y'])
result = table_repr(t2)
print(result)
assert len(result.split('\n')) < 20
assert '...' in result
def test_mutable_backed_repr():
mutable_data = [[0]]
mutable_backed_table = Table(mutable_data, columns=["mutable"])
repr(mutable_backed_table)
def test_dataframe_backed_repr():
mutable_data = range(2)
df = pd.DataFrame(data=mutable_data, columns=["mutable"])
dataframe_backed_table = Table(df)
repr(dataframe_backed_table)
| bsd-3-clause | Python |
feaeba32c6e3ec1ac354984f55ae96107d5acbdf | Add default value to EnvironmentVariable substitution (#225) | ros2/launch,ros2/launch,ros2/launch | launch/launch/substitutions/environment_variable.py | launch/launch/substitutions/environment_variable.py | # Copyright 2018 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for the EnvironmentVariable substitution."""
import os
from typing import List
from typing import Text
from ..launch_context import LaunchContext
from ..some_substitutions_type import SomeSubstitutionsType
from ..substitution import Substitution
class EnvironmentVariable(Substitution):
"""
Substitution that gets an environment variable value as a string.
If the environment variable is not found, it returns empty string.
"""
def __init__(
self,
name: SomeSubstitutionsType,
*,
default_value: SomeSubstitutionsType = ''
) -> None:
"""Constructor."""
super().__init__()
from ..utilities import normalize_to_list_of_substitutions # import here to avoid loop
self.__name = normalize_to_list_of_substitutions(name)
self.__default_value = normalize_to_list_of_substitutions(default_value)
@property
def name(self) -> List[Substitution]:
"""Getter for name."""
return self.__name
@property
def default_value(self) -> List[Substitution]:
"""Getter for default_value."""
return self.__default_value
def describe(self) -> Text:
"""Return a description of this substitution as a string."""
return 'EnvVar({})'.format(' + '.join([sub.describe() for sub in self.name]))
def perform(self, context: LaunchContext) -> Text:
"""Perform the substitution by looking up the environment variable."""
from ..utilities import perform_substitutions # import here to avoid loop
return os.environ.get(
perform_substitutions(context, self.name),
perform_substitutions(context, self.default_value)
)
| # Copyright 2018 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for the EnvironmentVariable substitution."""
import os
from typing import List
from typing import Text
from ..launch_context import LaunchContext
from ..some_substitutions_type import SomeSubstitutionsType
from ..substitution import Substitution
class EnvironmentVariable(Substitution):
"""
Substitution that gets an environment variable value as a string.
If the environment variable is not found, it returns empty string.
"""
def __init__(self, name: SomeSubstitutionsType) -> None:
"""Constructor."""
super().__init__()
from ..utilities import normalize_to_list_of_substitutions # import here to avoid loop
self.__name = normalize_to_list_of_substitutions(name)
@property
def name(self) -> List[Substitution]:
"""Getter for name."""
return self.__name
def describe(self) -> Text:
"""Return a description of this substitution as a string."""
return 'EnvVar({})'.format(' + '.join([sub.describe() for sub in self.name]))
def perform(self, context: LaunchContext) -> Text:
"""Perform the substitution by looking up the environment variable."""
from ..utilities import perform_substitutions # import here to avoid loop
return os.environ.get(perform_substitutions(context, self.name), '')
| apache-2.0 | Python |
87f892731678049b5a706a36487982ebb9da3991 | Add API client to global envar | alexandermendes/pybossa-discourse | pybossa_discourse/globals.py | pybossa_discourse/globals.py | # -*- coding: utf8 -*-
"""Jinja globals module for pybossa-discourse."""
from flask import Markup, request
from . import discourse_client
class DiscourseGlobals(object):
"""A class to implement Discourse Global variables."""
def __init__(self, app):
self.url = app.config['DISCOURSE_URL']
self.api = discourse_client
app.jinja_env.globals.update(discourse=self)
def comments(self):
"""Return an HTML snippet used to embed Discourse comments."""
return Markup("""
<div id="discourse-comments"></div>
<script type="text/javascript">
DiscourseEmbed = {{
discourseUrl: '{0}/',
discourseEmbedUrl: '{1}'
}};
window.onload = function() {{
let d = document.createElement('script'),
head = document.getElementsByTagName('head')[0],
body = document.getElementsByTagName('body')[0];
d.type = 'text/javascript';
d.async = true;
d.src = '{0}/javascripts/embed.js';
(head || body).appendChild(d);
}}
</script>
""").format(self.url, request.base_url)
def notifications(self):
"""Return a count of unread notifications for the current user."""
notifications = discourse_client.user_notifications()
if not notifications:
return 0
return sum([1 for n in notifications['notifications']
if not n['read']])
| # -*- coding: utf8 -*-
"""Jinja globals module for pybossa-discourse."""
from flask import Markup, request
from . import discourse_client
class DiscourseGlobals(object):
"""A class to implement Discourse Global variables."""
def __init__(self, app):
self.url = app.config['DISCOURSE_URL']
app.jinja_env.globals.update(discourse=self)
def comments(self):
"""Return an HTML snippet used to embed Discourse comments."""
return Markup("""
<div id="discourse-comments"></div>
<script type="text/javascript">
DiscourseEmbed = {{
discourseUrl: '{0}/',
discourseEmbedUrl: '{1}'
}};
window.onload = function() {{
let d = document.createElement('script'),
head = document.getElementsByTagName('head')[0],
body = document.getElementsByTagName('body')[0];
d.type = 'text/javascript';
d.async = true;
d.src = '{0}/javascripts/embed.js';
(head || body).appendChild(d);
}}
</script>
""").format(self.url, request.base_url)
def notifications(self):
"""Return a count of unread notifications for the current user."""
notifications = discourse_client.user_notifications()
if not notifications:
return 0
return sum([1 for n in notifications['notifications']
if not n['read']])
| bsd-3-clause | Python |
566c9088033158b4b707090a616a5952841c57aa | Correct an error in random_one_hot_topological_dense_design_matrix | nouiz/pylearn2,Refefer/pylearn2,lamblin/pylearn2,JesseLivezey/pylearn2,hantek/pylearn2,pombredanne/pylearn2,mkraemer67/pylearn2,jeremyfix/pylearn2,bartvm/pylearn2,caidongyun/pylearn2,matrogers/pylearn2,alexjc/pylearn2,jeremyfix/pylearn2,alexjc/pylearn2,jamessergeant/pylearn2,JesseLivezey/plankton,TNick/pylearn2,JesseLivezey/pylearn2,pombredanne/pylearn2,lunyang/pylearn2,matrogers/pylearn2,jamessergeant/pylearn2,Refefer/pylearn2,lancezlin/pylearn2,fulmicoton/pylearn2,JesseLivezey/pylearn2,sandeepkbhat/pylearn2,TNick/pylearn2,msingh172/pylearn2,sandeepkbhat/pylearn2,goodfeli/pylearn2,shiquanwang/pylearn2,hantek/pylearn2,goodfeli/pylearn2,mclaughlin6464/pylearn2,chrish42/pylearn,fishcorn/pylearn2,theoryno3/pylearn2,sandeepkbhat/pylearn2,pkainz/pylearn2,lamblin/pylearn2,hyqneuron/pylearn2-maxsom,msingh172/pylearn2,JesseLivezey/plankton,abergeron/pylearn2,daemonmaker/pylearn2,Refefer/pylearn2,chrish42/pylearn,fulmicoton/pylearn2,lancezlin/pylearn2,junbochen/pylearn2,aalmah/pylearn2,aalmah/pylearn2,JesseLivezey/plankton,se4u/pylearn2,fyffyt/pylearn2,goodfeli/pylearn2,alexjc/pylearn2,mclaughlin6464/pylearn2,kose-y/pylearn2,KennethPierce/pylearnk,junbochen/pylearn2,ashhher3/pylearn2,junbochen/pylearn2,w1kke/pylearn2,lunyang/pylearn2,cosmoharrigan/pylearn2,abergeron/pylearn2,lamblin/pylearn2,daemonmaker/pylearn2,JesseLivezey/pylearn2,skearnes/pylearn2,se4u/pylearn2,se4u/pylearn2,Refefer/pylearn2,kastnerkyle/pylearn2,lisa-lab/pylearn2,kastnerkyle/pylearn2,TNick/pylearn2,nouiz/pylearn2,goodfeli/pylearn2,fyffyt/pylearn2,abergeron/pylearn2,KennethPierce/pylearnk,shiquanwang/pylearn2,ashhher3/pylearn2,cosmoharrigan/pylearn2,lunyang/pylearn2,mclaughlin6464/pylearn2,lisa-lab/pylearn2,bartvm/pylearn2,TNick/pylearn2,jeremyfix/pylearn2,caidongyun/pylearn2,ddboline/pylearn2,kastnerkyle/pylearn2,msingh172/pylearn2,theoryno3/pylearn2,fulmicoton/pylearn2,jamessergeant/pylearn2,w
oozzu/pylearn2,kose-y/pylearn2,pkainz/pylearn2,fishcorn/pylearn2,w1kke/pylearn2,hyqneuron/pylearn2-maxsom,nouiz/pylearn2,aalmah/pylearn2,fyffyt/pylearn2,ddboline/pylearn2,KennethPierce/pylearnk,ddboline/pylearn2,w1kke/pylearn2,lisa-lab/pylearn2,kastnerkyle/pylearn2,matrogers/pylearn2,skearnes/pylearn2,daemonmaker/pylearn2,mclaughlin6464/pylearn2,cosmoharrigan/pylearn2,sandeepkbhat/pylearn2,junbochen/pylearn2,fulmicoton/pylearn2,msingh172/pylearn2,fyffyt/pylearn2,skearnes/pylearn2,aalmah/pylearn2,mkraemer67/pylearn2,cosmoharrigan/pylearn2,CIFASIS/pylearn2,caidongyun/pylearn2,jamessergeant/pylearn2,bartvm/pylearn2,woozzu/pylearn2,hantek/pylearn2,CIFASIS/pylearn2,theoryno3/pylearn2,mkraemer67/pylearn2,lancezlin/pylearn2,mkraemer67/pylearn2,hyqneuron/pylearn2-maxsom,JesseLivezey/plankton,kose-y/pylearn2,abergeron/pylearn2,bartvm/pylearn2,matrogers/pylearn2,pkainz/pylearn2,pombredanne/pylearn2,pkainz/pylearn2,lisa-lab/pylearn2,nouiz/pylearn2,woozzu/pylearn2,theoryno3/pylearn2,kose-y/pylearn2,hantek/pylearn2,shiquanwang/pylearn2,se4u/pylearn2,pombredanne/pylearn2,jeremyfix/pylearn2,alexjc/pylearn2,chrish42/pylearn,ashhher3/pylearn2,w1kke/pylearn2,daemonmaker/pylearn2,caidongyun/pylearn2,CIFASIS/pylearn2,KennethPierce/pylearnk,skearnes/pylearn2,chrish42/pylearn,lunyang/pylearn2,lamblin/pylearn2,ashhher3/pylearn2,lancezlin/pylearn2,ddboline/pylearn2,fishcorn/pylearn2,fishcorn/pylearn2,hyqneuron/pylearn2-maxsom,shiquanwang/pylearn2,woozzu/pylearn2,CIFASIS/pylearn2 | pylearn2/testing/datasets.py | pylearn2/testing/datasets.py | """ Simple datasets to be used for unit tests. """
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "Ian Goodfellow"
__email__ = "goodfeli@iro"
import numpy as np
from pylearn2.datasets.dense_design_matrix import DenseDesignMatrix
class ArangeDataset(DenseDesignMatrix):
"""
A dataset where example i is just the number i. Makes it easy to track
which sets of examples are visited.
Parameters
----------
num_examples : WRITEME
"""
def __init__(self, num_examples):
X = np.zeros((num_examples,1))
X[:,0] = np.arange(num_examples)
super(ArangeDataset, self).__init__(X)
def random_dense_design_matrix(rng, num_examples, dim, num_classes):
X = rng.randn(num_examples, dim)
Y = rng.randint(0, num_classes, (num_examples,1))
return DenseDesignMatrix(X=X, y=Y)
def random_one_hot_dense_design_matrix(rng, num_examples, dim, num_classes):
X = rng.randn(num_examples, dim)
idx = rng.randint(0, num_classes, (num_examples,))
Y = np.zeros((num_examples,num_classes))
for i in xrange(num_examples):
Y[i,idx[i]] = 1
return DenseDesignMatrix(X=X, y=Y)
def random_one_hot_topological_dense_design_matrix(rng, num_examples, shape, channels, axes, num_classes):
dims = {
'b': num_examples,
'c': channels
}
for i, dim in enumerate(shape):
dims[i] = dim
shape = [dims[axis] for axis in axes]
X = rng.randn(*shape)
idx = rng.randint(0, num_classes, (num_examples,))
Y = np.zeros((num_examples,num_classes))
for i in xrange(num_examples):
Y[i,idx[i]] = 1
return DenseDesignMatrix(topo_view=X, axes=axes, y=Y)
| """ Simple datasets to be used for unit tests. """
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "Ian Goodfellow"
__email__ = "goodfeli@iro"
import numpy as np
from pylearn2.datasets.dense_design_matrix import DenseDesignMatrix
class ArangeDataset(DenseDesignMatrix):
"""
A dataset where example i is just the number i. Makes it easy to track
which sets of examples are visited.
Parameters
----------
num_examples : WRITEME
"""
def __init__(self, num_examples):
X = np.zeros((num_examples,1))
X[:,0] = np.arange(num_examples)
super(ArangeDataset, self).__init__(X)
def random_dense_design_matrix(rng, num_examples, dim, num_classes):
X = rng.randn(num_examples, dim)
Y = rng.randint(0, num_classes, (num_examples,1))
return DenseDesignMatrix(X=X, y=Y)
def random_one_hot_dense_design_matrix(rng, num_examples, dim, num_classes):
X = rng.randn(num_examples, dim)
idx = rng.randint(0, num_classes, (num_examples,))
Y = np.zeros((num_examples,num_classes))
for i in xrange(num_examples):
Y[i,idx[i]] = 1
return DenseDesignMatrix(X=X, y=Y)
def random_one_hot_topological_dense_design_matrix(rng, num_examples, shape, channels, axes, num_classes):
dims = {
'b': num_examples,
'c': channels
}
for i, dim in enumerate(shape):
dims[i] = dim
shape = [dims[axis] for axis in axes]
X = rng.randn(*shape)
idx = rng.randint(0, dim, (num_examples,))
Y = np.zeros((num_examples,num_classes))
for i in xrange(num_examples):
Y[i,idx[i]] = 1
return DenseDesignMatrix(topo_view=X, axes=axes, y=Y)
| bsd-3-clause | Python |
15621212e635792bd6e5f28a58052d6b881d42de | Call test func properly. | bueda/django-comrade | fabfile.py | fabfile.py | #!/usr/bin/env python
import os
from fabric.api import *
from fabric.contrib.console import confirm
from fab_shared import _nose_test, _test, _package_deploy as deploy
env.unit = "django-comrade"
env.root_dir = os.path.abspath(os.path.dirname(__file__))
env.scm = env.root_dir
env.allow_no_tag = True
env.upload_to_s3 = True
@runs_once
def test(dir=None):
_test(_nose_test, dir)
| #!/usr/bin/env python
import os
from fabric.api import *
from fabric.contrib.console import confirm
from fab_shared import _nose_test, _test, _package_deploy as deploy
env.unit = "django-comrade"
env.root_dir = os.path.abspath(os.path.dirname(__file__))
env.scm = env.root_dir
env.allow_no_tag = True
env.upload_to_s3 = True
@runs_once
def test(dir=None):
_test(_nose_test)
| mit | Python |
edb07b507aa93ead278cecd168da83a4be68b2ba | Disable front-end testing on Travis | onepercentclub/onepercentclub-site,onepercentclub/onepercentclub-site,onepercentclub/onepercentclub-site,onepercentclub/onepercentclub-site,onepercentclub/onepercentclub-site | bluebottle/settings/travis.py | bluebottle/settings/travis.py |
# SECRET_KEY and DATABASES needs to be defined before the base settings is imported.
SECRET_KEY = 'hbqnTEq+m7Tk61bvRV/TLANr3i0WZ6hgBXDh3aYpSU8m+E1iCtlU3Q=='
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
},
}
from .base import *
#
# Put the travis-ci environment specific overrides below.
#
# Disable Selenium testing for now on Travis because it fails inconsistent.
# SELENIUM_TESTS = True |
# SECRET_KEY and DATABASES needs to be defined before the base settings is imported.
SECRET_KEY = 'hbqnTEq+m7Tk61bvRV/TLANr3i0WZ6hgBXDh3aYpSU8m+E1iCtlU3Q=='
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
},
}
from .base import *
#
# Put the travis-ci environment specific overrides below.
#
SELENIUM_TESTS = True | bsd-3-clause | Python |
a714e48ad09ce3c730cbd8b3ad04ffefba4b8661 | Remove obsolete "babel compile" command | Turbo87/skylines,RBE-Avionik/skylines,RBE-Avionik/skylines,Turbo87/skylines,skylines-project/skylines,Turbo87/skylines,Turbo87/skylines,RBE-Avionik/skylines,shadowoneau/skylines,Harry-R/skylines,RBE-Avionik/skylines,skylines-project/skylines,skylines-project/skylines,shadowoneau/skylines,Harry-R/skylines,shadowoneau/skylines,shadowoneau/skylines,Harry-R/skylines,skylines-project/skylines,Harry-R/skylines | fabfile.py | fabfile.py | from fabric.api import env, task, local, cd, lcd, run, sudo, put
from tempfile import NamedTemporaryFile
env.use_ssh_config = True
env.hosts = ['skylines@skylines']
APP_DIR = '/home/skylines'
SRC_DIR = '%s/src' % APP_DIR
@task
def deploy(branch='master', force=False):
deploy_ember()
push(branch, force)
restart()
@task
def deploy_ember():
with lcd('ember'):
local('node_modules/.bin/ember deploy production -v')
@task
def push(branch='HEAD', force=False):
cmd = 'git push %s:%s %s:master' % (env.host_string, SRC_DIR, branch)
if force:
cmd += ' --force'
local(cmd)
@task
def restart():
with cd(SRC_DIR):
run('git reset --hard')
# do database migrations
manage('migrate upgrade')
# restart services
restart_service('skylines-api')
restart_service('skylines')
restart_service('mapserver')
restart_service('tracking')
restart_service('celery')
restart_service('mapproxy')
@task
def restart_service(service):
# Using the sudo() command somehow always provokes a password prompt,
# even if NOPASSWD is specified in the sudoers file...
run('sudo supervisorctl restart %s' % service)
@task
def manage(cmd, user=None):
with cd(SRC_DIR):
if user:
sudo('./manage.py %s' % cmd, user=user)
else:
run('./manage.py %s' % cmd)
@task
def update_mapproxy():
with NamedTemporaryFile() as f:
content = open('mapserver/mapproxy/mapproxy.yaml').read()
content = content.replace(
'base_dir: \'/tmp/cache_data\'',
'base_dir: \'%s/cache/mapproxy\'' % APP_DIR,
)
content = content.replace(
'lock_dir: \'/tmp/cache_data/tile_locks\'',
'lock_dir: \'%s/cache/mapproxy/tile_locks\'' % APP_DIR,
)
f.write(content)
f.flush()
put(f.name, '%s/config/mapproxy.yaml' % APP_DIR)
@task
def pip_install():
with cd(SRC_DIR):
run('git reset --hard')
run('pip install -e .')
@task
def clean_mapproxy_cache():
with cd('/home/skylines/cache/mapproxy'):
run('rm -rv *')
| from fabric.api import env, task, local, cd, lcd, run, sudo, put
from tempfile import NamedTemporaryFile
env.use_ssh_config = True
env.hosts = ['skylines@skylines']
APP_DIR = '/home/skylines'
SRC_DIR = '%s/src' % APP_DIR
@task
def deploy(branch='master', force=False):
deploy_ember()
push(branch, force)
restart()
@task
def deploy_ember():
with lcd('ember'):
local('node_modules/.bin/ember deploy production -v')
@task
def push(branch='HEAD', force=False):
cmd = 'git push %s:%s %s:master' % (env.host_string, SRC_DIR, branch)
if force:
cmd += ' --force'
local(cmd)
@task
def restart():
with cd(SRC_DIR):
run('git reset --hard')
# compile i18n .mo files
manage('babel compile')
# do database migrations
manage('migrate upgrade')
# restart services
restart_service('skylines-api')
restart_service('skylines')
restart_service('mapserver')
restart_service('tracking')
restart_service('celery')
restart_service('mapproxy')
@task
def restart_service(service):
# Using the sudo() command somehow always provokes a password prompt,
# even if NOPASSWD is specified in the sudoers file...
run('sudo supervisorctl restart %s' % service)
@task
def manage(cmd, user=None):
with cd(SRC_DIR):
if user:
sudo('./manage.py %s' % cmd, user=user)
else:
run('./manage.py %s' % cmd)
@task
def update_mapproxy():
with NamedTemporaryFile() as f:
content = open('mapserver/mapproxy/mapproxy.yaml').read()
content = content.replace(
'base_dir: \'/tmp/cache_data\'',
'base_dir: \'%s/cache/mapproxy\'' % APP_DIR,
)
content = content.replace(
'lock_dir: \'/tmp/cache_data/tile_locks\'',
'lock_dir: \'%s/cache/mapproxy/tile_locks\'' % APP_DIR,
)
f.write(content)
f.flush()
put(f.name, '%s/config/mapproxy.yaml' % APP_DIR)
@task
def pip_install():
with cd(SRC_DIR):
run('git reset --hard')
run('pip install -e .')
@task
def clean_mapproxy_cache():
with cd('/home/skylines/cache/mapproxy'):
run('rm -rv *')
| agpl-3.0 | Python |
bc3c057a2cc775bcce690e0e9019c2907b638101 | Bump version | thombashi/pytablereader,thombashi/pytablereader,thombashi/pytablereader | pytablereader/__version__.py | pytablereader/__version__.py | # encoding: utf-8
from datetime import datetime
__author__ = "Tsuyoshi Hombashi"
__copyright__ = "Copyright 2016-{}, {}".format(datetime.now().year, __author__)
__license__ = "MIT License"
__version__ = "0.25.5"
__maintainer__ = __author__
__email__ = "tsuyoshi.hombashi@gmail.com"
| # encoding: utf-8
from datetime import datetime
__author__ = "Tsuyoshi Hombashi"
__copyright__ = "Copyright 2016-{}, {}".format(datetime.now().year, __author__)
__license__ = "MIT License"
__version__ = "0.25.4"
__maintainer__ = __author__
__email__ = "tsuyoshi.hombashi@gmail.com"
| mit | Python |
da4900798c2b052f3d317cc419e829cebfb3701c | Remove unused lines of code | thombashi/pytablereader,thombashi/pytablereader,thombashi/pytablereader | pytablereader/sqlite/core.py | pytablereader/sqlite/core.py | # encoding: utf-8
"""
.. codeauthor:: Tsuyoshi Hombashi <gogogo.vm@gmail.com>
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from .._constant import TableNameTemplate as tnt
from .._validator import FileValidator
from ..interface import TableLoader
from .formatter import SqliteTableFormatter
class SqliteFileLoader(TableLoader):
"""
A file loader class to extract tabular data from SQLite database files.
:param str file_path: Path to the loading SQLite database file.
.. py:attribute:: table_name
Table name string. Defaults to ``%(filename)s_%(key)s``.
"""
@property
def format_name(self):
return "sqlite"
def __init__(self, file_path=None):
super(SqliteFileLoader, self).__init__(file_path)
self._validator = FileValidator(file_path)
def load(self):
"""
Extract tabular data as |TableData| instances from a SQLite database
file. |load_source_desc_file|
:return:
Loaded table data iterator.
|load_table_name_desc|
=================== ==============================================
format specifier value after the replacement
=================== ==============================================
``%(filename)s`` |filename_desc|
``%(key)s`` ``%(format_name)s%(format_id)s``
``%(format_name)s`` ``"sqlite"``
``%(format_id)s`` |format_id_desc|
``%(global_id)s`` |global_id|
=================== ==============================================
:rtype: |TableData| iterator
:raises pytablereader.error.InvalidDataError:
If the Markdown data is invalid or empty.
"""
self._validate()
formatter = SqliteTableFormatter(self.source)
formatter.accept(self)
return formatter.to_table_data()
def _get_default_table_name_template(self):
return "{:s}{:s}".format(tnt.FORMAT_NAME, tnt.FORMAT_ID)
| # encoding: utf-8
"""
.. codeauthor:: Tsuyoshi Hombashi <gogogo.vm@gmail.com>
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from .._constant import TableNameTemplate as tnt
from .._validator import FileValidator
from ..interface import TableLoader
from .formatter import SqliteTableFormatter
class SqliteFileLoader(TableLoader):
"""
A file loader class to extract tabular data from SQLite database files.
:param str file_path: Path to the loading SQLite database file.
.. py:attribute:: table_name
Table name string. Defaults to ``%(filename)s_%(key)s``.
"""
@property
def format_name(self):
return "sqlite"
def __init__(self, file_path=None):
super(SqliteFileLoader, self).__init__(file_path)
self._validator = FileValidator(file_path)
def load(self):
"""
Extract tabular data as |TableData| instances from a SQLite database
file. |load_source_desc_file|
:return:
Loaded table data iterator.
|load_table_name_desc|
=================== ==============================================
format specifier value after the replacement
=================== ==============================================
``%(filename)s`` |filename_desc|
``%(key)s`` ``%(format_name)s%(format_id)s``
``%(format_name)s`` ``"sqlite"``
``%(format_id)s`` |format_id_desc|
``%(global_id)s`` |global_id|
=================== ==============================================
:rtype: |TableData| iterator
:raises pytablereader.error.InvalidDataError:
If the Markdown data is invalid or empty.
"""
from simplesqlite import SimpleSQLite
self._validate()
#formatter = SqliteTableFormatter(SimpleSQLite(self.source, "r"))
formatter = SqliteTableFormatter(self.source)
formatter.accept(self)
return formatter.to_table_data()
def _get_default_table_name_template(self):
return "{:s}{:s}".format(tnt.FORMAT_NAME, tnt.FORMAT_ID)
| mit | Python |
9a6dcfdd3c4b445089b74840d55f420f038833d4 | Update MM_filter_ont_1_AM.py | amojarro/carrierseq,amojarro/carrierseq | python/MM_filter_ont_1_AM.py | python/MM_filter_ont_1_AM.py | from Bio import SeqIO
import math
from Tkinter import Tk
name = '/DataFolder/02_seqtk/unmapped_reads.fastq'
qs = 9
output = '/DataFolder/03_fastq9/unmapped_reads_q9'
count = 0
for rec in SeqIO.parse(name, "fastq"):
count += 1
print("%i reads in fastq file" % count)
qual_sequences = [] # Setup an empty list
cnt = 0
for rec in SeqIO.parse(name, "fastq"):
rec.letter_annotations["phred_quality"]
probs = []
for q in rec.letter_annotations["phred_quality"]:
e = float(math.pow(10.0,-1*(float(q)/10.0)))
# print q, e
probs.append(e)
av_prob = float(sum(probs))/float(len((rec.letter_annotations["phred_quality"])))
# print av_prob
av_q = float(-10.0*(math.log10(float(av_prob))))
# print av_prob, av_q
if av_q >= qs:
cnt += 1
qual_sequences.append(rec)
print cnt,'quality reads saved'
output_handle = open(output +'.fa', "w")
SeqIO.write(qual_sequences, output_handle, "fasta")
output_handle.close()
output_handle = open(output +'.fq', "w")
SeqIO.write(qual_sequences, output_handle, "fastq")
output_handle.close()
| from Bio import SeqIO
import math
from Tkinter import Tk
name = '/$DataFolder/02_seqtk/unmapped_reads.fastq'
qs = 9
output = '/$DataFolder/03_fastq9/unmapped_reads_q9'
count = 0
for rec in SeqIO.parse(name, "fastq"):
count += 1
print("%i reads in fastq file" % count)
qual_sequences = [] # Setup an empty list
cnt = 0
for rec in SeqIO.parse(name, "fastq"):
rec.letter_annotations["phred_quality"]
probs = []
for q in rec.letter_annotations["phred_quality"]:
e = float(math.pow(10.0,-1*(float(q)/10.0)))
# print q, e
probs.append(e)
av_prob = float(sum(probs))/float(len((rec.letter_annotations["phred_quality"])))
# print av_prob
av_q = float(-10.0*(math.log10(float(av_prob))))
# print av_prob, av_q
if av_q >= qs:
cnt += 1
qual_sequences.append(rec)
print cnt,'quality reads saved'
output_handle = open(output +'.fa', "w")
SeqIO.write(qual_sequences, output_handle, "fasta")
output_handle.close()
output_handle = open(output +'.fq', "w")
SeqIO.write(qual_sequences, output_handle, "fastq")
output_handle.close()
| mit | Python |
449c49901e4a8e202cb4cf08c7404747c7be54d9 | Create models __init__.py file. | MichaelCurrin/twitterverse,MichaelCurrin/twitterverse | app/models/__init__.py | app/models/__init__.py | """
Initialisation file for models directory.
Note that the model files cannot be be imported directly with
`python -m models/{model}.py`, if they have been included here. Since
this __init__ file will add the table names to the name space before the
file is run, which causes a conflict.
"""
# Create an _`_all__` list here, using values set in other application files.
from .places import __all__ as placesModel
from .trends import __all__ as trendsModel
from .tweets import __all__ as tweetsModel
from .cronJobs import __all__ as cronJobsModel
__all__ = placesModel + trendsModel + tweetsModel + cronJobsModel
# Make table objects available on models module.
from .places import *
from .trends import *
from .tweets import *
from .cronJobs import *
| """
Initialisation file for models directory.
"""
# Create an _`_all__` list here, using values set in other application files.
from .places import __all__ as placesModel
from .trends import __all__ as trendsModel
from .tweets import __all__ as tweetsModel
from .cronJobs import __all__ as cronJobsModel
__all__ = placesModel + trendsModel + tweetsModel + cronJobsModel
# Make table objects available on models module.
from .places import *
from .trends import *
from .tweets import *
from .cronJobs import *
| mit | Python |
9338814b45c7ee658955e9342ae5b87bf577e83b | Fix deprecated can_build warning | oamldev/oamlGodotModule,oamldev/oamlGodotModule,oamldev/oamlGodotModule | config.py | config.py | def can_build(env, platform):
return True
def configure(env):
pass
| def can_build(platform):
return True
def configure(env):
pass
| mit | Python |
9a84296ead813a926b61f296020c55574df0e151 | clean config | buxx/synergine | config.py | config.py | from module.lifegame.synergy.collection.LifeGameCollection import LifeGameCollection
from module.lifegame.synergy.LifeGameSimulation import LifeGameSimulation
from module.lifegame.synergy.collection.LifeGameCollectionConfiguration import LifeGameCollectionConfiguration
from module.lifegame.display.curses_visualisation import visualisation as curses_visualisation
from module.lifegame.display.pygame_visualisation import visualisation as pygame_visualisation
from synergine.src.display.CursesDisplay import CursesDisplay
from synergine.src.display.TestDisplay import TestDisplay
from synergine.src.display.PygameDisplay import PygameDisplay
config = {
'engine': {
'fpsmax': 25,
'debug': {
'mainprocess': False,
'cycles': range(100)
}
},
'simulations' : [LifeGameSimulation([LifeGameCollection(LifeGameCollectionConfiguration())])],
'connections': [TestDisplay(), PygameDisplay(pygame_visualisation), CursesDisplay(curses_visualisation)],
'other': {}
} | from module.lifegame.synergy.collection.LifeGameCollection import LifeGameCollection
from module.lifegame.synergy.LifeGameSimulation import LifeGameSimulation
from module.lifegame.synergy.collection.LifeGameCollectionConfiguration import LifeGameCollectionConfiguration
from module.lifegame.display.curses_visualisation import visualisation as curses_visualisation
from module.lifegame.display.pygame_visualisation import visualisation as pygame_visualisation
from synergine.src.display.CursesDisplay import CursesDisplay
from synergine.src.display.TestDisplay import TestDisplay
from synergine.src.display.PygameDisplay import PygameDisplay
config = {
'engine': {
'fpsmax': 25,
'debug': {
'mainprocess': False,
'cycles': range(100)
}
},
'simulations' : [LifeGameSimulation([LifeGameCollection(LifeGameCollectionConfiguration())])],
'connections': [TestDisplay(), PygameDisplay(pygame_visualisation), CursesDisplay(curses_visualisation)],
'other': {
'action_manager': {
'max_recursions': 1000
}
}
} | apache-2.0 | Python |
f3f428480a8e61bf22532503680e718fd5f0d286 | Write the first view - news feed. | pure-python/brainmate | fb/views.py | fb/views.py | from django.shortcuts import render
from fb.models import UserPost
def index(request):
if request.method == 'GET':
posts = UserPost.objects.all()
context = {
'posts': posts,
}
return render(request, 'index.html', context)
| from django.shortcuts import render
# Create your views here.
| apache-2.0 | Python |
72f429d74f04a700aa0240f1cb934dfcca979384 | Remove comments | gateway4labs/labmanager,porduna/labmanager,go-lab/labmanager,morelab/labmanager,morelab/labmanager,labsland/labmanager,gateway4labs/labmanager,gateway4labs/labmanager,porduna/labmanager,go-lab/labmanager,porduna/labmanager,porduna/labmanager,go-lab/labmanager,morelab/labmanager,gateway4labs/labmanager,labsland/labmanager,go-lab/labmanager,labsland/labmanager,morelab/labmanager,labsland/labmanager | config.py | config.py | import os
import yaml
LAB_ENV = os.environ.get('LAB_ENV', 'development')
env_config = yaml.load(open('labmanager/config/database.yml'))[LAB_ENV]
# Have you run... "pip install git+https://github.com/lms4labs/rlms_weblabdeusto.git" first?
RLMS = ['weblabdeusto','unr']
SQLALCHEMY_ENGINE_STR = os.environ.get('DATABASE_URL', None)
USE_PYMYSQL = env_config.get('pymsql', False)
if SQLALCHEMY_ENGINE_STR is None:
if env_config['engine'] == 'mysql':
SQLALCHEMY_ENGINE_STR = "mysql://%s:%s@%s/%s" % \
(env_config['username'], env_config['password'],
env_config['host'], env_config['dbname'])
elif env_config['engine'] == 'sqlite':
SQLALCHEMY_ENGINE_STR = "sqlite:///%s.db" % env_config['dbname']
elif env_config['engine'] == 'postgres':
SQLALCHEMY_ENGINE_STR = "postgresql+%s://%s:%s@%s/%s" % \
(env_config['driver'], env_config['username'],
env_config['password'], env_config['host'], env_config['dbname'])
elif env_config['engine'] == 'oracle':
SQLALCHEMY_ENGINE_STR = "oracle://%s:%S@%s/%s" % \
(env_config['username'], env_config['password'], env_config['host']
, env_config['dbname'])
#
# Flask configuration
#
DEBUG = True
SECRET_KEY = 'secret'
DEBUGGING_REQUESTS = False
| import os
import yaml
LAB_ENV = os.environ.get('LAB_ENV', 'development')
env_config = yaml.load(open('labmanager/config/database.yml'))[LAB_ENV]
# Have you run... "pip install git+https://github.com/lms4labs/rlms_weblabdeusto.git" first?
RLMS = ['weblabdeusto','unr']
SQLALCHEMY_ENGINE_STR = os.environ.get('DATABASE_URL', None)
USE_PYMYSQL = env_config.get('pymsql', False)
if SQLALCHEMY_ENGINE_STR is None:
if env_config['engine'] == 'mysql':
SQLALCHEMY_ENGINE_STR = "mysql://%s:%s@%s/%s" % \
(env_config['username'], env_config['password'],
env_config['host'], env_config['dbname'])
elif env_config['engine'] == 'sqlite':
SQLALCHEMY_ENGINE_STR = "sqlite:///%s.db" % env_config['dbname']
elif env_config['engine'] == 'postgres':
SQLALCHEMY_ENGINE_STR = "postgresql+%s://%s:%s@%s/%s" % \
(env_config['driver'], env_config['username'],
env_config['password'], env_config['host'], env_config['dbname'])
elif env_config['engine'] == 'oracle':
SQLALCHEMY_ENGINE_STR = "oracle://%s:%S@%s/%s" % \
(env_config['username'], env_config['password'], env_config['host']
, env_config['dbname'])
print SQLALCHEMY_ENGINE_STR
#
# Flask configuration
#
DEBUG = True
SECRET_KEY = 'secret'
DEBUGGING_REQUESTS = False
#
# heroku = os.environ.get('HEROKU', None)
# testing = os.environ.get('TESTING_LABMANAGER', None)
# if heroku:
# SQLALCHEMY_ENGINE_STR = os.environ.get('DATABASE_URL')
# USE_PYMYSQL = False
# elif testing:
# SQLALCHEMY_ENGINE_STR = os.environ['TESTING_LABMANAGER']
# USE_PYMYSQL = False
# else:
#
# #
# # DB Configuration
# #
# USERNAME = 'labmanager'
# PASSWORD = 'labmanager'
# HOST = 'localhost'
# DBNAME = 'labmanager'
#
# ENGINE = 'mysql' # or 'sqlite', 'postgresql', 'oracle'
# USE_PYMYSQL = False
#
# | bsd-2-clause | Python |
6b3a3d1ccf2f54d859b25904bf33edf917931529 | fix url | akakou/Incoming-WebHooks-Bot | incoming-webhooks.py | incoming-webhooks.py | # coding:utf-8
'''This program is class for incoming webhooks'''
import requests
import json
class IncomingWebhooks:
"""Incoming webhooks"""
def __init__(self, url='', text=u'', username=u'', icon_emoji=u'', link_names=0):
"""Set Property"""
self.url = url
self.data = json.dumps({
'text': text, # text
'username': username, # user name
'icon_emoji': icon_emoji, # profile emoji
'link_names': link_names, # mention
})
def send(self):
"""Send to Slack"""
# send
requests.post(
self.url,
self.data
)
| # coding:utf-8
'''This program is class for incoming webhooks'''
import requests
import json
class IncomingWebhooks:
"""Incoming webhooks"""
def __init__(self, url, text, username=u'', icon_emoji=u'', link_names=0):
"""Set Property"""
self.url = ''
self.data = json.dumps({
'text': text, # text
'username': username, # user name
'icon_emoji': icon_emoji, # profile emoji
'link_names': link_names, # mention
})
def send(self):
"""Send to Slack"""
# send
requests.post(
self.url,
self.data
)
| mit | Python |
a0f12252a83d3a04cfc3b73be0fa7d39809bfd59 | Bump version. | CuBoulder/atlas,CUDEN-CLAS/atlas,CUDEN-CLAS/atlas,CuBoulder/atlas,CuBoulder/atlas | config.py | config.py | """
Configuration file for Atlas
All variable settings should go here so values can be propagated to the various
functions from a central location.
"""
import re
import os
# Set Atlas location
atlas_location = os.path.dirname(os.path.realpath(__file__))
# Import config_servers.py.
try:
from config_servers import *
except ImportError:
raise Exception("You need a config_servers.py file!")
# Import config_local.py.
try:
from config_local import *
except ImportError:
raise Exception("You need a config_local.py file!")
# Verify code_root is correctly formed.
begin_with_slash = re.compile("^/")
trailing_slash = re.compile("/$")
# Uses re.match primitive to look from the beginning.
if not begin_with_slash.match(code_root):
raise Exception("'code_root' should begin with a slash.")
if not begin_with_slash.match(sites_web_root):
raise Exception("'sites_web_root' should begin with a slash.")
if not begin_with_slash.match(sites_code_root):
raise Exception("'sites_code_root' should begin with a slash.")
# Uses re.search primitive to look anywhere in the string.
if trailing_slash.search(code_root):
raise Exception("'code_root' should not have a trailing slash.")
if trailing_slash.search(sites_web_root):
raise Exception("'sites_web_root' should not have a trailing slash.")
if trailing_slash.search(sites_web_root):
raise Exception("'sites_web_root' should not have a trailing slash.")
# This allows us to use a self signed cert for local dev.
ssl_verification = True
if environment == 'local':
ssl_verification = False
version_number = '1.0.25'
| """
Configuration file for Atlas
All variable settings should go here so values can be propagated to the various
functions from a central location.
"""
import re
import os
# Set Atlas location
atlas_location = os.path.dirname(os.path.realpath(__file__))
# Import config_servers.py.
try:
from config_servers import *
except ImportError:
raise Exception("You need a config_servers.py file!")
# Import config_local.py.
try:
from config_local import *
except ImportError:
raise Exception("You need a config_local.py file!")
# Verify code_root is correctly formed.
begin_with_slash = re.compile("^/")
trailing_slash = re.compile("/$")
# Uses re.match primitive to look from the beginning.
if not begin_with_slash.match(code_root):
raise Exception("'code_root' should begin with a slash.")
if not begin_with_slash.match(sites_web_root):
raise Exception("'sites_web_root' should begin with a slash.")
if not begin_with_slash.match(sites_code_root):
raise Exception("'sites_code_root' should begin with a slash.")
# Uses re.search primitive to look anywhere in the string.
if trailing_slash.search(code_root):
raise Exception("'code_root' should not have a trailing slash.")
if trailing_slash.search(sites_web_root):
raise Exception("'sites_web_root' should not have a trailing slash.")
if trailing_slash.search(sites_web_root):
raise Exception("'sites_web_root' should not have a trailing slash.")
# This allows us to use a self signed cert for local dev.
ssl_verification = True
if environment == 'local':
ssl_verification = False
version_number = '1.0.24'
| mit | Python |
7e25321a6f6b0c6817eb14232f732cfccab8fdd8 | Update utils.py | robcza/intelmq,robcza/intelmq,certtools/intelmq,certtools/intelmq,pkug/intelmq,aaronkaplan/intelmq,aaronkaplan/intelmq,sch3m4/intelmq,sch3m4/intelmq,sch3m4/intelmq,certtools/intelmq,pkug/intelmq,pkug/intelmq,sch3m4/intelmq,pkug/intelmq,robcza/intelmq,robcza/intelmq,aaronkaplan/intelmq | intelmq/lib/utils.py | intelmq/lib/utils.py | import logging
import hashlib
def decode(text, encodings=["utf-8", "ISO-8859-15"], force=False):
for encoding in encodings:
try:
return text.decode(encoding)
except ValueError as e:
pass
if force:
for encoding in encodings:
try:
return text.decode(encoding, 'ignore')
except ValueError as e:
pass
raise Exception("Found a problem when decoding.")
def encode(text, encodings=["utf-8"], force=False):
for encoding in encodings:
try:
return text.encode(encoding)
except ValueError as e:
pass
if force:
for encoding in encodings:
try:
return text.decode(encoding, 'ignore')
except ValueError as e:
pass
raise Exception("Found a problem when encoding.")
def log(logs_path, name, loglevel="DEBUG"):
logger = logging.getLogger(name)
logger.setLevel(loglevel)
handler = logging.FileHandler("%s/%s.log" % (logs_path, name))
handler.setLevel(loglevel)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
return logger
def hashgen(data, func=hashlib.sha1):
result = func()
result.update(data)
return result.hexdigest()
| import logging
import hashlib
def decode(text, encodings=["utf-8"], force=False):
for encoding in encodings:
try:
return text.decode(encoding)
except ValueError as e:
pass
if force:
for encoding in encodings:
try:
return text.decode(encoding, 'ignore')
except ValueError as e:
pass
raise Exception("Found a problem when decoding.")
def encode(text, encodings=["utf-8"], force=False):
for encoding in encodings:
try:
return text.encode(encoding)
except ValueError as e:
pass
if force:
for encoding in encodings:
try:
return text.decode(encoding, 'ignore')
except ValueError as e:
pass
raise Exception("Found a problem when encoding.")
def log(logs_path, name, loglevel="DEBUG"):
logger = logging.getLogger(name)
logger.setLevel(loglevel)
handler = logging.FileHandler("%s/%s.log" % (logs_path, name))
handler.setLevel(loglevel)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
return logger
def hashgen(data, func=hashlib.sha1):
result = func()
result.update(data)
return result.hexdigest() | agpl-3.0 | Python |
a05e4e478a932d18350a83687ee80103c8398f0d | Change winrate threshold | wizzomafizzo/hearthpy,wizzomafizzo/hearthpy | config.py | config.py | # hearthpy global config
db_filename = "hearthpy.db"
auth_filename = "hearthpy.auth"
port = 5000
match_limit = 20
front_match_limit = 21
card_limit = 22
winrate_tiers = [50, 55] # %
min_games_deck = 20
deck_template = "^(Dd|Hr|Me|Pn|Pt|Re|Sn|Wk|Wr) .+ \d+\.\d+$"
card_image_url = "http://wow.zamimg.com/images/hearthstone/cards/enus/original/{}.png"
cards_json_url = "https://api.hearthstonejson.com/v1/latest/enUS/cards.collectible.json"
card_packs = ["EXPERT1", "GVG", "TGT"]
craft_cost = {
"COMMON": 40,
"RARE": 100,
"EPIC": 400,
"LEGENDARY": 1600
}
heroes = [
"Druid",
"Hunter",
"Mage",
"Paladin",
"Priest",
"Rogue",
"Shaman",
"Warlock",
"Warrior"
]
heroes_abbrv = {
"Dd": "Druid",
"Hr": "Hunter",
"Me": "Mage",
"Pn": "Paladin",
"Pt": "Priest",
"Re": "Rogue",
"Sn": "Shaman",
"Wk": "Warlock",
"Wr": "Warrior"
}
modes = [
"Legend",
"Rank 1",
"Rank 2",
"Rank 3",
"Rank 4",
"Rank 5",
"Rank 6",
"Rank 7",
"Rank 8",
"Rank 9",
"Rank 10",
"Rank 11",
"Rank 12",
"Rank 13",
"Rank 14",
"Rank 15",
"Rank 16",
"Rank 17",
"Rank 18",
"Rank 19",
"Rank 20",
"Rank 21",
"Rank 22",
"Rank 23",
"Rank 24",
"Rank 25",
"Casual"
]
| # hearthpy global config
db_filename = "hearthpy.db"
auth_filename = "hearthpy.auth"
port = 5000
match_limit = 20
front_match_limit = 21
card_limit = 22
winrate_tiers = [50, 60] # %
min_games_deck = 20
deck_template = "^(Dd|Hr|Me|Pn|Pt|Re|Sn|Wk|Wr) .+ \d+\.\d+$"
card_image_url = "http://wow.zamimg.com/images/hearthstone/cards/enus/original/{}.png"
cards_json_url = "https://api.hearthstonejson.com/v1/latest/enUS/cards.collectible.json"
card_packs = ["EXPERT1", "GVG", "TGT"]
craft_cost = {
"COMMON": 40,
"RARE": 100,
"EPIC": 400,
"LEGENDARY": 1600
}
heroes = [
"Druid",
"Hunter",
"Mage",
"Paladin",
"Priest",
"Rogue",
"Shaman",
"Warlock",
"Warrior"
]
heroes_abbrv = {
"Dd": "Druid",
"Hr": "Hunter",
"Me": "Mage",
"Pn": "Paladin",
"Pt": "Priest",
"Re": "Rogue",
"Sn": "Shaman",
"Wk": "Warlock",
"Wr": "Warrior"
}
modes = [
"Legend",
"Rank 1",
"Rank 2",
"Rank 3",
"Rank 4",
"Rank 5",
"Rank 6",
"Rank 7",
"Rank 8",
"Rank 9",
"Rank 10",
"Rank 11",
"Rank 12",
"Rank 13",
"Rank 14",
"Rank 15",
"Rank 16",
"Rank 17",
"Rank 18",
"Rank 19",
"Rank 20",
"Rank 21",
"Rank 22",
"Rank 23",
"Rank 24",
"Rank 25",
"Casual"
]
| mit | Python |
2dd68e018ad9e0bce16157fa8a748b5d075196b2 | fix test | JesseAldridge/clipmon,JesseAldridge/clipmon | test.py | test.py | import os
import clipmon
import conf
with open('test_cases.txt') as f:
lines = f.read().splitlines()
proj_dir = conf.curr_proj_dirs[0]
for i in range(0, len(lines), 3):
test_line, expected = lines[i:i+2]
if expected == 'None':
expected = None
actual = clipmon.clip_str_to_path_line(test_line, proj_dir)
expected, actual = [
os.path.expanduser(s.replace('<proj_dir>', proj_dir))
if s else None for s in (expected, actual)]
print 'line: ', test_line
print 'expected:', expected
print 'actual: ', actual
print
assert actual == expected
print 'ohhh yeahhh'
| import os
import clipmon
import conf
with open('test_cases.txt') as f:
lines = f.read().splitlines()
for i in range(0, len(lines), 3):
test_line, expected = lines[i:i+2]
if expected == 'None':
expected = None
actual = clipmon.clip_str_to_path_line(test_line)
expected, actual = [
os.path.expanduser(s.replace('<proj_dir>', conf.curr_proj_dir))
if s else None for s in (expected, actual)]
print 'line: ', test_line
print 'expected:', expected
print 'actual: ', actual
print
assert actual == expected
print 'ohhh yeahhh'
| mit | Python |
e978b8be7ccfd9206d618f5a3de855a306ceccfe | Test if rotor encodes with different offset properly | ranisalt/enigma | test.py | test.py | import unittest
from enigma import Enigma, Steckerbrett, Umkehrwalze, Walzen
class RotorTestCase(unittest.TestCase):
def test_rotor_encoding(self):
rotor = Walzen(wiring='EKMFLGDQVZNTOWYHXUSPAIBRCJ', notch='Q')
self.assertEqual('E', rotor.encode('A'))
def test_rotor_reverse_encoding(self):
rotor = Walzen(wiring='EKMFLGDQVZNTOWYHXUSPAIBRCJ', notch='Q')
self.assertEqual('U', rotor.encode_reverse('A'))
def test_rotor_different_setting(self):
rotor = Walzen(wiring='EKMFLGDQVZNTOWYHXUSPAIBRCJ', notch='Q',
setting='B')
self.assertEqual('K', rotor.encode('A'))
self.assertEqual('K', rotor.encode_reverse('A'))
def test_rotor_different_offset(self):
rotor = Walzen(wiring='EKMFLGDQVZNTOWYHXUSPAIBRCJ', notch='Q',
offset='B')
self.assertEqual('D', rotor.encode('A'))
self.assertEqual('W', rotor.encode_reverse('A'))
def run_tests():
runner = unittest.TextTestRunner()
suite = unittest.TestLoader().loadTestsFromTestCase(RotorTestCase)
runner.run(suite)
if __name__ == '__main__': # pragma: no cover
run_tests() | import unittest
from enigma import Enigma, Steckerbrett, Umkehrwalze, Walzen
class RotorTestCase(unittest.TestCase):
def test_rotor_encoding(self):
rotor = Walzen(wiring='EKMFLGDQVZNTOWYHXUSPAIBRCJ', notch='Q')
self.assertEqual('E', rotor.encode('A'))
def test_rotor_reverse_encoding(self):
rotor = Walzen(wiring='EKMFLGDQVZNTOWYHXUSPAIBRCJ', notch='Q')
self.assertEqual('U', rotor.encode_reverse('A'))
def test_rotor_different_setting(self):
rotor = Walzen(wiring='EKMFLGDQVZNTOWYHXUSPAIBRCJ', notch='Q',
setting='B')
self.assertEqual('K', rotor.encode('A'))
self.assertEqual('K', rotor.encode_reverse('A'))
def run_tests():
runner = unittest.TextTestRunner()
suite = unittest.TestLoader().loadTestsFromTestCase(RotorTestCase)
runner.run(suite)
if __name__ == '__main__': # pragma: no cover
run_tests() | mit | Python |
5173f9e37de4bddda59c76b6e48fe8ccfc9692f1 | Make cython.py executable and add Unix shebang | dahebolangkuan/cython,fperez/cython,JelleZijlstra/cython,madjar/cython,roxyboy/cython,ABcDexter/cython,scoder/cython,mcanthony/cython,rguillebert/CythonCTypesBackend,hhsprings/cython,ChristopherHogan/cython,hpfem/cython,mrGeen/cython,fperez/cython,fabianrost84/cython,mcanthony/cython,encukou/cython,fabianrost84/cython,roxyboy/cython,encukou/cython,slonik-az/cython,encukou/cython,cython/cython,JelleZijlstra/cython,andreasvc/cython,larsmans/cython,hpfem/cython,mrGeen/cython,encukou/cython,madjar/cython,da-woods/cython,ABcDexter/cython,slonik-az/cython,marscher/cython,mcanthony/cython,hpfem/cython,ChristopherHogan/cython,andreasvc/cython,hhsprings/cython,hickford/cython,fabianrost84/cython,marscher/cython,JelleZijlstra/cython,acrispin/cython,acrispin/cython,achernet/cython,JelleZijlstra/cython,andreasvc/cython,mcanthony/cython,fperez/cython,larsmans/cython,achernet/cython,dahebolangkuan/cython,larsmans/cython,fperez/cython,larsmans/cython,acrispin/cython,dahebolangkuan/cython,encukou/cython,madjar/cython,JelleZijlstra/cython,mcanthony/cython,marscher/cython,mrGeen/cython,c-blake/cython,roxyboy/cython,slonik-az/cython,andreasvc/cython,achernet/cython,cython/cython,acrispin/cython,rguillebert/CythonCTypesBackend,roxyboy/cython,c-blake/cython,marscher/cython,rguillebert/CythonCTypesBackend,hickford/cython,madjar/cython,da-woods/cython,rguillebert/CythonCTypesBackend,slonik-az/cython,hpfem/cython,ABcDexter/cython,fperez/cython,hickford/cython,fabianrost84/cython,da-woods/cython,acrispin/cython,hhsprings/cython,cython/cython,roxyboy/cython,scoder/cython,hhsprings/cython,larsmans/cython,c-blake/cython,hpfem/cython,dahebolangkuan/cython,mrGeen/cython,fabianrost84/cython,marscher/cython,c-blake/cython,achernet/cython,ChristopherHogan/cython,andreasvc/cython,scoder/cython,madjar/cython,scoder/cython,ABcDexter/cython,cython/cython,da-woods/cython,mrGeen/cython,achernet/cython,hhsprings/cy
thon,dahebolangkuan/cython,hickford/cython,slonik-az/cython,hickford/cython,c-blake/cython,ABcDexter/cython | cython.py | cython.py | #!/usr/bin/env python
#
# Cython -- Main Program, generic
#
if __name__ == '__main__':
import os
import sys
# Make sure we import the right Cython
cythonpath, _ = os.path.split(os.path.realpath(__file__))
sys.path.insert(0, cythonpath)
from Cython.Compiler.Main import main
main(command_line = 1)
else:
# Void cython.* directives.
from Cython.Shadow import *
| #
# Cython -- Main Program, generic
#
if __name__ == '__main__':
import os
import sys
# Make sure we import the right Cython
cythonpath, _ = os.path.split(os.path.realpath(__file__))
sys.path.insert(0, cythonpath)
from Cython.Compiler.Main import main
main(command_line = 1)
else:
# Void cython.* directives.
from Cython.Shadow import *
| apache-2.0 | Python |
94c647ac51a9547371bb7326964995904688abe3 | Add django-import-export to admin | johngian/woodstock,mozilla/woodstock,ppapadeas/woodstock,johngian/woodstock,ppapadeas/woodstock,mozilla/woodstock,ppapadeas/woodstock,mozilla/woodstock,mozilla/woodstock,ppapadeas/woodstock,johngian/woodstock,johngian/woodstock | woodstock/voting/admin.py | woodstock/voting/admin.py | from django.contrib import admin
from import_export.admin import ExportMixin, ImportExportMixin
from import_export import fields, resources
from woodstock.voting.models import MozillianGroup, MozillianProfile, Vote
class MozillianGroupResouce(resources.ModelResource):
negative_votes = fields.Field()
skip_votes = fields.Field()
positive_votes = fields.Field()
stellar_votes = fields.Field()
total_votes = fields.Field()
class Meta:
model = MozillianProfile
def dehydrate_negative_votes(self, mozillianprofile):
return mozillianprofile.votes.filter(vote=-1).count()
def dehydrate_skip_votes(self, mozillianprofile):
return mozillianprofile.votes.filter(vote=0).count()
def dehydrate_positive_votes(self, mozillianprofile):
return mozillianprofile.votes.filter(vote=1).count()
def dehydrate_stellar_votes(self, mozillianprofile):
return mozillianprofile.votes.filter(vote=2).count()
def total_votes(self, mozillianprofile):
negatives = mozillianprofile.votes.filter(vote=-1).count()
positives = (mozillianprofile.votes.filter(vote=1).count() +
mozillianprofile.votes.filter(vote=2).count())
return (positives - negatives)
class MozillianProfileAdmin(ImportExportMixin, admin.ModelAdmin):
"""Mozillian profiles under /admin."""
resource_class = MozillianGroupResouce
model = MozillianProfile
search_fields = ['full_name', 'country']
list_display = ['username', 'full_name', 'email', 'city', 'country',
'negative', 'skip', 'positive', 'stellar']
def negative(self, obj):
return obj.votes.filter(vote=-1).count()
def skip(self, obj):
return obj.votes.filter(vote=0).count()
def positive(self, obj):
return obj.votes.filter(vote=1).count()
def stellar(self, obj):
return obj.votes.filter(vote=2).count()
class VoteAdmin(ExportMixin, admin.ModelAdmin):
model = Vote
list_display = ['voter', 'nominee', 'vote']
class MozillianGroupAdmin(ExportMixin, admin.ModelAdmin):
model = MozillianGroup
admin.site.register(MozillianGroup, MozillianGroupAdmin)
admin.site.register(Vote, VoteAdmin)
admin.site.register(MozillianProfile, MozillianProfileAdmin)
| import csv
from django.contrib import admin
from django.http import HttpResponse
from woodstock.voting.models import MozillianGroup, MozillianProfile, Vote
def export_as_csv_action(description=None, fields=None, exclude=None,
header=True):
"""
This function returns an export csv action
'fields' and 'exclude' work like in django ModelForm
'header' is whether or not to output the column names as the first row
Based on snippet http://djangosnippets.org/snippets/2020/
"""
def export_as_csv(modeladmin, request, queryset):
"""
Generic csv export admin action.
based on http://djangosnippets.org/snippets/1697/
"""
opts = modeladmin.model._meta
field_names = set([field.name for field in opts.fields])
if fields:
fieldset = set(fields)
field_names = fieldset
elif exclude:
excludeset = set(exclude)
field_names = field_names - excludeset
response = HttpResponse(mimetype='text/csv')
response['Content-Disposition'] = ('attachment; filename=%s.csv' %
unicode(opts).replace('.', '_'))
writer = csv.writer(response, delimiter=';')
if header:
writer.writerow(list(field_names))
for obj in queryset:
writer.writerow([unicode(getattr(obj, field)).encode('utf-8')
for field in field_names])
return response
export_as_csv.short_description = (description or 'Export to CSV file')
return export_as_csv
class MozillianProfileAdmin(admin.ModelAdmin):
"""Mozillian profiles under /admin."""
search_fields = ['full_name', 'country']
list_display = ['full_name', 'email', 'city', 'country', 'negative',
'skip', 'positive', 'stellar']
actions = [export_as_csv_action(fields=('full_name', 'email', 'city',
'country', 'positive', 'negative',
'stellar', 'skip'),
header=True)]
def negative(self, obj):
return obj.votes.filter(vote=-1).count()
def skip(self, obj):
return obj.votes.filter(vote=0).count()
def positive(self, obj):
return obj.votes.filter(vote=1).count()
def stellar(self, obj):
return obj.votes.filter(vote=2).count()
class VoteAdmin(admin.ModelAdmin):
model = Vote
list_display = ['voter', 'nominee', 'vote']
class MozillianGroupAdmin(admin.ModelAdmin):
model = MozillianGroup
admin.site.register(MozillianGroup, MozillianGroupAdmin)
admin.site.register(Vote, VoteAdmin)
admin.site.register(MozillianProfile, MozillianProfileAdmin)
| mpl-2.0 | Python |
89c960bbe4154914f39391e4f0a4d47db33c01fb | Update test baseline | Kitware/geojs,OpenGeoscience/geojs,Kitware/geojs,OpenGeoscience/geojs,OpenGeoscience/geojs,Kitware/geojs | testing/test-cases/selenium-tests/glMultiPolygons/testGlMultiPolygons.py | testing/test-cases/selenium-tests/glMultiPolygons/testGlMultiPolygons.py | #!/usr/bin/env python
from selenium_test import FirefoxTest, ChromeTest
class glMultiPolygonsBase(object):
testCase = ('glMultiPolygons',)
testRevision = 4
def loadPage(self):
self.resizeWindow(640, 480)
self.loadURL('glMultiPolygons/index.html')
self.wait()
self.resizeWindow(640, 480)
def testGlMultiPolygons(self):
self.loadPage()
testName = 'drawGlMultiPolygons'
self.screenshotTest(testName, revision=self.testRevision)
class FirefoxOSM(glMultiPolygonsBase, FirefoxTest):
testCase = glMultiPolygonsBase.testCase + ('firefox',)
class ChromeOSM(glMultiPolygonsBase, ChromeTest):
testCase = glMultiPolygonsBase.testCase + ('chrome',)
if __name__ == '__main__':
import unittest
unittest.main()
| #!/usr/bin/env python
from selenium_test import FirefoxTest, ChromeTest
class glMultiPolygonsBase(object):
testCase = ('glMultiPolygons',)
testRevision = 3
def loadPage(self):
self.resizeWindow(640, 480)
self.loadURL('glMultiPolygons/index.html')
self.wait()
self.resizeWindow(640, 480)
def testGlMultiPolygons(self):
self.loadPage()
testName = 'drawGlMultiPolygons'
self.screenshotTest(testName, revision=self.testRevision)
class FirefoxOSM(glMultiPolygonsBase, FirefoxTest):
testCase = glMultiPolygonsBase.testCase + ('firefox',)
class ChromeOSM(glMultiPolygonsBase, ChromeTest):
testCase = glMultiPolygonsBase.testCase + ('chrome',)
if __name__ == '__main__':
import unittest
unittest.main()
| apache-2.0 | Python |
4514ddeac5bd2adc5228ecd4cd497059e7c82c88 | delete unused code | free-free/pyblog,free-free/pyblog,free-free/pyblog,free-free/pyblog | pyblog/application.py | pyblog/application.py | # -*- coding:utf-8 -*-
import os,time,asyncio,json
import logging
logging.basicConfig(level=logging.ERROR)
try:
from aiohttp import web
except ImportError:
logging.error("Can't import module aiohttp")
from pyblog.log import Log
from pyblog.httptools import Middleware,Route
from pyblog.template import Template
from pyblog.config import Config
from pyblog.database import DB
from pyblog.session import SessionManager
logging.basicConfig(level=logging.INFO)
__all__=("Application",)
class Application(web.Application):
def __init__(self):
self._loop=asyncio.get_event_loop()
super(Application,self).__init__(loop=self._loop,middlewares=Middleware.allmiddlewares())
def run(self,addr='127.0.0.1',port='8000'):
self._server=self._loop.run_until_complete(self.get_server(addr,port))
try:
self._loop.run_forever()
except KeyboardInterrupt:
pass
finally:
self._db_pool.close()
self._loop.run_until_complete(self._db_pool.wait_closed())
self._server.close()
self._loop.run_until_complete(self._server.wait_closed())
self._loop.run_until_complete(self.shutdown())
self._loop.run_until_complete(self._handler.finish_connections(60))
self._loop.run_until_complete(self.cleanup())
self._loop.close()
@asyncio.coroutine
def get_server(self,addr,port):
self['__templating__']=Template()
Route.register_route(self)
self._db_pool=yield from DB.createpool(self._loop)
self._handler=self.make_handler()
server=yield from self._loop.create_server(self._handler,addr,port)
Log.info("server start at http://%s:%s"%(addr,port))
print("server start at http://%s:%s"%(addr,port))
return server
class AppAbstractRegister(object):
r'''
AppAbstractRegister is a place where you can register your own's things that you need to use in your app procession,
those things mostly likes your task queue executor and so on
'''
def __init__(self,*args,**kwargs):
pass
def process(self):
r"""
logic code ,you want to use register
"""
pass
| # -*- coding:utf-8 -*-
import os,time,asyncio,json
import logging
logging.basicConfig(level=logging.ERROR)
try:
from aiohttp import web
except ImportError:
logging.error("Can't import module aiohttp")
from pyblog.log import Log
from pyblog.httptools import Middleware,Route
from pyblog.template import Template
from pyblog.config import Config
from pyblog.database import DB
from pyblog.session import SessionManager
logging.basicConfig(level=logging.INFO)
__all__=("Application",)
class Application(web.Application):
def __init__(self):
self._loop=asyncio.get_event_loop()
super(Application,self).__init__(loop=self._loop,middlewares=Middleware.allmiddlewares())
def run(self,addr='127.0.0.1',port='8000'):
self._server=self._loop.run_until_complete(self.get_server(addr,port))
try:
self._loop.run_forever()
except KeyboardInterrupt:
pass
finally:
self._db_pool.close()
self._loop.run_until_complete(self._db_pool.wait_closed())
self._server.close()
self._loop.run_until_complete(self._server.wait_closed())
self._loop.run_until_complete(self.shutdown())
self._loop.run_until_complete(self._handler.finish_connections(60))
self._loop.run_until_complete(self.cleanup())
self._loop.close()
@asyncio.coroutine
def get_server(self,addr,port):
self['__templating__']=Template()
Route.register_route(self)
self._db_pool=yield from DB.createpool(self._loop)
#pool=yield from create_pool(self._loop)
self._handler=self.make_handler()
server=yield from self._loop.create_server(self._handler,addr,port)
logging.info("server start at http://%s:%s"%(addr,port))
Log.info("server start at http://%s:%s"%(addr,port))
print("server start at http://%s:%s"%(addr,port))
return server
class AppAbstractRegister(object):
r'''
AppAbstractRegister is a place where you can register your own's things that you need to use in your app procession,
those things mostly likes your task queue executor and so on
'''
def __init__(self,*args,**kwargs):
pass
def process(self):
r"""
logic code ,you want to use register
"""
pass
| mit | Python |
4014553a3d7ddad5f86f53e212fd0b15a0535398 | Fix template settings | uhuramedia/cookiecutter-django,uhuramedia/cookiecutter-django | {{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/settings/deploy.py | {{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/settings/deploy.py | from base import *
DEBUG = False
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': '{{cookiecutter.project_name}}',
'HOST': '',
'USER': '',
'PASSWORD': '',
'CONN_MAX_AGE': 600,
}
}
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': '127.0.0.1:11211',
'KEY_PREFIX': 'key_prefix'
}
}
ALLOWED_HOSTS = ['.{{cookiecutter.domain_name}}', '{{cookiecutter.domain_name}}.'] # subdomains and FQDN
ROOT_URLCONF = '{{cookiecutter.repo_name}}.urls'
| from base import *
DEBUG = False
TEMPLATE_DEBUG = DEBUG
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': '{{cookiecutter.project_name}}',
'HOST': '',
'USER': '',
'PASSWORD': '',
'CONN_MAX_AGE': 600,
}
}
TEMPLATE_LOADERS = (
(
'django.template.loaders.cached.Loader', (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
),
)
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': '127.0.0.1:11211',
'KEY_PREFIX': 'key_prefix'
}
}
ALLOWED_HOSTS = ['.{{cookiecutter.domain_name}}', '{{cookiecutter.domain_name}}.'] # subdomains and FQDN
ROOT_URLCONF = '{{cookiecutter.repo_name}}.urls' | bsd-3-clause | Python |
2b6ac1f28b85b7726c6fc52eabaabed73acb2a98 | add test_view | evuez/mutations | test.py | test.py | import logging
from time import sleep
from pyglet import app
from pyglet.window import Window
from pyglet.clock import schedule_interval
from render import MapView
from mutations import Map
from mutations import Body
from mutations import EnergyBank
logging.basicConfig(level=logging.DEBUG)
def test():
map_ = Map(1000, 1000)
for i in range(2):
map_.add(EnergyBank(map_))
for i in range(5):
map_.add(Body(map_))
for i in range(1000):
map_.tick()
def test_view():
map_ = Map(500, 500)
for i in range(2):
map_.add(EnergyBank(map_))
for i in range(50):
map_.add(Body(map_))
def update(dt):
map_.tick()
window = Window(map_.width, map_.height)
map_view = MapView(map_)
schedule_interval(update, 0.1)
@window.event
def on_draw():
window.clear()
map_view.draw()
app.run()
if __name__ == '__main__':
test_view()
| from time import sleep
from mutations import Map
from mutations import Body
from mutations import EnergyBank
import logging
logging.basicConfig(filename='test.log', level=logging.INFO)
def test():
map_ = Map(1000, 1000)
for i in range(2):
map_.add(EnergyBank(map_))
for i in range(5):
map_.add(Body(map_))
for i in range(1000):
map_.tick()
if __name__ == '__main__':
test()
| mit | Python |
9c7acc7e0c4973a85b4e703d2e05312d4d727cd6 | Reorder __all__ | numberoverzero/pyservice | pyservice/__init__.py | pyservice/__init__.py | from pyservice.client import Client
from pyservice.layer import Layer
from pyservice.operation import Operation
from pyservice.service import Service
__all__ = ["Client", "Layer", "Operation", "Service"]
| from pyservice.client import Client
from pyservice.service import Service
__all__ = ["Client", "Service"]
| mit | Python |
26b14aa13f28859585080115f66bb7a995b37d59 | change error output | ami-GS/pyHPACK | test.py | test.py | import os
import json
from HPACK import decode
TESTCASE = [
'hpack-test-case/haskell-http2-naive/',
# 'hpack-test-case/haskell-http2-naive-huffman/',
# 'hpack-test-case/haskell-http2-static/',
# 'hpack-test-case/haskell-http2-static-huffman/',
# 'hpack-test-case/haskell-http2-linear/',
# 'hpack-test-case/haskell-http2-linear-huffman/',
]
if __name__ == "__main__":
headers = None
for i in range(len(TESTCASE)):
cases = [TESTCASE[i] + name for name in os.listdir(TESTCASE[i])]
for case in cases:
allPass = False
with open(case) as f:
data = json.loads(f.read())
for seqno in range(len(data['cases'])):
try:
headers = decode(data['cases'][seqno]['wire'])
except Exception as e:
print(e)
for header in data['cases'][seqno]['headers']:
if header not in headers:
print header
print headers[header.keys()[0]]
print('Missed the in %s seqno %d' % (case, seqno))
break
if seqno == len(data['cases'])-1:
allPass = True
if allPass:
print('Passed the %s' % case)
| import os
import json
from HPACK import decode
TESTCASE = [
'hpack-test-case/haskell-http2-naive/',
# 'hpack-test-case/haskell-http2-naive-huffman/',
# 'hpack-test-case/haskell-http2-static/',
# 'hpack-test-case/haskell-http2-static-huffman/',
# 'hpack-test-case/haskell-http2-linear/',
# 'hpack-test-case/haskell-http2-linear-huffman/',
]
if __name__ == "__main__":
headers = None
for i in range(len(TESTCASE)):
cases = [TESTCASE[i] + name for name in os.listdir(TESTCASE[i])]
for case in cases:
allPass = False
with open(case) as f:
data = json.loads(f.read())
for seqno in range(len(data['cases'])):
try:
headers = decode(data['cases'][seqno]['wire'])
except Exception as e:
print(e)
if headers != data['cases'][seqno]['headers']:
print('Missed the in %s seqno %d' % (case, seqno))
break
if seqno == len(data['cases'])-1:
allPass = True
if allPass:
print('Passed the %s' % case)
| mit | Python |
7aa281b56fa6fb4fc4c0625a13f303a085334d36 | Build a set of categories to avoid duplication | vodik/pytest-exceptional | pytest_exceptional.py | pytest_exceptional.py | # -*- coding: utf-8 -*-
import pytest
from _pytest._code.code import TerminalRepr
class PytestException(Exception):
pass
class ExceptionRepr(TerminalRepr):
def __init__(self, excinfo, longrepr):
self.excinfo = excinfo
self.longrepr = longrepr
def toterminal(self, tw):
try:
self.excinfo.value.toterminal(self.longrepr, tw)
except AttributeError:
self.longrepr.toterminal(tw)
def terminal_summary(self, terminalreporter, header):
try:
header = self.excinfo.value.summary_header(header)
except:
pass
_, _, word = self.excinfo.value.__teststatus__
terminalreporter.write_sep('_', header, **word[1])
self.toterminal(terminalreporter._tw)
def pytest_namespace():
return {'Exception': PytestException}
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item, call):
outcome = yield
report = outcome.get_result()
if not call.excinfo:
return
elif call.excinfo.errisinstance(PytestException):
report.excinfo = call.excinfo
report.longrepr = ExceptionRepr(report.excinfo,
report.longrepr)
def pytest_report_teststatus(report):
if hasattr(report, 'excinfo'):
return report.excinfo.value.__teststatus__
def pytest_terminal_summary(terminalreporter):
categories = {cls.__teststatus__[0]
for cls in PytestException.__subclasses__()}
for cat in categories:
for report in terminalreporter.getreports(cat):
header = terminalreporter._getfailureheadline(report)
report.longrepr.terminal_summary(terminalreporter, header)
| # -*- coding: utf-8 -*-
import pytest
from _pytest._code.code import TerminalRepr
class PytestException(Exception):
pass
class ExceptionRepr(TerminalRepr):
def __init__(self, excinfo, longrepr):
self.excinfo = excinfo
self.longrepr = longrepr
def toterminal(self, tw):
try:
self.excinfo.value.toterminal(self.longrepr, tw)
except AttributeError:
self.longrepr.toterminal(tw)
def terminal_summary(self, terminalreporter, header):
try:
header = self.excinfo.value.summary_header(header)
except:
pass
_, _, word = self.excinfo.value.__teststatus__
terminalreporter.write_sep('_', header, **word[1])
self.toterminal(terminalreporter._tw)
def pytest_namespace():
return {'Exception': PytestException}
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item, call):
outcome = yield
report = outcome.get_result()
if not call.excinfo:
return
elif call.excinfo.errisinstance(PytestException):
report.excinfo = call.excinfo
report.longrepr = ExceptionRepr(report.excinfo,
report.longrepr)
def pytest_report_teststatus(report):
if hasattr(report, 'excinfo'):
return report.excinfo.value.__teststatus__
def pytest_terminal_summary(terminalreporter):
for cls in PytestException.__subclasses__():
cat, _, _ = cls.__teststatus__
for report in terminalreporter.getreports(cat):
header = terminalreporter._getfailureheadline(report)
report.longrepr.terminal_summary(terminalreporter, header)
| mit | Python |
53e0a7b7ac81fab6527796e2fa33a456d0650863 | Reduce maximum false positive rate to 5%. | Aurora0001/LearnProgrammingBot | test.py | test.py | import unittest
from sqlalchemy.orm import sessionmaker
from sqlalchemy import create_engine
from sklearn import cross_validation
import model
import main
class TestClassifier(unittest.TestCase):
def test_classifications(self):
false_positives = 0
false_negatives = 0
correct = 0
wrong = 0
engine = create_engine('sqlite:///data.db')
Session = sessionmaker(bind=engine)
session = Session()
training_data = session.query(model.Corpus).all()
training_values = [rec.title + ' ' + rec.text for rec in training_data]
training_targets = [rec.category for rec in training_data]
training_values, testing_values, training_targets, testing_targets = cross_validation.train_test_split(training_values, training_targets, test_size=0.3, random_state=0)
classifier = main.Classifier(training_values, training_targets)
for (i, message_text) in enumerate(testing_values):
classification = classifier.classify(message_text)[0]
if testing_targets[i] == 'good' and classification != 'good':
false_positives += 1
print(message_text)
print('[Suspected {}]'.format(classification))
print('---')
elif testing_targets[i] != 'good' and classification == 'good':
false_negatives += 1
elif testing_targets[i] == classification:
correct += 1
else:
wrong += 1
print('{} false positives ({})'.format(false_positives, float(false_positives)/len(testing_values)))
print('{} false negatives ({})'.format(false_negatives, float(false_negatives)/len(testing_values)))
print('{} correct ({})'.format(correct, float(correct)/len(testing_values)))
print('{} wrong ({})'.format(wrong, float(wrong)/len(testing_values)))
if float(false_positives) / len(testing_values) > 0.05:
raise Exception('False positive rate too high!')
elif float(correct) / len(testing_values) < 0.6:
raise Exception('Correct identification rate too low!')
if __name__ == '__main__':
unittest.main()
| import unittest
from sqlalchemy.orm import sessionmaker
from sqlalchemy import create_engine
from sklearn import cross_validation
import model
import main
class TestClassifier(unittest.TestCase):
def test_classifications(self):
false_positives = 0
false_negatives = 0
correct = 0
wrong = 0
engine = create_engine('sqlite:///data.db')
Session = sessionmaker(bind=engine)
session = Session()
training_data = session.query(model.Corpus).all()
training_values = [rec.title + ' ' + rec.text for rec in training_data]
training_targets = [rec.category for rec in training_data]
training_values, testing_values, training_targets, testing_targets = cross_validation.train_test_split(training_values, training_targets, test_size=0.3, random_state=0)
classifier = main.Classifier(training_values, training_targets)
for (i, message_text) in enumerate(testing_values):
classification = classifier.classify(message_text)[0]
if testing_targets[i] == 'good' and classification != 'good':
false_positives += 1
print(message_text)
print('[Suspected {}]'.format(classification))
print('---')
elif testing_targets[i] != 'good' and classification == 'good':
false_negatives += 1
elif testing_targets[i] == classification:
correct += 1
else:
wrong += 1
print('{} false positives ({})'.format(false_positives, float(false_positives)/len(testing_values)))
print('{} false negatives ({})'.format(false_negatives, float(false_negatives)/len(testing_values)))
print('{} correct ({})'.format(correct, float(correct)/len(testing_values)))
print('{} wrong ({})'.format(wrong, float(wrong)/len(testing_values)))
if float(false_positives) / len(testing_values) > 0.1:
raise Exception('False positive rate too high!')
elif float(correct) / len(testing_values) < 0.6:
raise Exception('Correct identification rate too low!')
if __name__ == '__main__':
unittest.main()
| mit | Python |
47d1ad8ed082c43f1ff3bd157db077d1e494186a | Add to_json method for departures | MarkusH/bvg-grabber | bvggrabber/api/__init__.py | bvggrabber/api/__init__.py | # -*- coding: utf-8 -*-
import json
from datetime import datetime
from dateutil.parser import parse
fullformat = lambda dt: dt.strftime('%Y-%m-%d %H:%M')
hourformat = lambda dt: dt.strftime('%H:%M')
class QueryApi(object):
def __init__(self):
pass
def call(self):
raise NotImplementedError("The inheriting class needs to implement "
"the call() method!")
class Departure(object):
def __init__(self, start, end, when, line):
self.start = start
self.end = end
self.line = line
self.now = datetime.now()
if isinstance(when, (int, float)):
# We assume to get a UNIX / POSIX timestamp
self.when = datetime.fromtimestamp(when)
elif isinstance(when, str):
self.when = parse(when, fuzzy=True)
#if (self.when - self.now).total_seconds() < -60:
# self.when = self.when + timedelta(days=1)
elif isinstance(when, datetime):
self.when = when
else:
ValueError("when must be a valid datetime, timestamp or string!")
def __str__(self):
return "Start: %s, End: %s, when: %s, now: %s, line: %s" % (
self.start, self.end, hourformat(self.when), hourformat(self.now),
self.line)
@property
def remaining(self):
return self.when - self.now
def to_json(self):
return json.dumps({'start': self.start.decode('iso-8859-1'),
'end': self.end,
'line': self.line,
'now_full': fullformat(self.now),
'now_hour': hourformat(self.now),
'when_full': fullformat(self.when),
'when_hour': hourformat(self.when),
'remaining': round(self.remaining.total_seconds())})
| # -*- coding: utf-8 -*-
from datetime import datetime
from dateutil.parser import parse
fullformat = lambda dt: dt.strftime('%Y-%m-%d %H:%M')
hourformat = lambda dt: dt.strftime('%H:%M')
class QueryApi():
def __init__(self):
pass
def call(self):
raise NotImplementedError("The inheriting class needs to implement "
"the call() method!")
class Departure():
def __init__(self, start, end, when, line):
self.start = start
self.end = end
self.now = datetime.now()
if isinstance(when, (int, float)):
# We assume to get a UNIX / POSIX timestamp
self.when = datetime.fromtimestamp(when)
elif isinstance(when, str):
self.when = parse(when, fuzzy=True)
#if (self.when - self.now).total_seconds() < -60:
# self.when = self.when + timedelta(days=1)
elif isinstance(when, datetime):
self.when = when
else:
ValueError("when must be a valid datetime, timestamp or string!")
self.line = line
def __str__(self):
return "Start: %s, End: %s, when: %s, now: %s, line: %s" % (
self.start, self.end, hourformat(self.when), hourformat(self.now),
self.line)
@property
def remaining(self):
return self.when - self.now
def to_json(self):
pass
| bsd-3-clause | Python |
724c1b71d5ca299dd23ff51d5f568c08a77f2cf5 | Update _version.py | 4dn-dcic/tibanna,4dn-dcic/tibanna,4dn-dcic/tibanna | tibanna/_version.py | tibanna/_version.py | """Version information."""
# The following line *must* be the last in the module, exactly as formatted:
__version__ = "0.10.1"
| """Version information."""
# The following line *must* be the last in the module, exactly as formatted:
__version__ = "0.10.0"
| mit | Python |
9092e4d5d41efd347b24efb54403d65451759d3d | make sure the example works under all supported python's vms | mgedmin/socketpool,benoitc/socketpool | examples/test_threaded.py | examples/test_threaded.py | # -*- coding: utf-8 -
#
# This file is part of socketpool.
# See the NOTICE for more information.
import socket
import sys
import threading
try:
from queue import *
except ImportError:
from Queue import *
try:
import SocketServer as socketserver
except ImportError:
import socketserver
import time
from socketpool.pool import ConnectionPool
from socketpool.conn import TcpConnector
PY3 = sys.version_info[0] == 3
if sys.version_info[0] == 3:
def s2b(s):
return s.encode('latin1')
else:
def s2b(s):
return s
class EchoHandler(socketserver.BaseRequestHandler):
def handle(self):
while True:
data = self.request.recv(1024)
if not data:
break
self.request.send(data)
print("echoed %r" % data)
class EchoServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
pass
if __name__ == "__main__":
# Port 0 means to select an arbitrary unused port
HOST, PORT = "localhost", 0
server = EchoServer((HOST, PORT), EchoHandler)
ip, port = server.server_address
# Start a thread with the server -- that thread will then start one
# more thread for each request
server_thread = threading.Thread(target=server.serve_forever,
kwargs={"poll_interval":0.5})
# Exit the server thread when the main thread terminates
server_thread.daemon = True
server_thread.start()
options = {'host': ip, 'port': port}
pool = ConnectionPool(factory=TcpConnector, options=options)
q = Queue()
def runpool():
while True:
try:
data = q.get(False)
except Empty:
break
try:
with pool.connection() as conn:
print("conn: pool size: %s" % pool.size())
sent = conn.send(data)
echo = conn.recv(1024)
print("got %s" % data)
assert data == echo
finally:
q.task_done()
for i in range(20):
q.put(s2b("Hello World %s" % i), False)
for i in range(4):
th = threading.Thread(target=runpool)
th.daemnon = True
th.start()
q.join()
print ("final pool size: %s" % pool.size())
pool.release_all()
server.shutdown()
| # -*- coding: utf-8 -
#
# This file is part of socketpool.
# See the NOTICE for more information.
import socket
import threading
try:
from queue import *
except ImportError:
from Queue import *
try:
import SocketServer as socketserver
except ImportError:
import socketserver
import time
from socketpool.pool import ConnectionPool
from socketpool.conn import TcpConnector
class EchoHandler(socketserver.BaseRequestHandler):
def handle(self):
while True:
data = self.request.recv(1024)
if not data:
break
self.request.send(data)
print("echoed %r" % data)
class EchoServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
pass
if __name__ == "__main__":
# Port 0 means to select an arbitrary unused port
HOST, PORT = "localhost", 0
server = EchoServer((HOST, PORT), EchoHandler)
ip, port = server.server_address
# Start a thread with the server -- that thread will then start one
# more thread for each request
server_thread = threading.Thread(target=server.serve_forever,
kwargs={"poll_interval":0.5})
# Exit the server thread when the main thread terminates
server_thread.daemon = True
server_thread.start()
options = {'host': ip, 'port': port}
pool = ConnectionPool(factory=TcpConnector, options=options)
q = Queue()
def runpool():
while True:
try:
data = q.get(False)
except Empty:
break
try:
with pool.connection() as conn:
print("conn: pool size: %s" % pool.size())
sent = conn.send(data)
echo = conn.recv(1024)
print("got %s" % data)
assert data == echo
finally:
q.task_done()
for i in range(20):
q.put(bytes("Hello World %s" % i, 'UTF-8'), False)
for i in range(4):
th = threading.Thread(target=runpool)
th.daemnon = True
th.start()
q.join()
print ("final pool size: %s" % pool.size())
pool.release_all()
server.shutdown()
| mit | Python |
c4d354c271e405a48fab9cdaeb41b2d5bc177be0 | Add changes from Mikhail Terekhov <terekhov@emc.com>. | rvs/gpdb,janebeckman/gpdb,greenplum-db/gpdb,lpetrov-pivotal/gpdb,CraigHarris/gpdb,foyzur/gpdb,postmind-net/postgres-xl,kmjungersen/PostgresXL,oberstet/postgres-xl,randomtask1155/gpdb,arcivanov/postgres-xl,yuanzhao/gpdb,lintzc/gpdb,royc1/gpdb,arcivanov/postgres-xl,ashwinstar/gpdb,cjcjameson/gpdb,0x0FFF/gpdb,jmcatamney/gpdb,xinzweb/gpdb,chrishajas/gpdb,xinzweb/gpdb,Quikling/gpdb,lisakowen/gpdb,yazun/postgres-xl,0x0FFF/gpdb,postmind-net/postgres-xl,techdragon/Postgres-XL,lpetrov-pivotal/gpdb,rubikloud/gpdb,jmcatamney/gpdb,yuanzhao/gpdb,oberstet/postgres-xl,foyzur/gpdb,yuanzhao/gpdb,Postgres-XL/Postgres-XL,edespino/gpdb,lisakowen/gpdb,arcivanov/postgres-xl,zaksoup/gpdb,ashwinstar/gpdb,adam8157/gpdb,50wu/gpdb,50wu/gpdb,randomtask1155/gpdb,tangp3/gpdb,atris/gpdb,ahachete/gpdb,cjcjameson/gpdb,CraigHarris/gpdb,yuanzhao/gpdb,xuegang/gpdb,janebeckman/gpdb,rubikloud/gpdb,royc1/gpdb,xinzweb/gpdb,arcivanov/postgres-xl,ahachete/gpdb,kaknikhil/gpdb,lintzc/gpdb,royc1/gpdb,CraigHarris/gpdb,rubikloud/gpdb,zeroae/postgres-xl,zaksoup/gpdb,Chibin/gpdb,edespino/gpdb,edespino/gpdb,kaknikhil/gpdb,rvs/gpdb,tangp3/gpdb,xuegang/gpdb,greenplum-db/gpdb,Quikling/gpdb,oberstet/postgres-xl,snaga/postgres-xl,Quikling/gpdb,yazun/postgres-xl,lisakowen/gpdb,jmcatamney/gpdb,CraigHarris/gpdb,kaknikhil/gpdb,lisakowen/gpdb,Quikling/gpdb,oberstet/postgres-xl,CraigHarris/gpdb,xuegang/gpdb,arcivanov/postgres-xl,yazun/postgres-xl,tpostgres-projects/tPostgres,zaksoup/gpdb,janebeckman/gpdb,lintzc/gpdb,chrishajas/gpdb,royc1/gpdb,edespino/gpdb,ashwinstar/gpdb,snaga/postgres-xl,cjcjameson/gpdb,Postgres-XL/Postgres-XL,randomtask1155/gpdb,atris/gpdb,Quikling/gpdb,techdragon/Postgres-XL,kaknikhil/gpdb,ahachete/gpdb,zaksoup/gpdb,greenplum-db/gpdb,zeroae/postgres-xl,xuegang/gpdb,0x0FFF/gpdb,jmcatamney/gpdb,greenplum-db/gpdb,0x0FFF/gpdb,ovr/postgres-xl,randomtask1155/gpdb,ashwinstar/gpdb,foyzur/gpdb,pavanvd/postgres-xl,chr
ishajas/gpdb,rubikloud/gpdb,rvs/gpdb,edespino/gpdb,janebeckman/gpdb,ovr/postgres-xl,janebeckman/gpdb,50wu/gpdb,jmcatamney/gpdb,yuanzhao/gpdb,tangp3/gpdb,xuegang/gpdb,kmjungersen/PostgresXL,lisakowen/gpdb,zeroae/postgres-xl,techdragon/Postgres-XL,snaga/postgres-xl,0x0FFF/gpdb,xuegang/gpdb,CraigHarris/gpdb,adam8157/gpdb,Quikling/gpdb,kaknikhil/gpdb,Chibin/gpdb,xuegang/gpdb,yazun/postgres-xl,rvs/gpdb,Postgres-XL/Postgres-XL,Chibin/gpdb,foyzur/gpdb,lintzc/gpdb,janebeckman/gpdb,jmcatamney/gpdb,arcivanov/postgres-xl,lisakowen/gpdb,kmjungersen/PostgresXL,edespino/gpdb,pavanvd/postgres-xl,atris/gpdb,adam8157/gpdb,edespino/gpdb,lisakowen/gpdb,jmcatamney/gpdb,Chibin/gpdb,Chibin/gpdb,cjcjameson/gpdb,50wu/gpdb,50wu/gpdb,rvs/gpdb,postmind-net/postgres-xl,randomtask1155/gpdb,lpetrov-pivotal/gpdb,Chibin/gpdb,royc1/gpdb,ovr/postgres-xl,Chibin/gpdb,kaknikhil/gpdb,edespino/gpdb,ashwinstar/gpdb,snaga/postgres-xl,lpetrov-pivotal/gpdb,randomtask1155/gpdb,zaksoup/gpdb,yuanzhao/gpdb,royc1/gpdb,Quikling/gpdb,Quikling/gpdb,lisakowen/gpdb,techdragon/Postgres-XL,cjcjameson/gpdb,chrishajas/gpdb,adam8157/gpdb,ahachete/gpdb,foyzur/gpdb,rvs/gpdb,Quikling/gpdb,greenplum-db/gpdb,yuanzhao/gpdb,lintzc/gpdb,greenplum-db/gpdb,atris/gpdb,janebeckman/gpdb,lpetrov-pivotal/gpdb,50wu/gpdb,randomtask1155/gpdb,lpetrov-pivotal/gpdb,edespino/gpdb,tpostgres-projects/tPostgres,kmjungersen/PostgresXL,kaknikhil/gpdb,Quikling/gpdb,kaknikhil/gpdb,pavanvd/postgres-xl,kaknikhil/gpdb,janebeckman/gpdb,kmjungersen/PostgresXL,tpostgres-projects/tPostgres,cjcjameson/gpdb,snaga/postgres-xl,rubikloud/gpdb,xinzweb/gpdb,janebeckman/gpdb,zaksoup/gpdb,Postgres-XL/Postgres-XL,atris/gpdb,adam8157/gpdb,tangp3/gpdb,foyzur/gpdb,50wu/gpdb,lintzc/gpdb,foyzur/gpdb,xinzweb/gpdb,randomtask1155/gpdb,lintzc/gpdb,lpetrov-pivotal/gpdb,atris/gpdb,adam8157/gpdb,techdragon/Postgres-XL,rubikloud/gpdb,zeroae/postgres-xl,0x0FFF/gpdb,Chibin/gpdb,yuanzhao/gpdb,cjcjameson/gpdb,adam8157/gpdb,chrishajas/gpdb,chrishajas/gpdb,ashwinstar/gpdb,edespino/gpdb,
chrishajas/gpdb,foyzur/gpdb,cjcjameson/gpdb,ahachete/gpdb,janebeckman/gpdb,0x0FFF/gpdb,50wu/gpdb,tpostgres-projects/tPostgres,greenplum-db/gpdb,tangp3/gpdb,postmind-net/postgres-xl,zaksoup/gpdb,ashwinstar/gpdb,rvs/gpdb,yuanzhao/gpdb,CraigHarris/gpdb,lpetrov-pivotal/gpdb,oberstet/postgres-xl,ashwinstar/gpdb,lintzc/gpdb,ovr/postgres-xl,rvs/gpdb,xinzweb/gpdb,ahachete/gpdb,chrishajas/gpdb,pavanvd/postgres-xl,atris/gpdb,ahachete/gpdb,cjcjameson/gpdb,CraigHarris/gpdb,0x0FFF/gpdb,royc1/gpdb,Postgres-XL/Postgres-XL,rubikloud/gpdb,pavanvd/postgres-xl,ovr/postgres-xl,xinzweb/gpdb,yuanzhao/gpdb,Chibin/gpdb,tangp3/gpdb,royc1/gpdb,xuegang/gpdb,yazun/postgres-xl,adam8157/gpdb,zaksoup/gpdb,postmind-net/postgres-xl,cjcjameson/gpdb,greenplum-db/gpdb,xinzweb/gpdb,ahachete/gpdb,xuegang/gpdb,jmcatamney/gpdb,atris/gpdb,kaknikhil/gpdb,zeroae/postgres-xl,rvs/gpdb,rvs/gpdb,rubikloud/gpdb,Chibin/gpdb,tangp3/gpdb,lintzc/gpdb,tangp3/gpdb,CraigHarris/gpdb,tpostgres-projects/tPostgres | src/interfaces/python/setup.py | src/interfaces/python/setup.py | #!/usr/bin/env python
# Setup script for the PyGreSQL version 3
# created 2000/04 Mark Alexander <mwa@gate.net>
# tweaked 2000/05 Jeremy Hylton <jeremy@cnri.reston.va.us>
# win32 support 2001/01 Gerhard Haering <gerhard@bigfoot.de>
# requires distutils; standard in Python 1.6, otherwise download from
# http://www.python.org/sigs/distutils-sig/download.html
# You may have to change the first 3 variables (include_dirs,
# library_dirs, optional_libs) to match your postgres distribution.
# Now, you can:
# python setup.py build # to build the module
# python setup.py install # to install it
# See http://www.python.org/sigs/distutils-sig/doc/ for more information
# on using distutils to install Python programs.
from distutils.core import setup
from distutils.extension import Extension
import sys
if sys.platform == "win32":
# If you want to build from source; you must have built a win32 native libpq # before and copied libpq.dll into the PyGreSQL root directory.
win_pg_build_root = 'd:/dev/pg/postgresql-7.0.2/'
include_dirs=[ win_pg_build_root + 'src/include', win_pg_build_root + '/src/include/libpq', win_pg_build_root + 'src', win_pg_build_root + 'src/interfaces/libpq' ]
library_dirs=[ win_pg_build_root + 'src/interfaces/libpq/Release' ]
optional_libs=[ 'libpqdll', 'wsock32', 'advapi32' ]
data_files = [ 'libpq.dll' ]
else:
include_dirs=['/usr/include/pgsql']
library_dirs=['usr/lib/pgsql']
optional_libs=['pq']
data_files = []
setup (name = "PyGreSQL",
version = "3.1",
description = "Python PostgreSQL Interfaces",
author = "D'Arcy J. M. Cain",
author_email = "darcy@druid.net",
url = "http://www.druid.net/pygresql/",
licence = "Python",
py_modules = ['pg', 'pgdb'],
ext_modules = [ Extension(
name='_pg',
sources = ['pgmodule.c'],
include_dirs = include_dirs,
library_dirs = library_dirs,
libraries = optional_libs
)]
data_files = data_files
)
| #!/usr/bin/env python
# Setup script for the PyGreSQL version 3
# created 2000/04 Mark Alexander <mwa@gate.net>
# tweaked 2000/05 Jeremy Hylton <jeremy@cnri.reston.va.us>
# win32 support 2001/01 Gerhard Haering <gerhard@bigfoot.de>
# requires distutils; standard in Python 1.6, otherwise download from
# http://www.python.org/sigs/distutils-sig/download.html
# You may have to change the first 3 variables (include_dirs,
# library_dirs, optional_libs) to match your postgres distribution.
# Now, you can:
# python setup.py build # to build the module
# python setup.py install # to install it
# See http://www.python.org/sigs/distutils-sig/doc/ for more information
# on using distutils to install Python programs.
from distutils.core import setup
import sys
if sys.platform == "win32":
# If you want to build from source; you must have built a win32 native libpq # before and copied libpq.dll into the PyGreSQL root directory.
win_pg_build_root = 'd:/dev/pg/postgresql-7.0.2/'
include_dirs=[ win_pg_build_root + 'src/include', win_pg_build_root + '/src/include/libpq', win_pg_build_root + 'src', win_pg_build_root + 'src/interfaces/libpq' ]
library_dirs=[ win_pg_build_root + 'src/interfaces/libpq/Release' ]
optional_libs=[ 'libpqdll', 'wsock32', 'advapi32' ]
data_files = [ 'libpq.dll' ]
else:
include_dirs=['/usr/include/pgsql']
library_dirs=['usr/lib/pgsql']
optional_libs=['pq']
data_files = []
setup (name = "PyGreSQL",
version = "3.1",
description = "Python PostgreSQL Interfaces",
author = "D'Arcy J. M. Cain",
author_email = "darcy@druid.net",
url = "http://www.druid.net/pygresql/",
licence = "Python",
py_modules = ['pg', 'pgdb'],
ext_modules = [ Extension(
name='_pg',
'sources': ['pgmodule.c'],
'include_dirs': include_dirs,
'library_dirs': library_dirs,
'libraries': optional_libs
)]
data_files = data_files
)
| apache-2.0 | Python |
cea2a30d3f08af18efa318c1e0f7faedcf2d477a | update default metadata | josl/ASM_challenge,josl/Fastq-dump_ENA_NCBI,josl/NCBI-Downloader,josl/ASM_challenge,josl/NCBI-Downloader,josl/Fastq-dump_ENA_NCBI | isolates/template.py | isolates/template.py | metadata = {
"sample_name": "",
"group_name": "",
"file_names": "",
"sequencing_platform": "",
"sequencing_type": "",
"pre_assembled": "",
"sample_type": "",
"organism": "",
"strain": "",
"subtype": {},
"country": "",
"region": "",
"city": "",
"zip_code": "",
"longitude": "",
"latitude": "",
"location_note": "",
"isolation_source": "",
"source_note": "",
"pathogenic": "",
"pathogenicity_note": "",
"collection_date": "",
"collected_by": "",
"usage_restrictions": "",
"release_date": "",
"email_address": "",
"notes": "",
"batch": "true"
}
default = {
"mandatory": [
"sequencing_platform",
"sequencing_type",
"collection_date"
],
"seed": {
"pre_assembled": "no",
"country": "unknown",
"isolation_source": "unknown",
"sample_type": "isolate",
"organism": "",
"pathogenic": "yes",
"usage_restrictions": "public"
}
}
| metadata = {
"sample_name": "",
"group_name": "",
"file_names": "",
"sequencing_platform": "",
"sequencing_type": "",
"pre_assembled": "",
"sample_type": "",
"organism": "",
"strain": "",
"subtype": {},
"country": "",
"region": "",
"city": "",
"zip_code": "",
"longitude": "",
"latitude": "",
"location_note": "",
"isolation_source": "",
"source_note": "",
"pathogenic": "",
"pathogenicity_note": "",
"collection_date": "",
"collected_by": "",
"usage_restrictions": "",
"release_date": "",
"email_address": "",
"notes": "",
"batch": "true"
}
default = {
"mandatory": [
"pre_assembled",
"sequencing_platform",
"sequencing_type",
"country",
"isolation_source",
"collection_date"
],
"seed": {
"pre_assembled": "no",
"sample_type": "isolate",
"organism": "",
"pathogenic": "yes",
"usage_restrictions": "public",
"usage_delay": "0"
}
}
| apache-2.0 | Python |
9a5284d6aae3084b743c4392ba5322ec01af7484 | update Calderdale import script for latest data (closes #964) | DemocracyClub/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,chris48s/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,chris48s/UK-Polling-Stations,chris48s/UK-Polling-Stations | polling_stations/apps/data_collection/management/commands/import_calderdale.py | polling_stations/apps/data_collection/management/commands/import_calderdale.py | from django.contrib.gis.geos import Point
from django.db import connection
from pollingstations.models import PollingDistrict
from data_collection.management.commands import BaseShpStationsShpDistrictsImporter
from data_finder.helpers import geocode_point_only, PostcodeError
class Command(BaseShpStationsShpDistrictsImporter):
council_id = 'E08000033'
districts_name = 'parl.2017-06-08/Version 2/Polling Districts.shp'
stations_name = 'parl.2017-06-08/Version 2/polling-stations.shp'
elections = ['parl.2017-06-08']
def parse_string(self, text):
try:
return text.strip().decode('utf-8')
except AttributeError:
return text.strip()
def district_record_to_dict(self, record):
# exclude duplicate/ambiguous code
if str(record[0]).strip() == 'DC':
return None
return {
'internal_council_id': str(record[0]).strip(),
'name': str(record[1]).strip(),
'polling_station_id': str(record[0]).strip(),
}
def station_record_to_dict(self, record):
code = self.parse_string(record[1])
address = self.parse_string(record[0])
if code == '' and address == '':
return None
return {
'internal_council_id': code,
'address': address,
'postcode': '',
}
| from django.contrib.gis.geos import Point
from django.db import connection
from pollingstations.models import PollingDistrict
from data_collection.management.commands import BaseShpStationsShpDistrictsImporter
from data_finder.helpers import geocode_point_only, PostcodeError
class Command(BaseShpStationsShpDistrictsImporter):
council_id = 'E08000033'
districts_name = 'parl.2017-06-08/Version 1/polling_districts.shp'
stations_name = 'parl.2017-06-08/Version 1/polling_districts.shp'
elections = ['parl.2017-06-08']
def district_record_to_dict(self, record):
# exclude duplicate/ambiguous code
if str(record[1]).strip() == 'DC':
return None
return {
'internal_council_id': str(record[1]).strip(),
'name': str(record[0]).strip(),
}
def station_record_to_dict(self, record):
# exclude duplicate/ambiguous code
if str(record[1]).strip() == 'DC':
return None
# grab the last bit of the address - it might be a postcode
postcode = record[2].split(",")[-1].strip()
# attempt to derive a point from it
try:
point = geocode_point_only(postcode)
location = Point(point['wgs84_lon'], point['wgs84_lat'], srid=4326)
except PostcodeError:
location = None
return {
'location': location,
'internal_council_id': str(record[1]).strip(),
'address': str(record[2]).strip(),
'postcode': '',
'polling_district_id': str(record[1]).strip(),
}
def post_import(self):
# fix dodgy polygons
print("running fixup SQL")
table_name = PollingDistrict()._meta.db_table
cursor = connection.cursor()
cursor.execute("""
UPDATE {0}
SET area=ST_Multi(ST_CollectionExtract(ST_MakeValid(area), 3))
WHERE NOT ST_IsValid(area);
""".format(table_name))
| bsd-3-clause | Python |
fa28e2c26181fd82a47162e5cb8004c97f2b42dc | increment version | gbrammer/grizli | grizli/version.py | grizli/version.py | # git describe --tags
__version__ = "0.4.0-8-g2808abe"
| # git describe --tags
__version__ = "0.4.0-3-g057a694"
| mit | Python |
fee766f9dd6edfbb24e300063c3133f01b71af97 | Update fetcher | hawson/db-builder,SteamDBAPI/db-builder | fetcher.py | fetcher.py | #!/usr/bin/python3
import requests
import json
from sqlalchemy import create_engine, ForeignKey
from sqlalchemy import Column, Date, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from sqlalchemy.exc import IntegrityError
#Globals
engine = create_engine('sqlite:///games.db')
Base = declarative_base()
API_URL = "http://store.steampowered.com/api/appdetails/"
class Game(Base):
__tablename__ = 'games'
id = Column(Integer, primary_key=True)
init_price = Column(Integer)
final_price = Column(Integer)
def __repr__(self):
return "<Game(id='%s', initial_price='%s', final_price='%s')>" % (self.id, self.init_price, self.final_price)
def build_list():
game_list = dict()
URL = "http://api.steampowered.com/ISteamApps/GetAppList/v2"
response = requests.get(URL)
game_list = json.loads(response.text)
return game_list
def fetchdump(appids):
#TODO: Make sure we were fed a list of strings, not list of ints
Session = sessionmaker(bind=engine)
session = Session()
params = {
"appids": "," . join(appids),
"filters": "price_overview"
}
response = requests.get(API_URL, params=params)
data = json.loads(response.text)
for game in data:
if data[game]["success"] is True:
if data[game]["data"]:
init_price = data[game]["data"]["price_overview"]["initial"]
final_price = data[game]["data"]["price_overview"]["final"]
game_obj = Game(id=game, init_price=init_price, final_price=final_price)
session.add(game_obj)
else:
print("ID %s is false" % game)
try:
session.commit()
except IntegrityError as err:
print("Error updating DB! %s" % err)
def main():
master_list = build_list()
#TEST: first 20 in list, earliest ID's. There has got to be a better way to do this!
apps = ""
for app in master_list["applist"]["apps"][:20]:
junk = " " + str(app["appid"])
apps += junk
appids = apps.split(' ')
Base.metadata.create_all(engine)
fetchdump(appids)
if __name__ == "__main__":
main()
| #!/usr/bin/python3
import requests
import json
from sqlalchemy import create_engine, ForeignKey
from sqlalchemy import Column, Date, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from sqlalchemy.exc import IntegrityError
#Globals
engine = create_engine('sqlite:///games.db')
Base = declarative_base()
API_URL = "http://store.steampowered.com/api/appdetails/"
class Game(Base):
__tablename__ = 'games'
id = Column(Integer, primary_key=True)
init_price = Column(Integer)
final_price = Column(Integer)
def __repr__(self):
return "<Game(id='%s', initial_price='%s', final_price='%s')>" % (self.id, self.init_price, self.final_price)
def fetchdump(appids):
#TODO: Make sure we were fed a list of strings, not list of ints
Session = sessionmaker(bind=engine)
session = Session()
params = {
"appids": "," . join(appids),
"filters": "price_overview"
}
response = requests.get(API_URL, params=params)
data = json.loads(response.text)
for game in data:
if data[game]["success"] is True:
if data[game]["data"]:
init_price = data[game]["data"]["price_overview"]["initial"]
final_price = data[game]["data"]["price_overview"]["final"]
game_obj = Game(id=game, init_price=init_price, final_price=final_price)
session.add(game_obj)
try:
session.commit()
except IntegrityError as err:
print("Error updateing DB! %s" % err)
def main():
appids=["383","440","730","31820","8190"]
Base.metadata.create_all(engine)
fetchdump(appids)
if __name__ == "__main__":
main()
| bsd-3-clause | Python |
12ec60b1974db666153f038c6bb55ce11fc101de | Update __init__.py | aspuru-guzik-group/selfies | build/lib/selfies/__init__.py | build/lib/selfies/__init__.py | #!/usr/bin/env python
__author__ = 'Mario Krenn'
__version__ = 'v0.2.0'
from .selfies_fcts import encoder, decoder
| #!/usr/bin/env python
__author__ = 'Mario Krenn'
__version__ = 'v0.1.2'
from .selfies_fcts import encoder, decoder
| apache-2.0 | Python |
d9365f726b5972ca39bac56efb48a452521811be | add more info to dbinfo | idies/pyJHTDB,idies/pyJHTDB,idies/pyJHTDB,idies/pyJHTDB | dbinfo.py | dbinfo.py | import numpy as np
import os
package_dir, package_filename = os.path.split(__file__)
isotropic1024coarse = {'name' : 'isotropic1024coarse'}
for coord in ['x', 'y', 'z']:
isotropic1024coarse[coord + 'nodes'] = (np.pi/512)*np.array(range(1024), dtype = np.float32)
isotropic1024coarse['n' + coord] = 1024
isotropic1024coarse['l' + coord] = 2*np.pi
isotropic1024coarse['d' + coord] = np.pi/512
isotropic1024coarse[coord + 'periodic'] = True
isotropic1024coarse[coord + 'uniform'] = True
mhd1024 = {}
for key in isotropic1024coarse.keys():
mhd1024[key] = isotropic1024coarse[key]
mhd1024['name'] = 'mhd1024'
for coord in ['x', 'y', 'z']:
mhd1024[coord + 'nodes'] = (np.pi/512)*np.array(range(1024), dtype = np.float32)
mhd1024['n' + coord] = 1024
mhd1024['l' + coord] = 2*np.pi
mhd1024['d' + coord] = np.pi/512
mhd1024[coord + 'periodic'] = True
mhd1024[coord + 'uniform'] = True
mhd1024['time'] = np.array(range(1024), dtype = np.float32) * 2.56 / 1024
mhd1024['nu'] = 1.1e-4
mhd1024['eta'] = 1.1e-4
channel = {'name' : 'channel',
'xnodes' : np.load(os.path.join(package_dir, 'data/channel_xgrid.npy')),
'ynodes' : np.load(os.path.join(package_dir, 'data/channel_ygrid.npy')),
'znodes' : np.load(os.path.join(package_dir, 'data/channel_zgrid.npy')),
'lx' : 8*np.pi,
'ly' : 2.,
'lz' : 3*np.pi,}
for coord in ['x', 'z']:
channel['n' + coord] = channel[coord + 'nodes'].shape[0]
channel[coord + 'periodic'] = True
channel[coord + 'uniform'] = True
channel['d' + coord] = channel['l' + coord] / channel['n' + coord]
channel['ny'] = 512
channel['dy'] = channel['ynodes'][1:] - channel['ynodes'][:channel['ynodes'].shape[0]-1]
channel['dy'] = np.append(channel['dy'], [channel['dy'][0]])
channel['yperiodic'] = False
channel['yuniform'] = False
| import numpy as np
import os
package_dir, package_filename = os.path.split(__file__)
isotropic1024coarse = {'name' : 'isotropic1024coarse'}
for coord in ['x', 'y', 'z']:
isotropic1024coarse[coord + 'nodes'] = (np.pi/512)*np.array(range(1024), dtype = np.float32)
isotropic1024coarse['n' + coord] = 1024
isotropic1024coarse['l' + coord] = 2*np.pi
isotropic1024coarse['d' + coord] = np.pi/512
isotropic1024coarse[coord + 'periodic'] = True
isotropic1024coarse[coord + 'uniform'] = True
mhd1024 = {}
for key in isotropic1024coarse.keys():
mhd1024[key] = isotropic1024coarse[key]
mhd1024['name'] = 'mhd1024'
for coord in ['x', 'y', 'z']:
mhd1024[coord + 'nodes'] = (np.pi/512)*np.array(range(1024), dtype = np.float32)
mhd1024['n' + coord] = 1024
mhd1024['l' + coord] = 2*np.pi
mhd1024['d' + coord] = np.pi/512
mhd1024[coord + 'periodic'] = True
mhd1024[coord + 'uniform'] = True
mhd1024['time'] = np.array(range(1024), dtype = np.float32) * 2.56 / 1024
channel = {'name' : 'channel',
'xnodes' : np.load(os.path.join(package_dir, 'data/channel_xgrid.npy')),
'ynodes' : np.load(os.path.join(package_dir, 'data/channel_ygrid.npy')),
'znodes' : np.load(os.path.join(package_dir, 'data/channel_zgrid.npy')),
'lx' : 8*np.pi,
'ly' : 2.,
'lz' : 3*np.pi,}
for coord in ['x', 'z']:
channel['n' + coord] = channel[coord + 'nodes'].shape[0]
channel[coord + 'periodic'] = True
channel[coord + 'uniform'] = True
channel['d' + coord] = channel['l' + coord] / channel['n' + coord]
channel['ny'] = 512
channel['dy'] = channel['ynodes'][1:] - channel['ynodes'][:channel['ynodes'].shape[0]-1]
channel['dy'] = np.append(channel['dy'], [channel['dy'][0]])
channel['yperiodic'] = False
channel['yuniform'] = False
| apache-2.0 | Python |
577534988206b91b2f99f60cb48140b4c92acf4a | Reduce logging around http clients | dlecocq/nsq-py,dlecocq/nsq-py | nsq/http/__init__.py | nsq/http/__init__.py | '''Our clients for interacting with various clients'''
from decorator import decorator
import requests
from .. import json, logger
from ..exceptions import NSQException
@decorator
def wrap(function, *args, **kwargs):
'''Wrap a function that returns a request with some exception handling'''
try:
req = function(*args, **kwargs)
logger.debug('Got %s: %s', req.status_code, req.content)
if req.status_code == 200:
return req
else:
raise ClientException(req.reason, req.content)
except ClientException:
raise
except Exception as exc:
raise ClientException(exc)
@decorator
def json_wrap(function, *args, **kwargs):
'''Return the json content of a function that returns a request'''
try:
# Some responses have data = None, but they generally signal a
# successful API call as well.
return json.loads(function(*args, **kwargs).content)['data'] or True
except Exception as exc:
raise ClientException(exc)
@decorator
def ok_check(function, *args, **kwargs):
'''Ensure that the response body is OK'''
req = function(*args, **kwargs)
if req.content.lower() != 'ok':
raise ClientException(req.content)
return req.content
class ClientException(NSQException):
'''An exception class for all client errors'''
class BaseClient(object):
'''Base client class'''
def __init__(self, host):
'''Host may be a 'host:port' string or a (host, port) tuple'''
if isinstance(host, basestring):
# Strip off the scheme if any was provideds
_, __, hostname = host.partition('//')
self._host, _, self._port = hostname.partition(':')
elif isinstance(host, (tuple, list)):
self._host, self._port = host
else:
raise TypeError('Host must be a string or tuple')
assert self._host, 'Must provide a host'
assert self._port, 'Must provide a port'
@wrap
def get(self, path, *args, **kwargs):
'''GET the provided endpoint'''
url = 'http://%s:%s%s' % (self._host, self._port, path)
logger.debug('GET %s with %s, %s', url, args, kwargs)
return requests.get(url, *args, **kwargs)
@wrap
def post(self, path, *args, **kwargs):
'''POST to the provided endpoint'''
url = 'http://%s:%s%s' % (self._host, self._port, path)
logger.debug('POST %s with %s, %s', url, args, kwargs)
return requests.post(url, *args, **kwargs)
| '''Our clients for interacting with various clients'''
from decorator import decorator
import requests
from .. import json, logger
from ..exceptions import NSQException
@decorator
def wrap(function, *args, **kwargs):
'''Wrap a function that returns a request with some exception handling'''
try:
req = function(*args, **kwargs)
logger.info('Got %s: %s', req.status_code, req.content)
if req.status_code == 200:
return req
else:
raise ClientException(req.reason, req.content)
except ClientException:
raise
except Exception as exc:
raise ClientException(exc)
@decorator
def json_wrap(function, *args, **kwargs):
'''Return the json content of a function that returns a request'''
try:
# Some responses have data = None, but they generally signal a
# successful API call as well.
return json.loads(function(*args, **kwargs).content)['data'] or True
except Exception as exc:
raise ClientException(exc)
@decorator
def ok_check(function, *args, **kwargs):
'''Ensure that the response body is OK'''
req = function(*args, **kwargs)
if req.content.lower() != 'ok':
raise ClientException(req.content)
return req.content
class ClientException(NSQException):
'''An exception class for all client errors'''
class BaseClient(object):
'''Base client class'''
def __init__(self, host):
'''Host may be a 'host:port' string or a (host, port) tuple'''
if isinstance(host, basestring):
# Strip off the scheme if any was provideds
_, __, hostname = host.partition('//')
self._host, _, self._port = hostname.partition(':')
elif isinstance(host, (tuple, list)):
self._host, self._port = host
else:
raise TypeError('Host must be a string or tuple')
assert self._host, 'Must provide a host'
assert self._port, 'Must provide a port'
@wrap
def get(self, path, *args, **kwargs):
'''GET the provided endpoint'''
url = 'http://%s:%s%s' % (self._host, self._port, path)
logger.info('Get %s with %s, %s', url, args, kwargs)
return requests.get(url, *args, **kwargs)
@wrap
def post(self, path, *args, **kwargs):
'''POST to the provided endpoint'''
return requests.post(
'http://%s:%s%s' % (self._host, self._port, path), *args, **kwargs)
| mit | Python |
fda70bfff7ec9d25f7c08c92d570dfc52754734c | Fix typo in description scting. | usc-isi-i2/dig-crf,usc-isi-i2/dig-crf | applyCrfPjSparkTest.py | applyCrfPjSparkTest.py | #!/usr/bin/env python
"""This program will use Apache Spark to read a keyed JSON Lines file (such as
adjudicated_modeled_live_eyehair_100.kjsonl), convert it to a pair RDD,
process it with CRF++, and print detected attributes as pair RDD keyed JSON
files, formatted to Karma's liking. The keys in the input file will be passed
through to the output file, but the text and tokens will not.
"""
import argparse
import sys
from pyspark import SparkContext
import applyCrf
def main(argv=None):
'''this is called if run from command line'''
parser = argparse.ArgumentParser()
parser.add_argument('-d','--debug', help="Optionally give debugging feedback.", required=False, action='store_true')
parser.add_argument('--download', help="Optionally ask Spark to download the feature list and model files to the clients.", required=False, action='store_true')
parser.add_argument('-f','--featlist', help="Required input file with features to be extracted, one feature entry per line.", required=True)
parser.add_argument('-i','--input', help="Required input file with Web scraping sentences in keyed JSON Lines format.", required=True)
parser.add_argument('-m','--model', help="Required input model file.", required=True)
parser.add_argument('-o','--output', help="Required output file of phrases in keyed JSON Lines format.", required=True)
parser.add_argument('-p','--partitions', help="Optional number of partitions.", required=False, type=int, default=1)
parser.add_argument('-s','--statistics', help="Optionally report use statistics.", required=False, action='store_true')
args = parser.parse_args()
if args.debug:
print "Starting applyCrfPjSparkTest."
sc = SparkContext()
featlist = args.featlist
model = args.model
if args.download:
# Ask Spark to download the feature list and model files from the driver to the clients.
sc.addFile(featlist)
sc.addFile(model)
inputLinesRDD = sc.textFile(args.input, args.partitions)
inputPairsRDD = inputLinesRDD.map(lambda s: s.split('\t', 1))
tagger = applyCrf.ApplyCrfPj(featlist, model, args.debug, args.statistics)
tagger.setDownload(args.download)
resultsRDD = tagger.perform(inputPairsRDD)
resultsRDD.saveAsTextFile(args.output)
if args.debug:
print "Ending applyCrfPjSparkTest."
# call main() if this is run as standalone
if __name__ == "__main__":
sys.exit(main())
| #!/usr/bin/env python
"""This program will use Apache Spark to read a keyed JSON Lines file (such as
adjudicated_modeled_live_eyehair_100.kjsonl), convert it to a pair RDD,
process it with CRF++, and print detected attributes as pair RDD keyed JSON
files, formatted to Karma's liking. The keys in the input file will be passed
through to the output file, but the text and tokens will not.
"""
import argparse
import sys
from pyspark import SparkContext
import applyCrf
def main(argv=None):
'''this is called if run from command line'''
parser = argparse.ArgumentParser()
parser.add_argument('-d','--debug', help="Optionallly give debugging feedback.", required=False, action='store_true')
parser.add_argument('--download', help="Optionally ask Spark to download the feature list and model files to the clients.", required=False, action='store_true')
parser.add_argument('-f','--featlist', help="Required input file with features to be extracted, one feature entry per line.", required=True)
parser.add_argument('-i','--input', help="Required input file with Web scraping sentences in keyed JSON Lines format.", required=True)
parser.add_argument('-m','--model', help="Required input model file.", required=True)
parser.add_argument('-o','--output', help="Required output file of phrases in keyed JSON Lines format.", required=True)
parser.add_argument('-p','--partitions', help="Optional number of partitions.", required=False, type=int, default=1)
parser.add_argument('-s','--statistics', help="Optionally report use statistics.", required=False, action='store_true')
args = parser.parse_args()
if args.debug:
print "Starting applyCrfPjSparkTest."
sc = SparkContext()
featlist = args.featlist
model = args.model
if args.download:
# Ask Spark to download the feature list and model files from the driver to the clients.
sc.addFile(featlist)
sc.addFile(model)
inputLinesRDD = sc.textFile(args.input, args.partitions)
inputPairsRDD = inputLinesRDD.map(lambda s: s.split('\t', 1))
tagger = applyCrf.ApplyCrfPj(featlist, model, args.debug, args.statistics)
tagger.setDownload(args.download)
resultsRDD = tagger.perform(inputPairsRDD)
resultsRDD.saveAsTextFile(args.output)
if args.debug:
print "Ending applyCrfPjSparkTest."
# call main() if this is run as standalone
if __name__ == "__main__":
sys.exit(main())
| apache-2.0 | Python |
691093e38598959f98b319f7c57852496a26ba90 | Fix project name in urlconf | onespacemedia/cms-jobs,onespacemedia/cms-jobs | apps/careers/models.py | apps/careers/models.py | import watson
from cms.apps.pages.models import ContentBase
from cms.models import HtmlField, SearchMetaBase
from django.db import models
class Careers(ContentBase):
# The heading that the admin places this content under.
classifier = "apps"
# The urlconf used to power this content's views.
urlconf = "{{ project_name }}.apps.careers.urls"
standfirst = models.TextField(
blank=True,
null=True
)
per_page = models.IntegerField(
"careers per page",
default=5,
blank=True,
null=True
)
def __unicode__(self):
return self.page.title
class Career(SearchMetaBase):
page = models.ForeignKey(
Careers
)
title = models.CharField(
max_length=256,
)
slug = models.CharField(
max_length=256,
unique=True
)
location = models.CharField(
max_length=256,
blank=True,
null=True
)
summary = models.TextField(
blank=True,
null=True
)
description = HtmlField()
email_address = models.EmailField()
order = models.PositiveIntegerField(
default=0
)
class Meta:
ordering = ('order',)
def __unicode__(self):
return self.title
def get_absolute_url(self):
return self.page.page.reverse('career', kwargs={
'slug': self.slug,
})
watson.register(Career)
| import watson
from cms.apps.pages.models import ContentBase
from cms.models import HtmlField, SearchMetaBase
from django.db import models
class Careers(ContentBase):
# The heading that the admin places this content under.
classifier = "apps"
# The urlconf used to power this content's views.
urlconf = "phixflow.apps.careers.urls"
standfirst = models.TextField(
blank=True,
null=True
)
per_page = models.IntegerField(
"careers per page",
default=5,
blank=True,
null=True
)
def __unicode__(self):
return self.page.title
class Career(SearchMetaBase):
page = models.ForeignKey(
Careers
)
title = models.CharField(
max_length=256,
)
slug = models.CharField(
max_length=256,
unique=True
)
location = models.CharField(
max_length=256,
blank=True,
null=True
)
summary = models.TextField(
blank=True,
null=True
)
description = HtmlField()
email_address = models.EmailField()
order = models.PositiveIntegerField(
default=0
)
class Meta:
ordering = ('order',)
def __unicode__(self):
return self.title
def get_absolute_url(self):
return self.page.page.reverse('career', kwargs={
'slug': self.slug,
})
watson.register(Career)
| mit | Python |
56efc8fe371f9bbd7a39740908f5ca7a97010ad9 | insert query modification | P1X-in/Tanks-of-Freedom-Server | tof_server/views.py | tof_server/views.py | """This module provides views for application."""
from tof_server import app, versioning, mysql
from flask import jsonify, make_response
import string, random
@app.route('/')
def index():
"""Server information"""
return jsonify({
'server-version' : versioning.SERVER_VERSION,
'client-versions' : versioning.CLIENT_VERSIONS
})
@app.route('/players', methods=['POST'])
def generate_new_id():
"""Method for generating new unique player ids"""
try:
cursor = mysql.connection.cursor()
new_pin = ''
characters_pool = string.ascii_uppercase + string.digits
for _ in range(8):
new_pin = new_pin + random.SystemRandom().choice(characters_pool)
insert_sql = "INSERT INTO players (auto_pin) VALUES (%s)"
id_sql = "SELECT LAST_INSERT_ID()"
cursor.execute(insert_sql, (new_pin))
cursor.execute(id_sql)
insert_data = cursor.fetchone()
return jsonify({
'id' : insert_data[0],
'pin' : new_pin
})
except Exception as er_msg:
return make_response(jsonify({
'error' : str(er_msg)
}), 500)
finally:
cursor.close()
| """This module provides views for application."""
from tof_server import app, versioning, mysql
from flask import jsonify, make_response
import string, random
@app.route('/')
def index():
"""Server information"""
return jsonify({
'server-version' : versioning.SERVER_VERSION,
'client-versions' : versioning.CLIENT_VERSIONS
})
@app.route('/players', methods=['POST'])
def generate_new_id():
"""Method for generating new unique player ids"""
try:
cursor = mysql.connection.cursor()
new_pin = ''
characters_pool = string.ascii_uppercase + string.digits
for _ in range(8):
new_pin = new_pin + random.SystemRandom().choice(characters_pool)
insert_sql = "INSERT INTO players (auto_pin) VALUES ('%s')"
id_sql = "SELECT LAST_INSERT_ID()"
cursor.execute(insert_sql, (new_pin))
cursor.execute(id_sql)
insert_data = cursor.fetchone()
return jsonify({
'id' : insert_data[0],
'pin' : new_pin
})
except Exception as er_msg:
return make_response(jsonify({
'error' : str(er_msg)
}), 500)
finally:
cursor.close()
| mit | Python |
28768a5826d83bd9cccb56d0d31dc1e83b671f65 | Add command to output. | Kegbot/kegbot-server,Kegbot/kegbot-server,Kegbot/kegbot-server,Kegbot/kegbot-server,Kegbot/kegbot-server | pykeg/core/tests.py | pykeg/core/tests.py | # Copyright 2014 Bevbot LLC, All Rights Reserved
#
# This file is part of the Pykeg package of the Kegbot project.
# For more information on Pykeg or Kegbot, see http://kegbot.org/
#
# Pykeg is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Pykeg is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Pykeg. If not, see <http://www.gnu.org/licenses/>.
"""Generic unittests."""
import os
import subprocess
from django.test import TestCase
from django.utils.importlib import import_module
def path_for_import(name):
"""
Returns the directory path for the given package or module.
"""
return os.path.dirname(os.path.abspath(import_module(name).__file__))
class CoreTests(TestCase):
def test_flake8(self):
root_path = path_for_import('pykeg')
command = 'flake8 {}'.format(root_path)
try:
subprocess.check_output(command.split())
except subprocess.CalledProcessError as e:
print 'command: {}'.format(command)
print e.output
self.fail('flake8 failed with return code {}.'.format(e.returncode))
| # Copyright 2014 Bevbot LLC, All Rights Reserved
#
# This file is part of the Pykeg package of the Kegbot project.
# For more information on Pykeg or Kegbot, see http://kegbot.org/
#
# Pykeg is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Pykeg is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Pykeg. If not, see <http://www.gnu.org/licenses/>.
"""Generic unittests."""
import os
import subprocess
from django.test import TestCase
from django.utils.importlib import import_module
def path_for_import(name):
"""
Returns the directory path for the given package or module.
"""
return os.path.dirname(os.path.abspath(import_module(name).__file__))
class CoreTests(TestCase):
def test_flake8(self):
root_path = path_for_import('pykeg')
command = 'flake8 {}'.format(root_path)
try:
subprocess.check_output(command.split())
except subprocess.CalledProcessError as e:
print e.output
self.fail('flake8 failed with return code {}.'.format(e.returncode))
| mit | Python |
71bac0bf51a75094e2f8e4edaa94a4840e0a3572 | Include Chapter class in __init__.py (#304) | JelteF/PyLaTeX,JelteF/PyLaTeX | pylatex/__init__.py | pylatex/__init__.py | """
A library for creating Latex files.
.. :copyright: (c) 2014 by Jelte Fennema.
:license: MIT, see License for more details.
"""
from .basic import HugeText, NewPage, LineBreak, NewLine, HFill, LargeText, \
MediumText, SmallText, FootnoteText, TextColor
from .document import Document
from .frames import MdFramed, FBox
from .math import Math, VectorName, Matrix, Alignat
from .package import Package
from .section import Chapter, Section, Subsection, Subsubsection
from .table import Table, MultiColumn, MultiRow, Tabular, Tabu, LongTable, \
LongTabu, Tabularx, LongTabularx, ColumnType
from .tikz import TikZ, Axis, Plot, TikZNode, TikZDraw, TikZCoordinate, \
TikZPathList, TikZPath, TikZUserPath, TikZOptions, TikZNodeAnchor, \
TikZScope
from .figure import Figure, SubFigure, StandAloneGraphic
from .lists import Enumerate, Itemize, Description
from .quantities import Quantity
from .base_classes import Command, UnsafeCommand
from .utils import NoEscape, escape_latex
from .errors import TableRowSizeError
from .headfoot import PageStyle, Head, Foot, simple_page_number
from .position import Center, FlushLeft, FlushRight, MiniPage, TextBlock, \
HorizontalSpace, VerticalSpace
from .labelref import Marker, Label, Ref, Pageref, Eqref, Autoref, Hyperref
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
| """
A library for creating Latex files.
.. :copyright: (c) 2014 by Jelte Fennema.
:license: MIT, see License for more details.
"""
from .basic import HugeText, NewPage, LineBreak, NewLine, HFill, LargeText, \
MediumText, SmallText, FootnoteText, TextColor
from .document import Document
from .frames import MdFramed, FBox
from .math import Math, VectorName, Matrix, Alignat
from .package import Package
from .section import Section, Subsection, Subsubsection
from .table import Table, MultiColumn, MultiRow, Tabular, Tabu, LongTable, \
LongTabu, Tabularx, LongTabularx, ColumnType
from .tikz import TikZ, Axis, Plot, TikZNode, TikZDraw, TikZCoordinate, \
TikZPathList, TikZPath, TikZUserPath, TikZOptions, TikZNodeAnchor, \
TikZScope
from .figure import Figure, SubFigure, StandAloneGraphic
from .lists import Enumerate, Itemize, Description
from .quantities import Quantity
from .base_classes import Command, UnsafeCommand
from .utils import NoEscape, escape_latex
from .errors import TableRowSizeError
from .headfoot import PageStyle, Head, Foot, simple_page_number
from .position import Center, FlushLeft, FlushRight, MiniPage, TextBlock, \
HorizontalSpace, VerticalSpace
from .labelref import Marker, Label, Ref, Pageref, Eqref, Autoref, Hyperref
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
| mit | Python |
fd5f7781ceb696ab90ddd733424695361fb02233 | Fix typo, 'unitest.main()' -> 'unittest.main()'. | armandobs14/rdflib,ssssam/rdflib,ssssam/rdflib,yingerj/rdflib,yingerj/rdflib,RDFLib/rdflib,yingerj/rdflib,avorio/rdflib,marma/rdflib,RDFLib/rdflib,avorio/rdflib,yingerj/rdflib,dbs/rdflib,dbs/rdflib,armandobs14/rdflib,RDFLib/rdflib,armandobs14/rdflib,avorio/rdflib,ssssam/rdflib,marma/rdflib,marma/rdflib,armandobs14/rdflib,marma/rdflib,RDFLib/rdflib,dbs/rdflib,dbs/rdflib,avorio/rdflib,ssssam/rdflib | test/events.py | test/events.py |
import unittest
from rdflib import events
class AddedEvent(events.Event): pass
class RemovedEvent(events.Event): pass
def subscribe_to(source, target):
target.subscribe(AddedEvent, source._add_handler)
target.subscribe(RemovedEvent, source._remove_handler)
def subscribe_all(caches):
for cache in caches:
for other in caches:
if other != cache:
subscribe_to(cache, other)
class Cache(events.Dispatcher):
def __init__(self, data=None):
if data is None: data = {}
self._data = data
self.subscribe(AddedEvent, self._add_handler)
self.subscribe(RemovedEvent, self._remove_handler)
def _add_handler(self, event):
self._data[event.key] = event.value
def _remove_handler(self, event):
del self._data[event.key]
def __getitem__(self, key):
return self._data[key]
def __setitem__(self, key, value):
self.dispatch(AddedEvent(key=key, value=value))
def __delitem__(self, key):
self.dispatch(RemovedEvent(key=key))
def has_key(self, key):
return self._data.has_key(key)
class EventTestCase(unittest.TestCase):
def testEvents(self):
c1 = Cache()
c2 = Cache()
c3 = Cache()
subscribe_all([c1,c2,c3])
c1['bob'] = 'uncle'
assert c2['bob'] == 'uncle'
assert c3['bob'] == 'uncle'
del c3['bob']
assert c1.has_key('bob') == False
assert c2.has_key('bob') == False
if __name__ == "__main__":
unittest.main()
|
import unittest
from rdflib import events
class AddedEvent(events.Event):
    """Event announcing that a key/value pair should be stored."""
    pass


class RemovedEvent(events.Event):
    """Event announcing that a key should be dropped."""
    pass


def subscribe_to(source, target):
    """Route *target*'s add/remove events into *source*'s handlers."""
    target.subscribe(AddedEvent, source._add_handler)
    target.subscribe(RemovedEvent, source._remove_handler)


def subscribe_all(caches):
    """Fully mesh the given caches so every change reaches all of them."""
    for receiver in caches:
        for sender in caches:
            if sender != receiver:
                subscribe_to(receiver, sender)


class Cache(events.Dispatcher):
    """Dict-like store whose mutations are applied via dispatched events."""

    def __init__(self, data=None):
        self._data = {} if data is None else data
        self.subscribe(AddedEvent, self._add_handler)
        self.subscribe(RemovedEvent, self._remove_handler)

    def _add_handler(self, event):
        self._data[event.key] = event.value

    def _remove_handler(self, event):
        del self._data[event.key]

    def __getitem__(self, key):
        return self._data[key]

    def __setitem__(self, key, value):
        # Dispatch rather than write directly; our own handler stores it.
        self.dispatch(AddedEvent(key=key, value=value))

    def __delitem__(self, key):
        self.dispatch(RemovedEvent(key=key))

    def has_key(self, key):
        return key in self._data
class EventTestCase(unittest.TestCase):
    """End-to-end check that cache mutations propagate over events."""

    def testEvents(self):
        c1 = Cache()
        c2 = Cache()
        c3 = Cache()
        subscribe_all([c1, c2, c3])
        c1['bob'] = 'uncle'
        assert c2['bob'] == 'uncle'
        assert c3['bob'] == 'uncle'
        del c3['bob']
        assert c1.has_key('bob') == False
        assert c2.has_key('bob') == False


if __name__ == "__main__":
    # Bug fix: was "unitest.main()", which raised NameError when the test
    # module was run as a script.
    unittest.main()
| bsd-3-clause | Python |
718efabd2af075fa30cc84000e4d4594418cf42d | Tweak genini. | LingxiaoJIA/gem5,haowu4682/gem5,haowu4682/gem5,andrewfu0325/gem5-aladdin,andrewfu0325/gem5-aladdin,andrewfu0325/gem5-aladdin,andrewfu0325/gem5-aladdin,haowu4682/gem5,haowu4682/gem5,haowu4682/gem5,haowu4682/gem5,andrewfu0325/gem5-aladdin,andrewfu0325/gem5-aladdin,LingxiaoJIA/gem5,haowu4682/gem5,haowu4682/gem5,LingxiaoJIA/gem5,andrewfu0325/gem5-aladdin,haowu4682/gem5,LingxiaoJIA/gem5,LingxiaoJIA/gem5,LingxiaoJIA/gem5,LingxiaoJIA/gem5 | test/genini.py | test/genini.py | #!/usr/bin/env python
# Copyright (c) 2005 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import getopt, os, os.path, sys
from os.path import join as joinpath, realpath

# Anchor the module search path at this script's own directory so the m5
# configuration machinery is found regardless of the current directory.
mypath = sys.path[0]
sys.path.append(joinpath(mypath, '..'))
sys.path.append(joinpath(mypath, '../configs/kernel'))
sys.path.append(joinpath(mypath, '../sim/pyconfig'))

from importer import mpy_exec, mpy_execfile, AddToPath
from m5config import *

try:
    opts, args = getopt.getopt(sys.argv[1:], '-E:')
    for flag, payload in opts:
        if flag == '-E':
            # -E NAME=VALUE defines a variable; a bare -E NAME sets it True.
            eq = payload.find('=')
            if eq < 0:
                name = payload
                value = True
            else:
                name = payload[:eq]
                value = payload[eq + 1:]
            env[name] = value
except getopt.GetoptError:
    sys.exit('Improper Usage')

# Execute each configuration file with its own directory importable.
for arg in args:
    AddToPath(os.path.dirname(arg))
    mpy_execfile(arg)

# If a config defined a Root subclass named "root", instantiate it.
if globals().has_key('root') and isinstance(root, type) \
       and issubclass(root, Root):
    instantiate(root)
| #!/usr/bin/env python
# Copyright (c) 2005 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import getopt, os, os.path, sys
from os.path import join as joinpath

# Bug fix: the search path was extended with cwd-relative entries
# ('..', '../configs/kernel', ...), so the script only worked when launched
# from its own directory.  Anchor the entries at the script's location
# (sys.path[0]) instead.
mypath = sys.path[0]
sys.path.append(joinpath(mypath, '..'))
sys.path.append(joinpath(mypath, '../configs/kernel'))
sys.path.append(joinpath(mypath, '../sim/pyconfig'))

from importer import mpy_exec, mpy_execfile, AddToPath
from m5config import *

try:
    opts, args = getopt.getopt(sys.argv[1:], '-E:')
    for o, a in opts:
        if o == '-E':
            # -E NAME=VALUE defines a variable; a bare -E NAME sets it True.
            offset = a.find('=')
            if offset == -1:
                name = a
                value = True
            else:
                name = a[:offset]
                value = a[offset + 1:]
            env[name] = value
except getopt.GetoptError:
    sys.exit('Improper Usage')

# Execute each configuration file with its own directory importable.
for arg in args:
    AddToPath(os.path.dirname(arg))
    mpy_execfile(arg)

# If a config defined a Root subclass named "root", instantiate it.
if globals().has_key('root') and isinstance(root, type) \
       and issubclass(root, Root):
    instantiate(root)
| bsd-3-clause | Python |
46bdf7225012c33835b41a721eece41d7256ec86 | Fix removed django.conf.urls.defaults import. | trantu/seantis-questionnaire,affan2/ed-questionnaire,eugena/ed-questionnaire,seantis/seantis-questionnaire,eugena/ed-questionnaire,daniboy/seantis-questionnaire,trantu/seantis-questionnaire,eugena/ed-questionnaire,JanOosting/ed-questionnaire,affan2/ed-questionnaire,trantu/seantis-questionnaire,JanOosting/ed-questionnaire,seantis/seantis-questionnaire,eugena/seantis-questionnaire,affan2/ed-questionnaire,eugena/seantis-questionnaire,seantis/seantis-questionnaire,daniboy/seantis-questionnaire,eugena/seantis-questionnaire,daniboy/seantis-questionnaire,JanOosting/ed-questionnaire | questionnaire/urls.py | questionnaire/urls.py | # vim: set fileencoding=utf-8
from django.conf.urls import *
from views import *

# Order matters: the specific csv/progress/questionset patterns must come
# before the final catch-all "questionnaire" entry, which matches any
# single path component.
urlpatterns = patterns(
    '',
    url(r'^$', questionnaire, name='questionnaire_noargs'),
    url(r'^csv/(?P<qid>\d+)/$', export_csv, name='export_csv'),
    url(r'^(?P<runcode>[^/]+)/progress/$', get_async_progress,
        name='progress'),
    url(r'^(?P<runcode>[^/]+)/(?P<qs>[-]{0,1}\d+)/$', questionnaire,
        name='questionset'),
    url(r'^(?P<runcode>[^/]+)/$', questionnaire, name='questionnaire'),
)
| # vim: set fileencoding=utf-8
# Bug fix: ``django.conf.urls.defaults`` was deprecated in Django 1.4 and
# removed in 1.6; ``patterns``/``url`` live in ``django.conf.urls``.
from django.conf.urls import *
from views import *

# Order matters: specific patterns precede the catch-all entry.
urlpatterns = patterns('',
    url(r'^$',
        questionnaire, name='questionnaire_noargs'),
    url(r'^csv/(?P<qid>\d+)/$',
        export_csv, name='export_csv'),
    url(r'^(?P<runcode>[^/]+)/progress/$',
        get_async_progress, name='progress'),
    url(r'^(?P<runcode>[^/]+)/(?P<qs>[-]{0,1}\d+)/$',
        questionnaire, name='questionset'),
    url(r'^(?P<runcode>[^/]+)/$',
        questionnaire, name='questionnaire'),
)
| bsd-3-clause | Python |
cdb5023d841158f7040ee263a2eb85f11b5c5836 | fix brokens tests for mds | ceph/ceph-ansible,travmi/ceph-ansible,fgal/ceph-ansible,jtaleric/ceph-ansible,jtaleric/ceph-ansible,ceph/ceph-ansible,font/ceph-ansible,fgal/ceph-ansible,travmi/ceph-ansible,font/ceph-ansible | tests/functional/tests/mds/test_mds.py | tests/functional/tests/mds/test_mds.py | import pytest
import json
class TestMDSs(object):
@pytest.mark.no_docker
def test_mds_is_installed(self, node, host):
assert host.package("ceph-mds").is_installed
def test_mds_service_is_running(self, node, host):
service_name = "ceph-mds@{hostname}".format(
hostname=node["vars"]["inventory_hostname"]
)
assert host.service(service_name).is_running
def test_mds_service_is_enabled(self, node, host):
service_name = "ceph-mds@{hostname}".format(
hostname=node["vars"]["inventory_hostname"]
)
assert host.service(service_name).is_enabled
@pytest.mark.no_docker
def test_mds_is_up(self, node, host):
hostname = node["vars"]["inventory_hostname"]
cmd = "sudo ceph --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/{cluster}.keyring --cluster={cluster} --connect-timeout 5 -f json -s".format(cluster=node['cluster_name'])
output = host.check_output(cmd)
daemons = json.loads(output)["fsmap"]["by_rank"][0]["name"]
assert hostname in daemons
@pytest.mark.docker
def test_docker_mds_is_up(self, node, host):
hostname = node["vars"]["inventory_hostname"]
cmd = "sudo docker exec ceph-mds-{hostname} ceph --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/{cluster}.keyring --cluster={cluster} --connect-timeout 5 -f json -s".format(
hostname=node["vars"]["inventory_hostname"],
cluster=node["cluster_name"]
)
output = host.check_output(cmd)
daemons = json.loads(output)["fsmap"]["by_rank"][0]["name"]
assert hostname in daemons
| import pytest
import json
class TestMDSs(object):
@pytest.mark.no_docker
def test_mds_is_installed(self, node, host):
assert host.package("ceph-mds").is_installed
def test_mds_service_is_running(self, node, host):
service_name = "ceph-mds@{hostname}".format(
hostname=node["vars"]["inventory_hostname"]
)
assert host.service(service_name).is_running
def test_mds_service_is_enabled(self, node, host):
service_name = "ceph-mds@{hostname}".format(
hostname=node["vars"]["inventory_hostname"]
)
assert host.service(service_name).is_enabled
@pytest.mark.no_docker
def test_mds_is_up(self, node, host):
hostname = node["vars"]["inventory_hostname"]
cmd = "sudo ceph --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/{cluster}.keyring --cluster={cluster} --connect-timeout 5 -f json -s".format(cluster=node['cluster_name'])
output = host.check_output(cmd)
daemons = json.loads(output)["fsmap"]["by_rank"][0]["name"]
assert hostname in daemons
@pytest.mark.docker
def test_docker_mds_is_up(self, node, host):
hostname = node["vars"]["inventory_hostname"]
hostname = node["groups"]["mons"][0]["inventory_hostname"]
cmd = "sudo docker exec ceph-mds-{hostname} ceph --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/{cluster}.keyring --cluster={cluster} --connect-timeout 5 -f json -s".format(
hostname=node["vars"]["inventory_hostname"],
cluster=node["cluster_name"]
)
output = host.check_output(cmd)
daemons = json.loads(output)["fsmap"]["by_rank"][0]["name"]
assert hostname in daemons
| apache-2.0 | Python |
d139aba5ef1ddbb87bf28171e3322c41819f2b06 | Bump version 0.3.7 | douban/pymesos | pymesos/__init__.py | pymesos/__init__.py | from .interface import Scheduler, Executor, OperatorMaster
from .scheduler import MesosSchedulerDriver
from .executor import MesosExecutorDriver
from .operator_v1 import MesosOperatorMasterDriver, MesosOperatorAgentDriver
from .utils import encode_data, decode_data
__VERSION__ = '0.3.7'
__all__ = (
'Scheduler',
'MesosSchedulerDriver',
'Executor',
'MesosExecutorDriver',
'encode_data',
'decode_data',
'OperatorMaster',
'MesosOperatorMasterDriver',
'MesosOperatorAgentDriver',
)
| from .interface import Scheduler, Executor, OperatorMaster
from .scheduler import MesosSchedulerDriver
from .executor import MesosExecutorDriver
from .operator_v1 import MesosOperatorMasterDriver, MesosOperatorAgentDriver
from .utils import encode_data, decode_data
__VERSION__ = '0.3.6'
__all__ = (
'Scheduler',
'MesosSchedulerDriver',
'Executor',
'MesosExecutorDriver',
'encode_data',
'decode_data',
'OperatorMaster',
'MesosOperatorMasterDriver',
'MesosOperatorAgentDriver',
)
| bsd-3-clause | Python |
4a1351c546967701327adc17a704ccedfb15abdd | Update version to 1.3-20 (#5615) | tmerrick1/spack,EmreAtes/spack,mfherbst/spack,krafczyk/spack,mfherbst/spack,mfherbst/spack,LLNL/spack,mfherbst/spack,matthiasdiener/spack,LLNL/spack,krafczyk/spack,lgarren/spack,mfherbst/spack,tmerrick1/spack,skosukhin/spack,skosukhin/spack,iulian787/spack,LLNL/spack,tmerrick1/spack,skosukhin/spack,skosukhin/spack,EmreAtes/spack,krafczyk/spack,lgarren/spack,lgarren/spack,matthiasdiener/spack,LLNL/spack,lgarren/spack,matthiasdiener/spack,EmreAtes/spack,tmerrick1/spack,skosukhin/spack,LLNL/spack,matthiasdiener/spack,tmerrick1/spack,lgarren/spack,iulian787/spack,iulian787/spack,krafczyk/spack,iulian787/spack,krafczyk/spack,matthiasdiener/spack,EmreAtes/spack,iulian787/spack,EmreAtes/spack | var/spack/repos/builtin/packages/r-boot/package.py | var/spack/repos/builtin/packages/r-boot/package.py | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *


class RBoot(RPackage):
    """Functions and datasets for bootstrapping from the book "Bootstrap
    Methods and Their Application" by A. C. Davison and D. V. Hinkley (1997,
    CUP), originally written by Angelo Canty for S."""

    # CRAN landing page and canonical source tarball for the package.
    homepage = "https://cran.r-project.org/package=boot"
    url = "https://cran.r-project.org/src/contrib/boot_1.3-18.tar.gz"
    # Scan the CRAN page itself for additional downloadable versions.
    list_url = homepage

    # Known releases, newest first (md5 checksums).
    version('1.3-20', 'bb879fb4204a4f94ab82c98dd1ad5eca')
    version('1.3-18', '711dd58af14e1027eb8377d9202e9b6f')
| ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RBoot(RPackage):
"""Functions and datasets for bootstrapping from the book "Bootstrap
Methods and Their Application" by A. C. Davison and D. V. Hinkley (1997,
CUP), originally written by Angelo Canty for S."""
homepage = "https://cran.r-project.org/package=boot"
url = "https://cran.r-project.org/src/contrib/boot_1.3-18.tar.gz"
version('1.3-18', '711dd58af14e1027eb8377d9202e9b6f')
| lgpl-2.1 | Python |
859b9972d37e742d2d6187c694f2e757fbd00897 | Update version to 2.1.14 (#5856) | mfherbst/spack,iulian787/spack,EmreAtes/spack,lgarren/spack,matthiasdiener/spack,iulian787/spack,EmreAtes/spack,LLNL/spack,krafczyk/spack,matthiasdiener/spack,skosukhin/spack,LLNL/spack,matthiasdiener/spack,tmerrick1/spack,skosukhin/spack,LLNL/spack,skosukhin/spack,tmerrick1/spack,mfherbst/spack,tmerrick1/spack,matthiasdiener/spack,lgarren/spack,matthiasdiener/spack,mfherbst/spack,lgarren/spack,EmreAtes/spack,tmerrick1/spack,tmerrick1/spack,lgarren/spack,lgarren/spack,mfherbst/spack,iulian787/spack,LLNL/spack,iulian787/spack,skosukhin/spack,krafczyk/spack,EmreAtes/spack,LLNL/spack,skosukhin/spack,krafczyk/spack,EmreAtes/spack,krafczyk/spack,krafczyk/spack,iulian787/spack,mfherbst/spack | var/spack/repos/builtin/packages/r-yaml/package.py | var/spack/repos/builtin/packages/r-yaml/package.py | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *


class RYaml(RPackage):
    """This package implements the libyaml YAML 1.1 parser and emitter
    (http://pyyaml.org/wiki/LibYAML) for R."""

    # CRAN landing page and canonical source tarball for the package.
    homepage = "https://cran.r-project.org/web/packages/yaml/index.html"
    url = "https://cran.r-project.org/src/contrib/yaml_2.1.13.tar.gz"
    # Scan the homepage for additional downloadable versions.
    list_url = homepage

    # Known releases, newest first (md5 checksums).
    version('2.1.14', '2de63248e6a122c368f8e4537426e35c')
    version('2.1.13', 'f2203ea395adaff6bd09134666191d9a')
| ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RYaml(RPackage):
"""This package implements the libyaml YAML 1.1 parser and emitter
(http://pyyaml.org/wiki/LibYAML) for R."""
homepage = "https://cran.r-project.org/web/packages/yaml/index.html"
url = "https://cran.r-project.org/src/contrib/yaml_2.1.13.tar.gz"
list_url = homepage
version('2.1.13', 'f2203ea395adaff6bd09134666191d9a')
| lgpl-2.1 | Python |
cafda2ccd8933d3e4b7a5605c95a2328d296b610 | Bump version | guildai/guild,guildai/guild,guildai/guild,guildai/guild | guild/__init__.py | guild/__init__.py | # Copyright 2017 TensorHub, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
import os
import subprocess
__version__ = "0.3.0.dev4"
__requires__ = [
# (<required module>, <distutils package req>)
("pip", "pip"),
("yaml", "PyYAML"),
("setuptools", "setuptools"),
("tabview", "tabview"),
("twine", "twine"),
("werkzeug", "Werkzeug"),
("whoosh", "Whoosh"),
]
__pkgdir__ = os.path.dirname(os.path.dirname(__file__))
def _try_init_git_attrs():
    """Best-effort: record git commit/status; a no-op outside a checkout.

    Stops at the first step that fails -- the status is only meaningful
    when the commit lookup succeeded.
    """
    for step in (_init_git_commit, _init_git_status):
        try:
            step()
        except (OSError, subprocess.CalledProcessError):
            return


def _init_git_commit():
    """Store the abbreviated hash of the checkout's HEAD commit."""
    short_hash = _git_cmd("git -C \"%(repo)s\" log -1 --oneline | cut -d' ' -f1")
    globals()["__git_commit__"] = short_hash


def _init_git_status():
    """Store 'git status -s' output lines; an empty list means a clean tree."""
    raw_status = _git_cmd("git -C \"%(repo)s\" status -s")
    globals()["__git_status__"] = raw_status.split("\n") if raw_status else []
def _git_cmd(cmd, **kw):
repo = os.path.dirname(__file__)
cmd = cmd % dict(repo=repo, **kw)
null = open(os.devnull, "w")
out = subprocess.check_output(cmd, stderr=null, shell=True)
return out.decode("utf-8").strip()
def version():
    """Return the package version, annotated with git info in dev checkouts.

    Outside a git checkout this is just ``__version__``; inside one it is
    ``"<version> (dev <commit>)"`` with a trailing ``*`` on the commit when
    the working tree has uncommitted changes.
    """
    commit = globals().get("__git_commit__")
    if not commit:
        return __version__
    dirty_marker = "*" if globals().get("__git_status__", []) else ""
    return "%s (dev %s%s)" % (__version__, commit, dirty_marker)


_try_init_git_attrs()
| # Copyright 2017 TensorHub, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
import os
import subprocess
__version__ = "0.3.0.dev2"
__requires__ = [
# (<required module>, <distutils package req>)
("pip", "pip"),
("yaml", "PyYAML"),
("setuptools", "setuptools"),
("tabview", "tabview"),
("twine", "twine"),
("werkzeug", "Werkzeug"),
("whoosh", "Whoosh"),
]
__pkgdir__ = os.path.dirname(os.path.dirname(__file__))
def _try_init_git_attrs():
try:
_init_git_commit()
except (OSError, subprocess.CalledProcessError):
pass
else:
try:
_init_git_status()
except (OSError, subprocess.CalledProcessError):
pass
def _init_git_commit():
commit = _git_cmd("git -C \"%(repo)s\" log -1 --oneline | cut -d' ' -f1")
globals()["__git_commit__"] = commit
def _init_git_status():
raw = _git_cmd("git -C \"%(repo)s\" status -s")
globals()["__git_status__"] = raw.split("\n") if raw else []
def _git_cmd(cmd, **kw):
repo = os.path.dirname(__file__)
cmd = cmd % dict(repo=repo, **kw)
null = open(os.devnull, "w")
out = subprocess.check_output(cmd, stderr=null, shell=True)
return out.decode("utf-8").strip()
def version():
git_commit = globals().get("__git_commit__")
if git_commit:
git_status = globals().get("__git_status__", [])
workspace_changed_marker = "*" if git_status else ""
return "%s (dev %s%s)" % (__version__, git_commit,
workspace_changed_marker)
else:
return __version__
_try_init_git_attrs()
| apache-2.0 | Python |
114e658bcc13e7b8b8eb2c6c56915310baec2cd0 | hide tasks without condition | DMPwerkzeug/DMPwerkzeug,DMPwerkzeug/DMPwerkzeug,DMPwerkzeug/DMPwerkzeug,rdmorganiser/rdmo,rdmorganiser/rdmo,rdmorganiser/rdmo | apps/tasks/managers.py | apps/tasks/managers.py | from django.db import models
class TaskManager(models.Manager):
def active_by_project(self, project):
tasks = []
for task in self.get_queryset():
conditions = task.conditions.all()
if conditions:
for condition in conditions:
if condition.resolve(project):
tasks.append(task)
break
if hasattr(task, 'timeframe'):
task.timeframe.dates = task.timeframe.get_dates(project)
return tasks
| from django.db import models
class TaskManager(models.Manager):
def active_by_project(self, project):
tasks = []
for task in self.get_queryset():
conditions = task.conditions.all()
if conditions:
for condition in conditions:
if condition.resolve(project):
tasks.append(task)
break
else:
tasks.append(task)
if hasattr(task, 'timeframe'):
task.timeframe.dates = task.timeframe.get_dates(project)
return tasks
| apache-2.0 | Python |
9cd217e0e39fb53ca002b18904921351dd52e234 | Fix GetOverlappedResult WriteFile tests per MSDN docs | opalmer/pywincffi,opalmer/pywincffi,opalmer/pywincffi,opalmer/pywincffi | tests/test_kernel32/test_overlapped.py | tests/test_kernel32/test_overlapped.py | import os
import shutil
import tempfile
from six import text_type
from pywincffi.dev.testutil import TestCase
from pywincffi.core import dist
from pywincffi.kernel32 import (
CreateFile, WriteFile, CloseHandle, CreateEvent, GetOverlappedResult)
from pywincffi.wintypes import OVERLAPPED
class TestOverlappedWriteFile(TestCase):
    """
    Tests for :func:`pywincffi.kernel32.GetOverlappedResult`
    """

    def test_overlapped_write_file(self):
        """An overlapped WriteFile completes with the full byte count.

        Outline: create a temp dir, open a new file with
        FILE_FLAG_OVERLAPPED, issue an asynchronous WriteFile, then block
        in GetOverlappedResult until the IO finishes.
        """
        scratch_dir = tempfile.mkdtemp(prefix="pywincffi-test-ovr-")
        self.addCleanup(shutil.rmtree, scratch_dir, ignore_errors=True)

        target_path = text_type(
            os.path.join(scratch_dir, "overlapped-write-file"))
        payload = b"hello overlapped world"

        _, lib = dist.load()
        handle = CreateFile(
            lpFileName=target_path,
            dwDesiredAccess=lib.GENERIC_WRITE,
            dwCreationDisposition=lib.CREATE_NEW,
            dwFlagsAndAttributes=lib.FILE_FLAG_OVERLAPPED,
        )

        # Prepare the overlapped structure with a manual-reset event.
        overlapped = OVERLAPPED()
        overlapped.hEvent = CreateEvent(bManualReset=True, bInitialState=False)

        # Per https://msdn.microsoft.com/en-us/library/aa365683 an async
        # WriteFile may either return FALSE with ERROR_IO_PENDING or TRUE
        # with ERROR_SUCCESS, so the return value is disregarded and only
        # the error code is checked; the real validation is the byte count
        # reported by GetOverlappedResult below.
        WriteFile(handle, payload, lpOverlapped=overlapped)
        error_code, _ = self.GetLastError()
        self.assertIn(error_code, (lib.ERROR_IO_PENDING, 0))

        # Reset last error so that TestCase cleanups don't error out.
        self.SetLastError(0)

        # Block until the async write is completed.
        written = GetOverlappedResult(handle, overlapped, bWait=True)
        self.assertEqual(written, len(payload))

        CloseHandle(handle)
| import os
import shutil
import tempfile
from six import text_type
from pywincffi.dev.testutil import TestCase
from pywincffi.core import dist
from pywincffi.kernel32 import (
CreateFile, WriteFile, CloseHandle, CreateEvent, GetOverlappedResult)
from pywincffi.wintypes import OVERLAPPED
class TestOverlappedWriteFile(TestCase):
"""
Tests for :func:`pywincffi.kernel32.GetOverlappedResult`
"""
def test_overlapped_write_file(self):
# Test outline:
# - Create a temp dir.
# - CreateFile for writing with FILE_FLAG_OVERLAPPED.
# - WriteFile in overlapped mode.
# - Use GetOverlappedResult to wait for IO completion.
temp_dir = tempfile.mkdtemp(prefix="pywincffi-test-ovr-")
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
filename = text_type(os.path.join(temp_dir, "overlapped-write-file"))
file_contents = b"hello overlapped world"
_, lib = dist.load()
handle = CreateFile(
lpFileName=filename,
dwDesiredAccess=lib.GENERIC_WRITE,
dwCreationDisposition=lib.CREATE_NEW,
dwFlagsAndAttributes=lib.FILE_FLAG_OVERLAPPED,
)
# Prepare overlapped write
ovr = OVERLAPPED()
ovr.hEvent = CreateEvent(bManualReset=True, bInitialState=False)
# Go for overlapped WriteFile. Should result in:
# - num_bytes_written == 0
# - GetLastError() == ERROR_IO_PENDING
num_bytes_written = WriteFile(handle, file_contents, lpOverlapped=ovr)
self.assertEqual(num_bytes_written, 0)
error_code, _ = self.GetLastError()
self.assertEqual(error_code, lib.ERROR_IO_PENDING)
# Reset last error so that TestCase cleanups don't error out.
self.SetLastError(0)
# Block until async write is completed.
num_bytes_written = GetOverlappedResult(handle, ovr, bWait=True)
self.assertEqual(num_bytes_written, len(file_contents))
CloseHandle(handle)
| mit | Python |
49d4031fb6ac08060b0d427bd8ac3345422c7b13 | Update license year | uber/tchannel-python,uber/tchannel-python | tests/thrift/test_multiple_services.py | tests/thrift/test_multiple_services.py | # Copyright (c) 2016 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import (
absolute_import, print_function, unicode_literals, division
)
import pytest
from tchannel import TChannel, thrift
@pytest.mark.gen_test
def test_inherited_method_names(tmpdir):
    """Handlers for an inherited method registered on sibling services
    must stay independent and not clobber each other."""
    thrift_file = tmpdir.join('service.thrift')
    thrift_file.write('''
        service Base { string hello() }
        service Foo extends Base {}
        service Bar extends Base {}
    ''')
    idl = thrift.load(str(thrift_file), 'myservice')

    server = TChannel('server')

    @server.thrift.register(idl.Foo, method='hello')
    def handle_foo_hello(request):
        return 'foo'

    @server.thrift.register(idl.Bar, method='hello')
    def handle_bar_hello(request):
        return 'bar'

    server.listen()
    client = TChannel('client')

    response = yield client.thrift(idl.Foo.hello(), hostport=server.hostport)
    assert response.body == 'foo'

    response = yield client.thrift(idl.Bar.hello(), hostport=server.hostport)
    assert response.body == 'bar'
| # Copyright (c) 2015 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import (
absolute_import, print_function, unicode_literals, division
)
import pytest
from tchannel import TChannel, thrift
@pytest.mark.gen_test
def test_inherited_method_names(tmpdir):
thrift_file = tmpdir.join('service.thrift')
thrift_file.write('''
service Base { string hello() }
service Foo extends Base {}
service Bar extends Base {}
''')
service = thrift.load(str(thrift_file), 'myservice')
server = TChannel('server')
@server.thrift.register(service.Foo, method='hello')
def foo_hello(request):
return 'foo'
@server.thrift.register(service.Bar, method='hello')
def bar_hello(request):
return 'bar'
server.listen()
client = TChannel('client')
res = yield client.thrift(service.Foo.hello(), hostport=server.hostport)
assert res.body == 'foo'
res = yield client.thrift(service.Bar.hello(), hostport=server.hostport)
assert res.body == 'bar'
| mit | Python |
a5ff46da6c0d5941a979566c5d0f97cd5f375c99 | Add key type selection | bbockelm/glideinWMS,holzman/glideinwms-old,holzman/glideinwms-old,bbockelm/glideinWMS,bbockelm/glideinWMS,bbockelm/glideinWMS,holzman/glideinwms-old | tools/wmsTxtView.py | tools/wmsTxtView.py | #!/bin/env python
#
# Description:
# This tool displays the status of the glideinWMS pool
# in a text format
#
# Arguments:
# [-pool collector_node] Entries|Sites|Gatekeepers
#
# Author:
# Igor Sfiligoi (May 9th 2007)
#
import string
import sys
sys.path.append("../factory")
sys.path.append("../frontend")
sys.path.append("../lib")
import glideFactoryInterface
import glideinFrontendInterface
pool_name=None
remove_condor_stats=True
remove_internals=True
txt_type='Entries'
# parse arguments
alen=len(sys.argv)
i=1
while (i<alen):
ael=sys.argv[i]
if ael=='-pool':
i=i+1
pool_name=sys.argv[i]
elif ael in ('Entries','Sites','Gatekeepers'):
txt_type=ael
else:
raise RuntimeError,"Unknown option '%s'"%ael
i=i+1
# get data
glideins_obj=glideinFrontendInterface.findGlideins(pool_name)
# Get a dictionary of
# RequestedIdle
# Idle
# Running
txt_data={}
# extract data
glideins=glideins_obj.keys()
for glidein in glideins:
glidein_el=glideins_obj[glidein]
if txt_type=='Entries':
key=glidein
elif txt_type=='Sites':
key=glidein_el['attrs']['GLIDEIN_Site']
elif txt_type=='Gatekeeper':
key=glidein_el['attrs']['GLIDEIN_Gatekeeper']
else:
raise RuntimeError, "Unknwon type '%s'"%txt_type
if txt_data.has_key(key):
key_el=txt_data[key]
else:
key_el={'RequestedIdle':0,'Idle':0,'Running':0,'MaxRunning':0}
txt_data[key]=key_el
if glidein_el.has_key('monitor'):
key_el['RequestedIdle']+=glidein_el['monitor']['TotalRequestedIdle']
key_el['Idle']+=glidein_el['monitor']['TotalStatusIdle']
key_el['Running']+=glidein_el['monitor']['TotalStatusRunning']
key_el['MaxRunning']+=glidein_el['monitor']['TotalRequestedMaxRun']
#print data
txt_keys=txt_data.keys()
txt_keys.sort()
print '%s ReqIdle Idle Running MaxRun'%string.ljust('Entry',48)
print '================================================-=======-=======-=======-======='
for key in txt_keys:
key_el=txt_data[key]
print "%s %7i %7i %7i %7i"%(string.ljust(key,48),key_el['RequestedIdle'],key_el['Idle'],key_el['Running'],key_el['MaxRunning'])
| #!/bin/env python
#
# Description:
# This tool displays the status of the glideinWMS pool
# in a text format
#
# Arguments:
# [-pool collector_node] Entries|Sites|Gatekeepers
#
# Author:
# Igor Sfiligoi (May 9th 2007)
#
import string
import sys
sys.path.append("../factory")
sys.path.append("../frontend")
sys.path.append("../lib")
import glideFactoryInterface
import glideinFrontendInterface
pool_name=None
remove_condor_stats=True
remove_internals=True
txt_type=None
# parse arguments
alen=len(sys.argv)
i=1
while (i<alen):
ael=sys.argv[i]
if ael=='-pool':
i=i+1
pool_name=sys.argv[i]
elif ael in ('Entries','Sites','Gatekeepers'):
txt_type=ael
else:
raise RuntimeError,"Unknown option '%s'"%ael
i=i+1
# get data
glideins_obj=glideinFrontendInterface.findGlideins(pool_name)
# Get a dictionary of
# RequestedIdle
# Idle
# Running
txt_data={}
# extract data
glideins=glideins_obj.keys()
for glidein in glideins:
glidein_el=glideins_obj[glidein]
key=glidein
if txt_data.has_key(key):
key_el=txt_data[key]
else:
key_el={'RequestedIdle':0,'Idle':0,'Running':0,'MaxRunning':0}
txt_data[key]=key_el
if glidein_el.has_key('monitor'):
key_el['RequestedIdle']+=glidein_el['monitor']['TotalRequestedIdle']
key_el['Idle']+=glidein_el['monitor']['TotalStatusIdle']
key_el['Running']+=glidein_el['monitor']['TotalStatusRunning']
key_el['MaxRunning']+=glidein_el['monitor']['TotalRequestedMaxRun']
#print data
txt_keys=txt_data.keys()
txt_keys.sort()
print '%s ReqIdle Idle Running MaxRun'%string.ljust('Entry',48)
print '================================================-=======-=======-=======-======='
for key in txt_keys:
key_el=txt_data[key]
print "%s %7i %7i %7i %7i"%(string.ljust(key,48),key_el['RequestedIdle'],key_el['Idle'],key_el['Running'],key_el['MaxRunning'])
| bsd-3-clause | Python |
3566bb6f5f7a2fb48c485fbad7b9961aa061acba | Update wp2pelican script to extract date and author taken from wordpress xml file. | btnpushnmunky/pelican,GiovanniMoretti/pelican,jvehent/pelican,florianjacob/pelican,treyhunner/pelican,levanhien8/pelican,iKevinY/pelican,ionelmc/pelican,abrahamvarricatt/pelican,kennethlyn/pelican,lucasplus/pelican,lazycoder-ru/pelican,rbarraud/pelican,ehashman/pelican,deved69/pelican-1,kernc/pelican,sunzhongwei/pelican,Summonee/pelican,Natim/pelican,douglaskastle/pelican,deanishe/pelican,joetboole/pelican,lazycoder-ru/pelican,ls2uper/pelican,number5/pelican,kennethlyn/pelican,JeremyMorgan/pelican,abrahamvarricatt/pelican,number5/pelican,liyonghelpme/myBlog,simonjj/pelican,ehashman/pelican,treyhunner/pelican,deanishe/pelican,avaris/pelican,jo-tham/pelican,gymglish/pelican,UdeskDeveloper/pelican,lucasplus/pelican,jimperio/pelican,ls2uper/pelican,koobs/pelican,liyonghelpme/myBlog,iurisilvio/pelican,jo-tham/pelican,rbarraud/pelican,zackw/pelican,kernc/pelican,goerz/pelican,garbas/pelican,levanhien8/pelican,JeremyMorgan/pelican,lucasplus/pelican,florianjacob/pelican,simonjj/pelican,eevee/pelican,garbas/pelican,avaris/pelican,crmackay/pelican,iurisilvio/pelican,Summonee/pelican,jvehent/pelican,florianjacob/pelican,TC01/pelican,kennethlyn/pelican,douglaskastle/pelican,karlcow/pelican,Rogdham/pelican,Scheirle/pelican,levanhien8/pelican,jvehent/pelican,ingwinlu/pelican,sunzhongwei/pelican,alexras/pelican,Summonee/pelican,karlcow/pelican,goerz/pelican,HyperGroups/pelican,iurisilvio/pelican,gymglish/pelican,farseerfc/pelican,51itclub/pelican,eevee/pelican,Scheirle/pelican,ls2uper/pelican,zackw/pelican,justinmayer/pelican,getpelican/pelican,sunzhongwei/pelican,UdeskDeveloper/pelican,HyperGroups/pelican,arty-name/pelican,rbarraud/pelican,catdog2/pelican,btnpushnmunky/pelican,Rogdham/pelican,liyonghelpme/myBlog,51itclub/pelican,Polyconseil/pelican,GiovanniMoretti/pelican,Scheirle/pelican,Polyconseil/pelican,0xMF/pelican,catdog2/pelican,douglaskastle/pel
ican,janaurka/git-debug-presentiation,51itclub/pelican,janaurka/git-debug-presentiation,ingwinlu/pelican,getpelican/pelican,deanishe/pelican,janaurka/git-debug-presentiation,catdog2/pelican,11craft/pelican,alexras/pelican,liyonghelpme/myBlog,joetboole/pelican,fbs/pelican,abrahamvarricatt/pelican,kernc/pelican,Rogdham/pelican,goerz/pelican,koobs/pelican,crmackay/pelican,number5/pelican,gymglish/pelican,zackw/pelican,eevee/pelican,joetboole/pelican,JeremyMorgan/pelican,jimperio/pelican,deved69/pelican-1,garbas/pelican,alexras/pelican,iKevinY/pelican,ehashman/pelican,talha131/pelican,farseerfc/pelican,UdeskDeveloper/pelican,jimperio/pelican,simonjj/pelican,karlcow/pelican,btnpushnmunky/pelican,11craft/pelican,liyonghelpme/myBlog,treyhunner/pelican,sunzhongwei/pelican,HyperGroups/pelican,11craft/pelican,koobs/pelican,GiovanniMoretti/pelican,TC01/pelican,lazycoder-ru/pelican,TC01/pelican,talha131/pelican,deved69/pelican-1,crmackay/pelican | tools/wp2pelican.py | tools/wp2pelican.py | #! /usr/bin/env python
from BeautifulSoup import BeautifulStoneSoup
from codecs import open
import os
import argparse
def wp2html(xml):
xmlfile = open(xml, encoding='utf-8').read()
soup = BeautifulStoneSoup(xmlfile)
items = soup.rss.channel.findAll('item')
for item in items:
if item.fetch('wp:status')[0].contents[0] == "publish":
title = item.title.contents[0]
content = item.fetch('content:encoded')[0].contents[0]
filename = item.fetch('wp:post_name')[0].contents[0]
date = item.fetch('wp:post_date')[0].contents[0]
author = item.fetch('dc:creator')[0].contents[0].title()
yield (title, content, filename, date, author)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="""Transform a wordpress xml export into rst files """)
parser.add_argument(dest='xml', help='The xml filepath')
parser.add_argument('-o', '--output', dest='output', default='output', help='Output path')
args = parser.parse_args()
for title, content, filename, date, author in wp2html(args.xml):
html_filename = os.path.join(args.output, filename+'.html')
rst_filename = os.path.join(args.output, filename+'.rst')
with open(html_filename, 'w', encoding='utf-8') as fp:
fp.write(content)
os.system('pandoc --from=html --to=rst -o %s %s' % (rst_filename,
html_filename))
with open(rst_filename, 'r', encoding='utf-8') as fs:
content = fs.read()
with open(rst_filename, 'w', encoding='utf-8') as fs:
header = '%s\n%s\n\n:date: %s\n:author: %s\n\n' % (title, '#' * len(title) ,date, author)
fs.write(header+content)
| #! /usr/bin/env python
from BeautifulSoup import BeautifulStoneSoup
from codecs import open
import os
import argparse
def wp2html(xml):
xmlfile = open(xml, encoding='utf-8').read()
soup = BeautifulStoneSoup(xmlfile)
items = soup.rss.channel.findAll('item')
for item in items:
if item.fetch('wp:status')[0].contents[0] == "publish":
title = item.title.contents[0]
content = item.fetch('content:encoded')[0].contents[0]
filename = item.fetch('wp:post_name')[0].contents[0]
yield (title, content, filename)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="""Transform a wordpress xml export into rst files """)
parser.add_argument(dest='xml', help='The xml filepath')
parser.add_argument('-o', '--output', dest='output', default='output', help='Output path')
args = parser.parse_args()
for title, content, filename in wp2html(args.xml):
html_filename = os.path.join(args.output, filename+'.html')
rst_filename = os.path.join(args.output, filename+'.rst')
with open(html_filename, 'w', encoding='utf-8') as fp:
fp.write(content)
os.system('pandoc --from=html --to=rst -o %s %s' % (rst_filename,
html_filename))
| agpl-3.0 | Python |
da389d740e0a13ca4ac8ba4132976fe2ca8cff53 | fix err check | syscoin/syscoin,syscoin/syscoin,syscoin/syscoin,syscoin/syscoin,syscoin/syscoin,syscoin/syscoin,syscoin/syscoin,syscoin/syscoin | test/functional/feature_asset_burn.py | test/functional/feature_asset_burn.py | #!/usr/bin/env python3
# Copyright (c) 2019-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import SyscoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
from test_framework.messages import COIN
class AssetBurnTest(SyscoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.rpc_timeout = 240
self.extra_args = [['-assetindex=1'],['-assetindex=1']]
def run_test(self):
self.nodes[0].generate(200)
self.sync_blocks()
self.basic_burn_syscoin()
self.basic_audittxroot1()
def basic_burn_syscoin(self):
self.basic_asset()
self.nodes[0].generate(1)
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1)
self.nodes[0].assetsend(self.asset, self.nodes[1].getnewaddress(), int(0.5*COIN))
self.nodes[0].generate(1)
self.sync_blocks()
out = self.nodes[1].listunspent(query_options={'assetGuid': self.asset})
assert_equal(len(out), 1)
# try to burn more than we own
assert_raises_rpc_error(-4, 'Insufficient funds', self.nodes[1].assetallocationburn(self.asset, int(0.6*COIN), '0x931d387731bbbc988b312206c74f77d004d6b84b'))
self.nodes[1].assetallocationburn(self.asset, int(0.5*COIN), '0x931d387731bbbc988b312206c74f77d004d6b84b')
self.nodes[0].generate(1)
self.sync_blocks()
out = self.nodes[1].listunspent(query_options={'assetGuid': self.asset})
assert_equal(len(out), 0)
def basic_asset(self):
self.asset = self.nodes[0].assetnew('1', 'TST', 'asset description', '0x9f90b5093f35aeac5fbaeb591f9c9de8e2844a46', 8, 1000*COIN, 10000*COIN, 31, {})['asset_guid']
if __name__ == '__main__':
AssetBurnTest().main()
| #!/usr/bin/env python3
# Copyright (c) 2019-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import SyscoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
from test_framework.messages import COIN
class AssetBurnTest(SyscoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.rpc_timeout = 240
self.extra_args = [['-assetindex=1'],['-assetindex=1']]
def run_test(self):
self.nodes[0].generate(200)
self.sync_blocks()
self.basic_burn_syscoin()
self.basic_audittxroot1()
def basic_burn_syscoin(self):
self.basic_asset()
self.nodes[0].generate(1)
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1)
self.nodes[0].assetsend(self.asset, self.nodes[1].getnewaddress(), int(0.5*COIN))
self.nodes[0].generate(1)
self.sync_blocks()
out = self.nodes[1].listunspent(query_options={'assetGuid': self.asset})
assert_equal(len(out), 1)
# try to burn more than we own
assert_raises_rpc_error(-20, 'Failed to read from asset DB', self.nodes[1].assetallocationburn(self.asset, int(0.6*COIN), '0x931d387731bbbc988b312206c74f77d004d6b84b'))
self.nodes[1].assetallocationburn(self.asset, int(0.5*COIN), '0x931d387731bbbc988b312206c74f77d004d6b84b')
self.nodes[0].generate(1)
self.sync_blocks()
out = self.nodes[1].listunspent(query_options={'assetGuid': self.asset})
assert_equal(len(out), 0)
def basic_asset(self):
self.asset = self.nodes[0].assetnew('1', 'TST', 'asset description', '0x9f90b5093f35aeac5fbaeb591f9c9de8e2844a46', 8, 1000*COIN, 10000*COIN, 31, {})['asset_guid']
if __name__ == '__main__':
AssetBurnTest().main()
| mit | Python |
0d6a5273e8ee6700b81f7ea00b085e1aec264e2b | Make sure that the catch-all urlpattern in projects is at the bottom of urlpatterns in urls.py | mozilla/mozilla-ignite,mozilla/betafarm,mozilla/betafarm,mozilla/mozilla-ignite,mozilla/mozilla-ignite,mozilla/betafarm,mozilla/betafarm,mozilla/mozilla-ignite | urls.py | urls.py | from django.conf import settings
from django.conf.urls.defaults import patterns, include
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
(r'^admin/', include(admin.site.urls)),
(r'^browserid/', include('django_browserid.urls')),
(r'^events', include('events.urls')),
(r'^topics', include('topics.urls')),
(r'', include('innovate.urls')),
(r'', include('users.urls')),
(r'', include('projects.urls')),
)
# Handle 404 and 500 errors
handler404 = 'innovate.views.handle404'
handler500 = 'innovate.views.handle500'
## In DEBUG mode, serve media files through Django.
if settings.DEBUG:
# Remove leading and trailing slashes so the regex matches.
media_url = settings.MEDIA_URL.lstrip('/').rstrip('/')
urlpatterns += patterns('',
(r'^%s/(?P<path>.*)$' % media_url, 'django.views.static.serve',
{'document_root': settings.MEDIA_ROOT}),
)
urlpatterns += patterns('',
(r'^mockups/(?P<path>.*)$', 'django.views.static.serve', {
'document_root': 'mockups',
'show_indexes': True,
})
)
| from django.conf import settings
from django.conf.urls.defaults import patterns, include
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
(r'^admin/', include(admin.site.urls)),
(r'^browserid/', include('django_browserid.urls')),
(r'', include('projects.urls')),
(r'', include('innovate.urls')),
(r'', include('users.urls')),
(r'^topics', include('topics.urls')),
(r'^events', include('events.urls'))
)
# Handle 404 and 500 errors
handler404 = 'innovate.views.handle404'
handler500 = 'innovate.views.handle500'
## In DEBUG mode, serve media files through Django.
if settings.DEBUG:
# Remove leading and trailing slashes so the regex matches.
media_url = settings.MEDIA_URL.lstrip('/').rstrip('/')
urlpatterns += patterns('',
(r'^%s/(?P<path>.*)$' % media_url, 'django.views.static.serve',
{'document_root': settings.MEDIA_ROOT}),
)
urlpatterns += patterns('',
(r'^mockups/(?P<path>.*)$', 'django.views.static.serve', {
'document_root': 'mockups',
'show_indexes': True,
})
)
| bsd-3-clause | Python |
1b494d4d4b462ca2f68b072ebb078c0f2b00dc71 | fix urls | suvit/django-tinymce-images,suvit/django-tinymce-images | urls.py | urls.py | # -*- encoding: utf-8 -*-
from django.conf.urls.defaults import *
url_prefix = 'tiny_mce/images/'
urlpatterns = patterns('tinymce_images.view',
#url(r'download/$', tiny_views.download),
#url(r'^$', tiny_views.all),
url(r'^new_folder/(?P<name>\w+)/(?P<path>[a-zA-Z0-9_/]*)$', 'new_folder', {}, 'new_folder'),
url(r'^show_path/(?P<type>\w+)/(?P<path>[a-zA-Z0-9_/]*)$', 'show_path', {}, 'show_path'),
url(r'^show_tree/(?P<type>\w+)/(?P<path>[a-zA-Z0-9_/]*)$', 'show_tree', {}, 'show_tree'),
url(r'^show_dir/(?P<type>\w+)/(?P<path>[a-zA-Z0-9_/]*)$', 'show_dir', {}, 'show_dir'),
url(r'^del_folder/(?P<path>[a-zA-Z0-9_/]*)$', 'del_folder', {}, 'del_folder'),
url(r'^upload_file/$', 'upload_file', {}, 'upload_file'),
url(r'^del_file/$', 'del_file', {}, 'del_file'),
url(r'^sid/$', 'sid', {}, 'sid'),
)
urlpatterns = patterns("",
url(r'^%s' % url_prefix, include(urlpatterns)),
url(r'^%sconnector/$' % url_prefix,
'django.views.generic.simple.direct_to_template',
{'template': 'connector_url.js',
'mimetype': 'text/javascript',
'connector_url': '/%s' % url_prefix},
name='connector_url' ),
)
| # -*- encoding: utf-8 -*-
from django.conf.urls.defaults import *
url_prefix = 'tiny_mce/images/'
urlpatterns = patterns('tinymce_images.view',
#url(r'download/$', tiny_views.download),
#url(r'^$', tiny_views.all),
url(r'^new_folder/(?P<name>\w+)/(?P<path>[a-zA-Z0-9_/]*)$', 'new_folder', {}, 'new_folder'),
url(r'^show_path/(?P<type>\w+)/(?P<path>[a-zA-Z0-9_/]*)$', 'show_path', {}, 'show_path'),
url(r'^show_tree/(?P<type>\w+)/(?P<path>[a-zA-Z0-9_/]*)$', 'show_tree', {}, 'show_tree'),
url(r'^show_dir/(?P<type>\w+)/(?P<path>[a-zA-Z0-9_/]*)$', 'show_dir', {}, 'show_dir'),
url(r'^del_folder/(?P<path>[a-zA-Z0-9_/]*)$', 'del_folder', {}, 'del_folder'),
url(r'^upload_file/$', 'upload_file', {}, 'upload_file'),
url(r'^del_file/$', 'del_file', {}, 'del_file'),
url(r'^sid/$', 'sid', {}, 'sid'),
url(r'^connector/$',
'django.views.generic.simple.direct_to_template',
{'template': 'connector_url.js',
'mimetype': 'text/javascript',
'connector_url': '/%s' % url_prefix},
name='connector_url' ),
)
urlpatterns = patterns("",
url(r'^%s' % url_prefix, include(urlpatterns)),
)
| mit | Python |
ce7bddf80dc58e1fa7e4fefe7890c9c23e549c29 | Add datetime function to convert date into a numerical format. | weidnem/IntroPython2016,UWPCE-PythonCert/IntroPython2016,UWPCE-PythonCert/IntroPython2016,UWPCE-PythonCert/IntroPython2016,weidnem/IntroPython2016,weidnem/IntroPython2016 | students/psbriant/final_project/clean_data.py | students/psbriant/final_project/clean_data.py | """
Name: Paul Briant
Date: 12/11/16
Class: Introduction to Python
Assignment: Final Project
Description:
Code for Final Project
"""
import pandas
import matplotlib.pyplot as plt
from datetime import datetime
def clean(data):
"""
Take in data and return cleaned version.
"""
# Remove Date Values column
data = data.drop(["Date Value"], axis=1)
column_names = list(data.columns.values)
data.columns = rename_columns(column_names)
# Modify date format
first_date = data.Date.values[0]
print(datetime.strptime(first_date, "%b_%Y"))
# Assign dates as the index
data.index = data.Date
return data
def rename_columns(names):
"""
Renames the date column and adds all columns into a list so they can be
accessed by dot notation.
"""
columns_list = []
for name in names:
if name == "Date Text":
columns_list.append("Date")
else:
columns_list.append(name)
return columns_list
def find_low_water_use(data):
"""
"""
under100 = data[(data["90012"] <= 100) & (data["90013"] <= 100)]
print(under100)
under25 = data[(data["90012"] <= 25) & (data["90013"] <= 25)]
print(under25)
def plot_zipcode(data, zipcode):
"""
Plot water use data for a specified zipcode
"""
# data["90012"].plot(kind="bar", rot=10)
plt.plot(data[zipcode])
plt.show()
def main():
"""
"""
# Connect to file.
data = pandas.read_csv("data/Residential_Water_Usage_Zip_Code_on_Top.csv")
cleaned_data = clean(data)
# find_low_water_use(cleaned_data)
plot_zipcode(cleaned_data, "90012")
# cleaned_data["90012"].plot(kind="bar", rot=10)
# cleaned_data["90012"].hist()
# plt.plot(cleaned_data["90012"])
# plt.plot([1, 2, 3, 4])
if __name__ == '__main__':
main()
| """
Name: Paul Briant
Date: 12/11/16
Class: Introduction to Python
Assignment: Final Project
Description:
Code for Final Project
"""
import pandas
import matplotlib.pyplot as plt
from datetime import datetime
def clean(data):
"""
Take in data and return cleaned version.
"""
# Remove Date Values column
data = data.drop(["Date Value"], axis=1)
column_names = list(data.columns.values)
data.columns = rename_columns(column_names)
print(data.columns)
# Assign dates as the index
data.index = data.Date
return data
def rename_columns(names):
"""
Renames the date column and adds all columns into a list so they can be
accessed by dot notation.
"""
columns_list = []
for name in names:
if name == "Date Text":
columns_list.append("Date")
else:
columns_list.append(name)
return columns_list
def find_low_water_use(data):
"""
"""
under100 = data[(data["90012"] <= 100) & (data["90013"] <= 100)]
print(under100)
under25 = data[(data["90012"] <= 25) & (data["90013"] <= 25)]
print(under25)
def plot_zipcode(data, zipcode):
"""
Plot water use data for a specified zipcode
"""
# data["90012"].plot(kind="bar", rot=10)
plt.plot(data[zipcode])
plt.show()
def main():
"""
"""
# Connect to file.
data = pandas.read_csv("data/Residential_Water_Usage_Zip_Code_on_Top.csv")
cleaned_data = clean(data)
# find_low_water_use(cleaned_data)
plot_zipcode(cleaned_data, "90012")
# cleaned_data["90012"].plot(kind="bar", rot=10)
# cleaned_data["90012"].hist()
# plt.plot(cleaned_data["90012"])
# plt.plot([1, 2, 3, 4])
if __name__ == '__main__':
main()
| unlicense | Python |
9017e170f6d689bc0aae7c9d0c3ed3ea72f7eac5 | update all other commands to new method | Jeikko/Redball-Cogs | arkserver/arkserver.py | arkserver/arkserver.py | import discord
from discord.ext import commands
from .utils import checks
from __main__ import send_cmd_help
import os
import asyncio
from subprocess import PIPE, run
def out(command):
result = run(command, stdout=PIPE, stderr=PIPE, universal_newlines=True, shell=True)
return result.stdout
class arkserver:
"""Ark Server commands"""
def __init__(self, bot):
self.bot = bot
@commands.command(pass_context=True)
@checks.mod_or_permissions(manage_webhooks=True)
async def checkupdate(self):
"""Checks for ark updates"""
output = out("arkmanager checkupdate")
await self.bot.say("{0}".format(output))
@commands.command(pass_context=True)
@checks.mod_or_permissions(manage_webhooks=True)
async def arkrestart(self):
"""Restarts the ARK Server"""
output = out("arkmanager restart")
await self.bot.say("{0}".format(output))
@commands.command(pass_context=True)
@checks.mod_or_permissions(manage_webhooks=True)
async def arkupdate(self):
"""Stops the ARK Server, installs updates, then reboots"""
output = out("arkmanager update --update-mods")
await self.bot.say("{0}".format(output))
@commands.command(pass_context=True)
@checks.mod_or_permissions(manage_webhooks=True)
async def broadcast(self, ctx, text):
"""Sends a message ingame"""
output = out("arkmanager broadcast" + " " + text)
def setup(bot):
n = arkserver(bot)
bot.add_cog(n)
| import discord
from discord.ext import commands
from .utils import checks
from __main__ import send_cmd_help
import os
import asyncio
from subprocess import PIPE, run
def out(command):
result = run(command, stdout=PIPE, stderr=PIPE, universal_newlines=True, shell=True)
return result.stdout
class arkserver:
"""Ark Server commands"""
def __init__(self, bot):
self.bot = bot
@commands.command(pass_context=True)
@checks.mod_or_permissions(manage_webhooks=True)
async def checkupdate(self):
"""Checks for ark updates"""
output = out("arkmanager checkupdate")
await self.bot.say("{0}".format(output))
@commands.command(pass_context=True)
@checks.mod_or_permissions(manage_webhooks=True)
async def arkrestart(self):
"""Restarts the ARK Server"""
os.system("arkmanager restart")
await self.bot.say("Server restarted.")
@commands.command(pass_context=True)
@checks.mod_or_permissions(manage_webhooks=True)
async def arkupdate(self):
"""Stops the ARK Server, installs updates, then reboots"""
os.system("arkmanager update --update-mods")
await self.bot.say("Attempting to update..")
@commands.command(pass_context=True)
@checks.mod_or_permissions(manage_webhooks=True)
async def broadcast(self):
"""Sends a message ingame"""
os.system("arkmanager broadcast")
def setup(bot):
n = arkserver(bot)
bot.add_cog(n)
| mit | Python |
88e0ec5ff58f7dabb531749472a410498c8e7827 | Use itertools.repeat over slower alternatives. | ZhukovAlexander/skiplist-python | py_skiplist/iterators.py | py_skiplist/iterators.py | from itertools import dropwhile, count, repeat
import random
def geometric(p):
return (next(dropwhile(lambda _: random.randint(1, int(1. / p)) == 1, count())) for _ in repeat(1))
# Simple deterministic distribution for testing internals of the skiplist.
uniform = repeat
| from itertools import dropwhile, count, cycle
import random
def geometric(p):
return (next(dropwhile(lambda _: random.randint(1, int(1. / p)) == 1, count())) for _ in cycle([1]))
def uniform(n):
"""
Simple deterministic distribution for testing internal of the skiplist
"""
return (n for _ in cycle([1]))
| mit | Python |
da7f7c352ab2a6b2879e7b9d229c0b78a73d64e8 | Bump version | laughingman7743/PyAthenaJDBC,laughingman7743/PyAthenaJDBC | pyathenajdbc/__init__.py | pyathenajdbc/__init__.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
import datetime
__version__ = '1.0.5'
__athena_driver_version__ = '1.0.0'
# Globals https://www.python.org/dev/peps/pep-0249/#globals
apilevel = '2.0'
threadsafety = 3
paramstyle = 'pyformat'
ATHENA_JAR = 'AthenaJDBC41-{0}.jar'.format(__athena_driver_version__)
ATHENA_DRIVER_DOWNLOAD_URL = 'https://s3.amazonaws.com/athena-downloads/drivers/{0}'.format(
ATHENA_JAR)
ATHENA_DRIVER_CLASS_NAME = 'com.amazonaws.athena.jdbc.AthenaDriver'
ATHENA_CONNECTION_STRING = 'jdbc:awsathena://athena.{region}.amazonaws.com:443/'
class DBAPITypeObject:
"""Type Objects and Constructors
https://www.python.org/dev/peps/pep-0249/#type-objects-and-constructors
"""
def __init__(self, *values):
self.values = values
def __cmp__(self, other):
if other in self.values:
return 0
if other < self.values:
return 1
else:
return -1
STRING = DBAPITypeObject('CHAR', 'NCHAR',
'VARCHAR', 'NVARCHAR',
'LONGVARCHAR', 'LONGNVARCHAR')
BINARY = DBAPITypeObject('BINARY', 'VARBINARY', 'LONGVARBINARY')
NUMBER = DBAPITypeObject('BOOLEAN', 'TINYINT', 'SMALLINT', 'BIGINT', 'INTEGER',
'REAL', 'DOUBLE', 'FLOAT', 'DECIMAL', 'NUMERIC')
DATETIME = DBAPITypeObject('TIMESTAMP')
ROWID = DBAPITypeObject('')
Date = datetime.date
Time = datetime.time
Timestamp = datetime.datetime
def connect(s3_staging_dir=None, access_key=None, secret_key=None,
region_name=None, profile_name=None, credential_file=None,
jvm_options=None, converter=None, formatter=None, jvm_path=None,
**kwargs):
from pyathenajdbc.connection import Connection
return Connection(s3_staging_dir, access_key, secret_key,
region_name, profile_name, credential_file,
jvm_options, converter, formatter, jvm_path,
**kwargs)
| # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
import datetime
__version__ = '1.0.4'
__athena_driver_version__ = '1.0.0'
# Globals https://www.python.org/dev/peps/pep-0249/#globals
apilevel = '2.0'
threadsafety = 3
paramstyle = 'pyformat'
ATHENA_JAR = 'AthenaJDBC41-{0}.jar'.format(__athena_driver_version__)
ATHENA_DRIVER_DOWNLOAD_URL = 'https://s3.amazonaws.com/athena-downloads/drivers/{0}'.format(
ATHENA_JAR)
ATHENA_DRIVER_CLASS_NAME = 'com.amazonaws.athena.jdbc.AthenaDriver'
ATHENA_CONNECTION_STRING = 'jdbc:awsathena://athena.{region}.amazonaws.com:443/'
class DBAPITypeObject:
"""Type Objects and Constructors
https://www.python.org/dev/peps/pep-0249/#type-objects-and-constructors
"""
def __init__(self, *values):
self.values = values
def __cmp__(self, other):
if other in self.values:
return 0
if other < self.values:
return 1
else:
return -1
STRING = DBAPITypeObject('CHAR', 'NCHAR',
'VARCHAR', 'NVARCHAR',
'LONGVARCHAR', 'LONGNVARCHAR')
BINARY = DBAPITypeObject('BINARY', 'VARBINARY', 'LONGVARBINARY')
NUMBER = DBAPITypeObject('BOOLEAN', 'TINYINT', 'SMALLINT', 'BIGINT', 'INTEGER',
'REAL', 'DOUBLE', 'FLOAT', 'DECIMAL', 'NUMERIC')
DATETIME = DBAPITypeObject('TIMESTAMP')
ROWID = DBAPITypeObject('')
Date = datetime.date
Time = datetime.time
Timestamp = datetime.datetime
def connect(s3_staging_dir=None, access_key=None, secret_key=None,
region_name=None, profile_name=None, credential_file=None,
jvm_options=None, converter=None, formatter=None, jvm_path=None,
**kwargs):
from pyathenajdbc.connection import Connection
return Connection(s3_staging_dir, access_key, secret_key,
region_name, profile_name, credential_file,
jvm_options, converter, formatter, jvm_path,
**kwargs)
| mit | Python |
c201cfbf12286da220f0b656bbef256132554a66 | switch nose to pytest remocing class implementation | sequana/sequana,sequana/sequana,sequana/sequana,sequana/sequana,sequana/sequana | test/scripts/test_sequana_taxonomy.py | test/scripts/test_sequana_taxonomy.py | from sequana.scripts import taxonomy
from sequana import sequana_data
import pytest
prog = "sequana_taxonomy"
@pytest.fixture
def krakendb():
# todo
try:
taxonomy.main(["taxonomy", '--download', 'toydb'])
except SystemExit:
pass
def test_analysis(krakendb):
    """Run the taxonomy pipeline end to end on the bundled FastQ pair."""
    file1 = sequana_data("Hm2_GTGAAA_L005_R1_001.fastq.gz")
    file2 = sequana_data("Hm2_GTGAAA_L005_R2_001.fastq.gz")

    # The database argument is mandatory: omitting it must raise.
    # The previous try/``assert False``/bare-except pattern could never
    # fail because the bare except also swallowed the AssertionError.
    # BaseException is used so an argparse SystemExit also satisfies it.
    with pytest.raises(BaseException):
        taxonomy.main([prog, '--file1', file1])

    from tempfile import TemporaryDirectory
    # Context manager guarantees the scratch directory is cleaned up
    # (the old code created a TemporaryDirectory and never closed it).
    with TemporaryDirectory() as directory:
        taxonomy.main([prog, '--file1', file1, "--database", "toydb",
                       "--file2", file2, "--verbose",
                       "--output-directory", directory])
        from sequana import logger
        logger.info(directory)
def test_help():
    """``--help`` must print usage and exit via SystemExit (argparse)."""
    # Replaces the old try/``assert False``/except/else construction,
    # whose ``else`` branch was unreachable (``assert False`` always
    # raised before the else could run).
    with pytest.raises(SystemExit):
        taxonomy.main([prog, '--help', '1>/tmp/out', '2>/tmp/err'])
def _test_wrong_db():
    """An unknown database name must make the script fail.

    Still disabled (leading underscore), matching the original.
    NOTE(review): confirm the exact exception ``taxonomy.main`` raises so
    the expectation can be narrowed from BaseException.
    """
    # The previous try/``assert False``/bare-except version could never
    # fail: the bare ``except`` also caught the AssertionError, so the
    # test passed whether or not an error was raised.
    with pytest.raises(BaseException):
        taxonomy.main([prog, "--database", "dummy"])
| from sequana.scripts import taxonomy
from nose.plugins.attrib import attr
from sequana import sequana_data
class TestPipeline(object):
    """Legacy nose-style test suite for the ``sequana_taxonomy`` script.

    NOTE(review): relies on nose idioms (``setup_class``/``setUp``);
    superseded by plain pytest functions in the newer module layout.
    """

    @classmethod
    def setup_class(klass):
        # Command name shared by every test in the class.
        klass.prog = "sequana_taxonomy"
        klass.params = {'prog': klass.prog}

    def setUp(self):
        # Download the toy kraken database before each test;
        # taxonomy.main may sys.exit() when done, hence the guard.
        try:
            taxonomy.main(["taxonomy", '--download', 'toydb'])
        except SystemExit:
            pass

    def test_help(self):
        # --help must terminate via SystemExit (argparse behaviour);
        # the ``else`` branch is unreachable (assert False raises first).
        try:
            taxonomy.main([self.prog, '--help', '1>/tmp/out', '2>/tmp/err'])
            assert False
        except SystemExit:
            pass
        else:
            raise Exception

    def test_input(self):
        # Re-downloading the toy database should be harmless.
        try:
            df = taxonomy.main([self.prog, '--download', 'toydb'])
        except SystemExit:
            pass

    def test_analysis(self):
        file1 = sequana_data("Hm2_GTGAAA_L005_R1_001.fastq.gz")
        file2 = sequana_data("Hm2_GTGAAA_L005_R2_001.fastq.gz")
        # Test that database must be provided
        # NOTE(review): the bare except below also swallows the
        # AssertionError from ``assert False`` -- this check can never
        # actually fail.
        try:
            df = taxonomy.main([self.prog, '--file1', file1])
            assert False
        except:
            assert True
        from tempfile import TemporaryDirectory
        # NOTE(review): the directory object is never closed, so the
        # scratch files linger until interpreter shutdown.
        directory = TemporaryDirectory()
        df = taxonomy.main([self.prog, '--file1', file1, "--database", "toydb",
                            "--file2", file2, "--verbose",
                            "--output-directory", directory.name])
        from sequana import logger
        logger.info(directory.name)
        # cleanup

    def _test_wrong_db(self):
        # Disabled test (leading underscore): a wrong DB name should fail.
        # Same always-pass bare-except pattern as test_analysis above.
        try:
            df = taxonomy.main([self.prog, "--database", "dummy"])
            assert False
        except:
            assert True
| bsd-3-clause | Python |
ba22bc23bf5d145e543a2f1b08fc0d6ea9247710 | Fix chunk alignment | matwey/pybeam | pybeam/beam_construct.py | pybeam/beam_construct.py | #
# Copyright (c) 2013 Matwey V. Kornilov <matwey.kornilov@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
from construct import *
# Chunk body layouts for the BEAM container.  Each chunk body starts with
# a 32-bit big-endian element count followed by that many entries.

# "Atom" chunk: the atom table, one length-prefixed (Pascal) string each.
chunk_atom = Struct("chunk_atom",
    UBInt32("len"),
    Array(lambda ctx: ctx.len, PascalString("atom"))
)

# "ExpT" chunk: exported functions as (function, arity, label) triples.
chunk_expt = Struct("chunk_expt",
    UBInt32("len"),
    Array(lambda ctx: ctx.len, Struct("entry",
        UBInt32("function"),
        UBInt32("arity"),
        UBInt32("label"),
        )
    )
)

# "ImpT" chunk: imported functions as (module, function, arity) triples.
chunk_impt = Struct("chunk_impt",
    UBInt32("len"),
    Array(lambda ctx: ctx.len, Struct("entry",
        UBInt32("module"),
        UBInt32("function"),
        UBInt32("arity"),
        )
    )
)

# "LocT" chunk: local functions, same entry layout as "ExpT".
chunk_loct = Struct("chunk_loct",
    UBInt32("len"),
    Array(lambda ctx: ctx.len, Struct("entry",
        UBInt32("function"),
        UBInt32("arity"),
        UBInt32("label"),
        )
    )
)
# A single IFF-style chunk: 4-byte tag, 32-bit big-endian payload size,
# a payload parsed according to the tag (unknown tags are skipped raw),
# then zero padding up to the next 4-byte boundary.
chunk = Struct("chunk",
    String("chunk_name",4),
    UBInt32("size"),
    SeqOfOne("payload",
        Switch("payload", lambda ctx: ctx.chunk_name,
            {
                "Atom" : chunk_atom,
                "ExpT" : chunk_expt,
                "ImpT" : chunk_impt,
#                "Code" : chunk_code,
#                "StrT" : chunk_strt,
#                "Attr" : chunk_attr,
#                "CInf" : chunk_cinf,
                "LocT" : chunk_loct,
#                "Trac" : chunk_trac,
            },
            default = String("skip", lambda ctx: ctx.size)
        ),
        # Pad to the next 4-byte boundary.  The previous expression
        # ``(ctx.size + 4) % 4`` reduces to ``size % 4`` and mis-padded
        # e.g. size=5 (pad 1 instead of 3); the pad must be the distance
        # up to the next multiple of four.
        Padding(lambda ctx: (4 - ctx.size % 4) % 4, pattern = "\00"),
        nested = False,
    )
)
# Top-level BEAM file: an IFF "FOR1" container whose form type is "BEAM",
# followed by as many chunks as fit in the file.
beam = Struct("beam",
    OneOf(String('for1',4),['FOR1']),
    UBInt32("size"),
    OneOf(String('beam',4),['BEAM']),
    GreedyRange(chunk),
)

# Only the assembled top-level parser is public API.
__all__ = ["beam"]
| #
# Copyright (c) 2013 Matwey V. Kornilov <matwey.kornilov@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
from construct import *
def align4(n):
    """Round *n* up to the next multiple of four (IFF chunk alignment).

    The previous expression ``n + ((n + 4) % 4)`` reduces to
    ``n + n % 4``, which is wrong whenever ``n % 4`` is 2 or 3
    (e.g. align4(5) gave 6 instead of 8, align4(7) gave 10 instead of 8).
    """
    return (n + 3) & ~3
# Chunk body layouts: each body is a 32-bit big-endian element count
# followed by that many fixed-shape entries.

# "Atom" chunk: atom table, one length-prefixed (Pascal) string per atom.
chunk_atom = Struct("chunk_atom",
    UBInt32("len"),
    Array(lambda ctx: ctx.len, PascalString("atom"))
)

# "ExpT" chunk: exported functions as (function, arity, label) triples.
chunk_expt = Struct("chunk_expt",
    UBInt32("len"),
    Array(lambda ctx: ctx.len, Struct("entry",
        UBInt32("function"),
        UBInt32("arity"),
        UBInt32("label"),
        )
    )
)

# "ImpT" chunk: imported functions as (module, function, arity) triples.
chunk_impt = Struct("chunk_impt",
    UBInt32("len"),
    Array(lambda ctx: ctx.len, Struct("entry",
        UBInt32("module"),
        UBInt32("function"),
        UBInt32("arity"),
        )
    )
)

# "LocT" chunk: local functions, same entry layout as "ExpT".
chunk_loct = Struct("chunk_loct",
    UBInt32("len"),
    Array(lambda ctx: ctx.len, Struct("entry",
        UBInt32("function"),
        UBInt32("arity"),
        UBInt32("label"),
        )
    )
)
# A single chunk: 4-byte tag, 32-bit big-endian payload size, payload.
# Unknown tags are skipped by reading align4(size) raw bytes so the
# stream stays 4-byte aligned.
# NOTE(review): only the ``default`` branch consumes alignment padding;
# a recognised chunk whose size is not a multiple of four would leave
# the parser mid-padding -- confirm against real BEAM files.
chunk = Struct("chunk",
    String("chunk_name",4),
    UBInt32("size"),
    Switch("payload", lambda ctx: ctx.chunk_name,
        {
            "Atom" : chunk_atom,
            "ExpT" : chunk_expt,
            "ImpT" : chunk_impt,
#            "Code" : chunk_code,
#            "StrT" : chunk_strt,
#            "Attr" : chunk_attr,
#            "CInf" : chunk_cinf,
            "LocT" : chunk_loct,
#            "Trac" : chunk_trac,
        },
        default = String("skip", lambda ctx: align4(ctx.size))
    ),
)
# Top-level BEAM file: an IFF "FOR1" container with form type "BEAM",
# followed by as many chunks as fit in the file.
beam = Struct("beam",
    OneOf(String('for1',4),['FOR1']),
    UBInt32("size"),
    OneOf(String('beam',4),['BEAM']),
    GreedyRange(chunk),
)

# Only the assembled top-level parser is public API.
__all__ = ["beam"]
| mit | Python |
ac4e2f23014c6aa2e3f27cd2556dd06e5188d5e2 | Update tree package __init__ to match other subpackages. | pyconll/pyconll,pyconll/pyconll | pyconll/tree/__init__.py | pyconll/tree/__init__.py | """
Defines a tree data structure for internal use within pyconll. This module's
logic is not intended to be used outside of pyconll, and is exposed here only
so that pyconll methods that expose the Tree data structure will have
appropriate documentation.
"""
from .tree import Tree

# Star-imports should hand out the Tree class itself; the previous value
# ``['tree']`` exported the ``tree`` submodule instead, contradicting the
# explicit ``from .tree import Tree`` above.
__all__ = ['Tree']
| """
Defines the modules for interfacing with CoNLL sentences as trees. This
is a utility module which when provided a Sentence constructs the appropriate
or corresponding tree structure.
"""
| mit | Python |
2a402751f03da41aa9da4bed5add89e4746b5de9 | Bump version | messente/verigator-python | messente/verigator/__init__.py | messente/verigator/__init__.py | __version__ = "1.0.0"
| __version__ = "0.0.1"
| apache-2.0 | Python |
93bfbbb5ef729dd78087e8846cbe924b21b6e87e | Simplify plugin info declaration | pyexcel/pyexcel-text,pyexcel/pyexcel-text | pyexcel_text/__init__.py | pyexcel_text/__init__.py | """
pyexcel_text
~~~~~~~~~~~~~~~~~~~
Provide text output
:copyright: (c) 2014-2016 by C. W.
:license: New BSD
"""
from pyexcel.internal.common import PyexcelPluginList

# Register this package's renderers with pyexcel's plugin machinery.
# pyexcel scans modules for the ``__pyexcel_plugins__`` name; each
# ``add_a_renderer`` call maps a set of output file types onto the
# submodule implementing them.
__pyexcel_plugins__ = PyexcelPluginList(__name__).add_a_renderer(
    # Tabular text formats rendered by the ``_text`` submodule.
    submodule='_text',
    file_types=[
        'html',
        'simple',
        'plain',
        'grid',
        'pipe',
        'orgtbl',
        'rst',
        'mediawiki',
        'latex',
        'latex_booktabs'
    ],
    stream_type='string'
).add_a_renderer(
    # JSON output is a separate renderer backed by ``_json``.
    submodule='_json',
    file_types=['json'],
    stream_type='string'
)
| """
pyexcel_text
~~~~~~~~~~~~~~~~~~~
Provide text output
:copyright: (c) 2014-2016 by C. W.
:license: New BSD
"""
# Renderer plugin descriptor consumed by pyexcel's plugin loader:
# maps the listed output file types onto the ``_text`` submodule.
__TEXT_META__ = {
    'plugin_type': 'renderer',
    'submodule': '_text',
    'file_types': [
        'html',
        'simple',
        'plain',
        'grid',
        'pipe',
        'orgtbl',
        'rst',
        'mediawiki',
        'latex',
        'latex_booktabs'
    ],
    'stream_type': 'string'
}

# JSON output is a separate renderer backed by the ``_json`` submodule.
__JSON_META__ = {
    'plugin_type': 'renderer',
    'submodule': '_json',
    'file_types': ['json'],
    'stream_type': 'string'
}

# pyexcel scans modules for this name to discover available plugins.
__pyexcel_plugins__ = [
    __TEXT_META__,
    __JSON_META__
]
| bsd-3-clause | Python |
0a1b3e2f27276145dc1c5698a5541b5d85f03091 | implement /login route | NaturalSolutions/NsPortal,NaturalSolutions/NsPortal,NaturalSolutions/NsPortal | Back/ns_portal/resources/root/security/oauth2/v1/login/login_resource.py | Back/ns_portal/resources/root/security/oauth2/v1/login/login_resource.py | from ns_portal.core.resources import (
MetaEndPointResource
)
from marshmallow import (
Schema,
fields,
EXCLUDE,
ValidationError
)
from ns_portal.database.main_db import (
TUsers
)
from sqlalchemy import (
and_,
select
)
from sqlalchemy.orm.exc import (
MultipleResultsFound
)
from ns_portal.database import (
Main_Db_Base
)
from pyramid.security import (
Allow,
Everyone,
remember
)
from ns_portal.utils import (
getToken
)
from pyramid.response import (
Response
)
class loginSchema(Schema):
    """Marshmallow schema validating the POSTed login form.

    Both fields are mandatory; unknown form fields are silently dropped
    (``Meta.unknown = EXCLUDE``).
    """
    username = fields.String(required=True)
    password = fields.String(required=True)

    class Meta:
        unknown = EXCLUDE
class LoginResource(MetaEndPointResource):
    """``/login`` endpoint: validates credentials and returns a JWT."""

    # Anyone (even anonymous) may POST to this endpoint.
    __acl__ = [
        (Allow, Everyone, 'create')
    ]

    def validateUserCredentials(self, data):
        """Return the matching TUsers row or raise ValidationError.

        NOTE(review): the password is compared in clear text against
        ``TUse_Password`` -- the column appears to store plaintext
        passwords; this should be replaced with salted hashing.
        """
        query = self.request.dbsession.query(TUsers)
        query = query.filter(
            and_(
                TUsers.TUse_Login == data.get('username'),
                TUsers.TUse_Password == data.get('password')
            )
        )
        try:
            res = query.one_or_none()
        except MultipleResultsFound:
            raise ValidationError({
                "error": (
                    f'your username and password are'
                    f' not unique in db'
                    f' please report it to an admin'
                )
            })
        if res:
            # this key is added after validation
            return res
        else:
            # Same message whether the user or the password is wrong, so
            # the endpoint does not leak which usernames exist.
            raise ValidationError({
                "error": (
                    f'your username and/or password'
                    f' are wrongs'
                )
            })

    def buildPayload(self, params, policy):
        """Build the JWT claims dict for *params* (a TUsers row).

        Roles come from the VAllUsersApplications view, restricted to the
        site named by *policy* and excluding the 'Interdit' (forbidden)
        role, ordered by TIns_Order.
        """
        viewToQuery = Main_Db_Base.metadata.tables['VAllUsersApplications']
        query = select([
            viewToQuery
        ]).where((
            viewToQuery.c['TSit_Name'] == getattr(policy, 'TSit_Name'))
            &
            (viewToQuery.c['TUse_PK_ID'] == getattr(params, 'TUse_PK_ID'))
            &
            (viewToQuery.c['TRol_Label'] != 'Interdit'))
        query = query.order_by(viewToQuery.c['TIns_Order'])
        result = self.request.dbsession.execute(query).fetchall()
        payload = {
            "iss": 'NSPortal',
            "sub": getattr(params, 'TUse_PK_ID'),
            "username": getattr(params, 'TUse_Login'),
            "userlanguage": getattr(params, 'TUse_Language'),
            "roles": {
                row.TIns_Label: row.TRol_Label for row in result
            }
        }
        return payload

    def POST(self):
        """Validate the login form, mint a token and return it as JSON."""
        reqParams = self.__parser__(
            args=loginSchema(),
            location='form'
        )
        userFound = self.validateUserCredentials(data=reqParams)
        if userFound:
            token = getToken(
                idUser=getattr(userFound, 'TUse_PK_ID'),
                request=self.request
            )
            toRet = Response(
                status=200,
                json_body={
                    "token": token.decode('utf-8')
                }
            )
            # ``remember`` expects the *request* and returns the headers
            # to attach to the response.  The previous call
            # ``remember(toRet, token)`` passed the response as first
            # argument and discarded the returned headers, so no auth
            # header was ever set on the reply.
            toRet.headerlist.extend(remember(self.request, token))
            return toRet
        else:
            raise ValidationError({
                "error": (
                    f'your username and/or password'
                    f' are wrongs'
                )
            })
| from ns_portal.core.resources import (
MetaEndPointResource
)
class LoginResource(MetaEndPointResource):
    # Placeholder endpoint: the /login behaviour is not implemented yet.
    pass
| mit | Python |
f84da456028c5148f8c18041924a2f264ea6484b | set production s3 to use the cloudfront cdn | dstufft/jutils | crate_project/settings/production/base.py | crate_project/settings/production/base.py | from ..base import *
SITE_ID = 3
EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend"
SERVER_EMAIL = "server@crate.io"
DEFAULT_FROM_EMAIL = "donald@crate.io"
CONTACT_EMAIL = "donald@crate.io"
MIDDLEWARE_CLASSES += ["privatebeta.middleware.PrivateBetaMiddleware"]
DEFAULT_FILE_STORAGE = "storages.backends.s3boto.S3BotoStorage"
STATICFILES_STORAGE = "storages.backends.s3boto.S3BotoStorage"
AWS_STORAGE_BUCKET_NAME = "crate-production"
AWS_S3_CUSTOM_DOMAIN = "packages.crate.io"
PRIVATE_BETA_ALLOWED_URLS = [
"/account/login/",
"/account/signup/",
]
| from ..base import *
SITE_ID = 3
EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend"
SERVER_EMAIL = "server@crate.io"
DEFAULT_FROM_EMAIL = "donald@crate.io"
CONTACT_EMAIL = "donald@crate.io"
MIDDLEWARE_CLASSES += ["privatebeta.middleware.PrivateBetaMiddleware"]
DEFAULT_FILE_STORAGE = "storages.backends.s3boto.S3BotoStorage"
STATICFILES_STORAGE = "storages.backends.s3boto.S3BotoStorage"
AWS_STORAGE_BUCKET_NAME = "crate-production"
PRIVATE_BETA_ALLOWED_URLS = [
"/account/login/",
"/account/signup/",
]
| bsd-2-clause | Python |
7f0f1f9eddb10eb4bde1173bc02272b1ae3bd7b7 | Add celerybeat tasks to test_project settings | yprez/django-useful,yprez/django-useful | test_project/test_project/settings.py | test_project/test_project/settings.py | # Django settings for test_project project.
# Settings exist only to run the test suite, never to serve a real site.
DEBUG = True
TEMPLATE_DEBUG = DEBUG

ADMINS = (
    # ('Your Name', 'your_email@example.com'),
)

MANAGERS = ADMINS

# Throwaway file-based SQLite database.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': 'test.db',
    }
}

TIME_ZONE = 'Etc/UTC'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
STATIC_URL = '/static/'

# Hard-coded key is acceptable only because this project is never deployed.
SECRET_KEY = 't^4dt#fkxftpborp@%lg*#h2wj%vizl)#pkkt$&0f7b87rbu6y'

TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    # Uncomment the next line for simple clickjacking protection:
    # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)

ROOT_URLCONF = 'test_project.urls'
WSGI_APPLICATION = 'test_project.wsgi.application'

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # 'django.contrib.admin',
    'djcelery',
    'useful', # Import the app to run tests
)
# In-memory broker plus eager mode: tasks execute synchronously in-process
# during tests instead of going through a real message queue.
BROKER_BACKEND = 'memory'
CELERY_ALWAYS_EAGER = True

from datetime import timedelta

# Periodic (celerybeat) schedule: run the generic management-command task
# every 10 seconds, invoking Django's ``validate`` command.
CELERYBEAT_SCHEDULE = {
    'cleanup': {
        'task': 'useful.tasks.call_management_command',
        'schedule': timedelta(seconds=10),
        'args': ('validate', ),
    },
}
| # Django settings for test_project project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'test.db',
}
}
TIME_ZONE = 'Etc/UTC'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
STATIC_URL = '/static/'
SECRET_KEY = 't^4dt#fkxftpborp@%lg*#h2wj%vizl)#pkkt$&0f7b87rbu6y'
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'test_project.urls'
WSGI_APPLICATION = 'test_project.wsgi.application'
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# 'django.contrib.admin',
'djcelery',
'useful', # Import the app to run tests
)
BROKER_BACKEND = 'memory'
CELERY_ALWAYS_EAGER = True
| isc | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.