commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13
values | lang stringclasses 23
values |
|---|---|---|---|---|---|---|---|---|
e692728608e625fa4e0ae44af3f88154f6e421ee | Revise doc string. | bowen0701/algorithms_data_structures | alg_fibonacci.py | alg_fibonacci.py | """Fibonacci series:
0, 1, 1, 2, 3, 5, 8,...
- Fib(0) = 0
- Fib(1) = 1
- Fib(n) = Fib(n - 1) + Fib(n - 2)
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
def fibonacci_recur(n):
"""Get nth number of Fibonacci series by recursion.
Both of time & space complexities are O(n).
"""
if n <= 1:
return n
else:
return fibonacci_recur(n - 1) + fibonacci_recur(n - 2)
def fibonacci_dp(n):
"""Get nth number of Fibonacci series by dynamic programming.
DP performs much faster than recursion:
- Time complexity is still O(n).
- Space complexity is O(1), improving recusion version.
"""
a, b = 0, 1
for _ in xrange(n):
a, b = a+b, a
return a
def main():
import time
n = 35
start_time = time.time()
print('{}th number of Fibonacci series by recursion: {}'
.format(n, fibonacci_recur(n)))
print('Time: {}'.format(time.time() - start_time))
start_time = time.time()
print('{}th number of Fibonacci series by dynamic programming: {}'
.format(n, fibonacci_dp(n)))
print('Time: {}'.format(time.time() - start_time))
if __name__ == '__main__':
main()
| """Fibonacci series:
0, 1, 1, 2, 3, 5, 8,...
- Fib(0) = 0
- Fib(1) = 1
- Fib(n) = Fib(n - 1) + Fib(n - 2)
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
def fibonacci_recur(n):
"""Get nth number of Fibonacci series by recursion."""
if n <= 1:
return n
else:
return fibonacci_recur(n - 1) + fibonacci_recur(n - 2)
def fibonacci_dp(n):
"""Get nth number of Fibonacci series by dynamic programming.
DP performs much faster than recursion.
"""
a, b = 0, 1
for _ in xrange(n):
a, b = a+b, a
return a
def main():
import time
n = 35
start_time = time.time()
print('{}th number of Fibonacci series by recursion: {}'
.format(n, fibonacci_recur(n)))
print('Time: {}'.format(time.time() - start_time))
start_time = time.time()
print('{}th number of Fibonacci series by recursion: {}'
.format(n, fibonacci_dp(n)))
print('Time: {}'.format(time.time() - start_time))
if __name__ == '__main__':
main()
| bsd-2-clause | Python |
122ba6d8c6cc28e3b9c1d7abd942d51409cd70cf | Add package details | joshainglis/pyley,joshainglis/pyley,ziyasal/pyley,ziyasal/pyley | pyley/__init__.py | pyley/__init__.py | """
pyley Python client for an open-source graph database Cayley
:copyright: (c) 2014 by Ziya SARIKAYA @ziyasal.
:license: MIT, see LICENSE for more details.
"""
__title__ = 'pyley'
__version__ = '0.1.0'
__build__ = 0x00001
__author__ = 'Ziya SARIKAYA @ziyasal'
__license__ = 'MIT'
__copyright__ = 'Copyright 2014 Ziya SARIKAYA @ziyasal'
VERSION = tuple(map(int, __version__.split('.')))
| __version__ = '0.1.0'
VERSION = tuple(map(int, __version__.split('.'))) | mit | Python |
6d4d507049c582ca01448510d873bf8ab6b30656 | fix project_name | Railslide/subvenv,Railslide/subvenv | virtualenvwrapper/subvenv.py | virtualenvwrapper/subvenv.py | import json
import logging
import os
log = logging.getLogger(__name__)
def post_mkproject(args):
project_name = os.path.basename(os.getenv('VIRTUAL_ENV'))
project_venv = os.path.join(os.getenv('WORKON_HOME'), project_name)
project_path_file = os.path.join(project_venv, '.project')
project_interpreter = os.path.join(project_venv, 'bin/python')
with open(project_path_file, 'r') as f:
project_folder = f.readline().rstrip('\r\n')
sublime_file_name = "{}.sublime-project".format(project_name)
settings_text = {
"folders": [
{
"follow_symlinks": True,
"path": project_folder,
},
],
"settings": {
"python_interpreter": project_interpreter,
},
}
target_path = (os.path.join(project_folder, sublime_file_name))
with open(target_path, 'w') as f:
f.write(json.dumps(settings_text, sort_keys=True, indent=4))
return
| import json
import logging
import os
log = logging.getLogger(__name__)
def post_mkproject(args):
project_name = os.path.split(os.getenv('PWD'))[1]
project_venv = os.path.join(os.getenv('WORKON_HOME'), project_name)
project_path_file = os.path.join(project_venv, '.project')
project_interpreter = os.path.join(project_venv, 'bin/python')
with open(project_path_file, 'r') as f:
project_folder = f.readline().rstrip('\r\n')
sublime_file_name = "{}.sublime-project".format(project_name)
settings_text = {
"folders":
[
{
"follow_symlinks": True,
"path": project_folder,
},
],
"settings":
{
"python_interpreter": project_interpreter,
},
}
target_path = (os.path.join(project_folder, sublime_file_name))
with open(target_path, 'w') as f:
f.write(json.dumps(settings_text, sort_keys=True, indent=4))
return
if __name__ == '__main__':
post_mkproject('hello')
| mit | Python |
5a3033aeb63ee9302cba8919814f798ab41bd79c | Add /dashboards to list of paths | alexanderlz/redash,alexanderlz/redash,stefanseifert/redash,vishesh92/redash,M32Media/redash,useabode/redash,crowdworks/redash,moritz9/redash,denisov-vlad/redash,amino-data/redash,stefanseifert/redash,getredash/redash,vishesh92/redash,moritz9/redash,moritz9/redash,chriszs/redash,getredash/redash,rockwotj/redash,moritz9/redash,chriszs/redash,hudl/redash,rockwotj/redash,denisov-vlad/redash,M32Media/redash,alexanderlz/redash,rockwotj/redash,hudl/redash,imsally/redash,getredash/redash,denisov-vlad/redash,EverlyWell/redash,imsally/redash,amino-data/redash,vishesh92/redash,EverlyWell/redash,hudl/redash,crowdworks/redash,hudl/redash,useabode/redash,stefanseifert/redash,crowdworks/redash,M32Media/redash,EverlyWell/redash,denisov-vlad/redash,vishesh92/redash,amino-data/redash,amino-data/redash,44px/redash,imsally/redash,M32Media/redash,stefanseifert/redash,EverlyWell/redash,crowdworks/redash,denisov-vlad/redash,rockwotj/redash,useabode/redash,stefanseifert/redash,imsally/redash,useabode/redash,getredash/redash,44px/redash,chriszs/redash,chriszs/redash,alexanderlz/redash,getredash/redash,44px/redash,44px/redash | redash/handlers/static.py | redash/handlers/static.py | import os
from flask import current_app, safe_join, send_file
from flask_login import login_required
from redash import settings
from redash.handlers import routes
from redash.handlers.base import org_scoped_rule
from werkzeug.exceptions import NotFound
@routes.route('/<path:filename>')
def send_static(filename):
if current_app.debug:
cache_timeout = 0
else:
cache_timeout = None
# The following is copied from send_from_directory, and extended to support multiple directories
for path in settings.STATIC_ASSETS_PATHS:
full_path = safe_join(path, filename)
if os.path.isfile(full_path):
return send_file(full_path, **dict(cache_timeout=cache_timeout, conditional=True))
raise NotFound()
@login_required
def index(**kwargs):
full_path = safe_join(settings.STATIC_ASSETS_PATHS[-2], 'index.html')
return send_file(full_path, **dict(cache_timeout=0, conditional=True))
def register_static_routes(rules):
# Make sure that / is the first route considered as index.
routes.add_url_rule(org_scoped_rule("/"), "index", index)
for rule in rules:
routes.add_url_rule(org_scoped_rule(rule), None, index)
rules = ['/admin/<anything>/<whatever>',
'/admin/<anything>',
'/dashboards',
'/dashboard/<anything>',
'/dashboards/<anything>',
'/alerts',
'/alerts/<pk>',
'/queries',
'/data_sources',
'/data_sources/<pk>',
'/users',
'/users/<pk>',
'/destinations',
'/destinations/<pk>',
'/query_snippets',
'/query_snippets/<pk>',
'/groups',
'/groups/<pk>',
'/groups/<pk>/data_sources',
'/queries/<query_id>',
'/queries/<query_id>/<anything>',
'/personal']
register_static_routes(rules)
| import os
from flask import current_app, safe_join, send_file
from flask_login import login_required
from redash import settings
from redash.handlers import routes
from redash.handlers.base import org_scoped_rule
from werkzeug.exceptions import NotFound
@routes.route('/<path:filename>')
def send_static(filename):
if current_app.debug:
cache_timeout = 0
else:
cache_timeout = None
# The following is copied from send_from_directory, and extended to support multiple directories
for path in settings.STATIC_ASSETS_PATHS:
full_path = safe_join(path, filename)
if os.path.isfile(full_path):
return send_file(full_path, **dict(cache_timeout=cache_timeout, conditional=True))
raise NotFound()
@login_required
def index(**kwargs):
full_path = safe_join(settings.STATIC_ASSETS_PATHS[-2], 'index.html')
return send_file(full_path, **dict(cache_timeout=0, conditional=True))
def register_static_routes(rules):
# Make sure that / is the first route considered as index.
routes.add_url_rule(org_scoped_rule("/"), "index", index)
for rule in rules:
routes.add_url_rule(org_scoped_rule(rule), None, index)
rules = ['/admin/<anything>/<whatever>',
'/admin/<anything>',
'/dashboard/<anything>',
'/alerts',
'/alerts/<pk>',
'/queries',
'/data_sources',
'/data_sources/<pk>',
'/users',
'/users/<pk>',
'/destinations',
'/destinations/<pk>',
'/query_snippets',
'/query_snippets/<pk>',
'/groups',
'/groups/<pk>',
'/groups/<pk>/data_sources',
'/queries/<query_id>',
'/queries/<query_id>/<anything>',
'/personal']
register_static_routes(rules)
| bsd-2-clause | Python |
e9fe93d5e178a0e1f63b8bcfc7a004ecf813e16c | Fix extension table removing migration | lonnen/socorro,lonnen/socorro,lonnen/socorro,mozilla/socorro,mozilla/socorro,mozilla/socorro,lonnen/socorro,mozilla/socorro,mozilla/socorro,mozilla/socorro | alembic/versions/0193b4725f32_bug_1132858_remove_extensions.py | alembic/versions/0193b4725f32_bug_1132858_remove_extensions.py | """bug 1132858 remove extensions table
Revision ID: 0193b4725f32
Revises: bb8cdbb8a6bd
Create Date: 2018-01-31 14:21:41.032179
"""
# revision identifiers, used by Alembic.
revision = '0193b4725f32'
down_revision = 'bb8cdbb8a6bd'
from alembic import op
from socorro.lib import citexttype, jsontype, buildtype
from socorro.lib.migrations import fix_permissions, load_stored_proc
import sqlalchemy as sa
from sqlalchemy import types
from sqlalchemy.dialects import postgresql
from sqlalchemy.sql import table, column
def upgrade():
# Get a list of ALL tables that start with 'extensions'
connection = op.get_bind()
cursor = connection.connection.cursor()
cursor.execute("""
SELECT table_name
FROM information_schema.tables
WHERE table_name like 'extensions%'
""")
all_table_names = []
for records in cursor.fetchall():
all_table_names.append(records[0])
# Sort table names so "extensions" is last since the others depend on it
# and delete them in that order
all_table_names.sort(reverse=True)
for table_name in all_table_names:
op.execute('DROP TABLE IF EXISTS {}'.format(table_name))
# Now remove the entry from report_partition_info so the crontabber job
# doesn't try to create a new partition
op.execute("""
DELETE FROM report_partition_info WHERE table_name = 'extensions'
""")
def downgrade():
# There is no going back.
pass
| """bug 1132858 remove extensions table
Revision ID: 0193b4725f32
Revises: bb8cdbb8a6bd
Create Date: 2018-01-31 14:21:41.032179
"""
# revision identifiers, used by Alembic.
revision = '0193b4725f32'
down_revision = 'bb8cdbb8a6bd'
from alembic import op
from socorro.lib import citexttype, jsontype, buildtype
from socorro.lib.migrations import fix_permissions, load_stored_proc
import sqlalchemy as sa
from sqlalchemy import types
from sqlalchemy.dialects import postgresql
from sqlalchemy.sql import table, column
def upgrade():
# Get a list of ALL tables that start with 'extensions_*' and drop them
connection = op.get_bind()
cursor = connection.connection.cursor()
cursor.execute("""
SELECT c.relname FROM pg_catalog.pg_class c
WHERE c.relkind = 'r' AND c.relname LIKE 'extensions_%'
""")
all_table_names = []
for records in cursor.fetchall():
table_name, = records
all_table_names.append(table_name)
all_table_names.sort(reverse=True)
for table_name in all_table_names:
op.execute('DROP TABLE IF EXISTS {}'.format(table_name))
# Now drop the extensions table itself
op.execute('DROP TABLE IF EXISTS extensions')
# Now remove the entry from report_partition_info so the crontabber job
# doesn't try to create a new partition
op.execute("""
DELETE FROM report_partition_info WHERE table_name = 'extensions'
""")
def downgrade():
# There is no going back.
pass
| mpl-2.0 | Python |
f5a67ab54324e0eb1779ee863d0f2a458bd6f0a3 | Bump module version to 10.0.1.0.0 | akretion/l10n-brazil,OCA/l10n-brazil,akretion/l10n-brazil,akretion/l10n-brazil,OCA/l10n-brazil,OCA/l10n-brazil | l10n_br_sale/__manifest__.py | l10n_br_sale/__manifest__.py | # -*- coding: utf-8 -*-
# Copyright (C) 2009 Renato Lima - Akretion
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
{
'name': 'Brazilian Localization Sale',
'category': 'Localisation',
'license': 'AGPL-3',
'author': 'Akretion, Odoo Community Association (OCA)',
'website': 'http://odoo-brasil.org',
'version': '10.0.1.0.0',
'depends': [
'l10n_br_account',
'account_fiscal_position_rule_sale',
],
'data': [
'data/l10n_br_sale_data.xml',
'views/sale_view.xml',
'views/res_config_view.xml',
'security/ir.model.access.csv',
'security/l10n_br_sale_security.xml',
'report/sale_report_view.xml',
],
'test': [],
'demo': [],
'installable': False,
'auto_install': True,
}
| # -*- coding: utf-8 -*-
# Copyright (C) 2009 Renato Lima - Akretion
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
{
'name': 'Brazilian Localization Sale',
'category': 'Localisation',
'license': 'AGPL-3',
'author': 'Akretion, Odoo Community Association (OCA)',
'website': 'http://odoo-brasil.org',
'version': '8.0.1.0.0',
'depends': [
'l10n_br_account',
'account_fiscal_position_rule_sale',
],
'data': [
'data/l10n_br_sale_data.xml',
'views/sale_view.xml',
'views/res_config_view.xml',
'security/ir.model.access.csv',
'security/l10n_br_sale_security.xml',
'report/sale_report_view.xml',
],
'test': [],
'demo': [],
'installable': False,
'auto_install': True,
}
| agpl-3.0 | Python |
cea7e61a22eb0e996c8171b2daec4de562b495ac | Add TODO to replace encoding stuff with urllib.quote where possible | isagalaev/sm-openid,necaris/python3-openid,moreati/python3-openid,moreati/python3-openid,misli/python3-openid,moreati/python3-openid,misli/python3-openid,necaris/python3-openid,misli/python3-openid | openid/codecutil.py | openid/codecutil.py | import codecs
try:
chr(0x10000)
except ValueError:
# narrow python build
UCSCHAR = [
(0xA0, 0xD7FF),
(0xF900, 0xFDCF),
(0xFDF0, 0xFFEF),
]
IPRIVATE = [
(0xE000, 0xF8FF),
]
else:
UCSCHAR = [
(0xA0, 0xD7FF),
(0xF900, 0xFDCF),
(0xFDF0, 0xFFEF),
(0x10000, 0x1FFFD),
(0x20000, 0x2FFFD),
(0x30000, 0x3FFFD),
(0x40000, 0x4FFFD),
(0x50000, 0x5FFFD),
(0x60000, 0x6FFFD),
(0x70000, 0x7FFFD),
(0x80000, 0x8FFFD),
(0x90000, 0x9FFFD),
(0xA0000, 0xAFFFD),
(0xB0000, 0xBFFFD),
(0xC0000, 0xCFFFD),
(0xD0000, 0xDFFFD),
(0xE1000, 0xEFFFD),
]
IPRIVATE = [
(0xE000, 0xF8FF),
(0xF0000, 0xFFFFD),
(0x100000, 0x10FFFD),
]
_ESCAPE_RANGES = UCSCHAR + IPRIVATE
def _in_escape_range(octet):
for start, end in _ESCAPE_RANGES:
if start <= octet <= end:
return True
return False
def _pct_escape_handler(err):
'''
Encoding error handler that does percent-escaping of Unicode, to be used
with codecs.register_error
TODO: replace use of this with urllib.parse.quote as appropriate
'''
chunk = err.object[err.start:err.end]
replacements = []
for octet in chunk.encode("utf-8"):
if _in_escape_range(octet):
replacements.append("%%%X" % octet)
else:
replacements.append(chr(octet))
return ("".join(replacements), err.end)
codecs.register_error("oid_percent_escape", _pct_escape_handler)
| import codecs
try:
chr(0x10000)
except ValueError:
# narrow python build
UCSCHAR = [
(0xA0, 0xD7FF),
(0xF900, 0xFDCF),
(0xFDF0, 0xFFEF),
]
IPRIVATE = [
(0xE000, 0xF8FF),
]
else:
UCSCHAR = [
(0xA0, 0xD7FF),
(0xF900, 0xFDCF),
(0xFDF0, 0xFFEF),
(0x10000, 0x1FFFD),
(0x20000, 0x2FFFD),
(0x30000, 0x3FFFD),
(0x40000, 0x4FFFD),
(0x50000, 0x5FFFD),
(0x60000, 0x6FFFD),
(0x70000, 0x7FFFD),
(0x80000, 0x8FFFD),
(0x90000, 0x9FFFD),
(0xA0000, 0xAFFFD),
(0xB0000, 0xBFFFD),
(0xC0000, 0xCFFFD),
(0xD0000, 0xDFFFD),
(0xE1000, 0xEFFFD),
]
IPRIVATE = [
(0xE000, 0xF8FF),
(0xF0000, 0xFFFFD),
(0x100000, 0x10FFFD),
]
_ESCAPE_RANGES = UCSCHAR + IPRIVATE
def _in_escape_range(octet):
for start, end in _ESCAPE_RANGES:
if start <= octet <= end:
return True
return False
def _pct_escape_handler(err):
'''
Encoding error handler that does percent-escaping of Unicode, to be used
with codecs.register_error
'''
chunk = err.object[err.start:err.end]
replacements = []
for octet in chunk.encode("utf-8"):
if _in_escape_range(octet):
replacements.append("%%%X" % octet)
else:
replacements.append(chr(octet))
return ("".join(replacements), err.end)
codecs.register_error("oid_percent_escape", _pct_escape_handler)
| apache-2.0 | Python |
b434c09b38dbed349d0965e60b736eaab23fa218 | Move import in to apps.ready() | onespacemedia/cms-faqs,onespacemedia/cms-faqs | apps/faqs/apps.py | apps/faqs/apps.py | from django.apps import AppConfig
from watson import search as watson
class FaqsConfig(AppConfig):
name = '{{ project_name }}.apps.faqs'
verbose_name = 'FAQ'
verbose_name_plural = 'FAQs'
def ready(self):
from cms.models import PageBaseSearchAdapter
Faq = self.get_model('Faq')
watson.register(Faq, adapter_cls=PageBaseSearchAdapter)
| from cms.models import PageBaseSearchAdapter
from django.apps import AppConfig
from watson import search as watson
class FaqsConfig(AppConfig):
name = '{{ project_name }}.apps.faqs'
verbose_name = 'FAQ'
verbose_name_plural = 'FAQs'
def ready(self):
Faq = self.get_model('Faq')
watson.register(Faq, adapter_cls=PageBaseSearchAdapter)
| mit | Python |
4c76a99e1d72820a367d2195fbd3edc1b0af30fd | Add options to Tag model fields. [skip ci] | jambonrose/DjangoUnleashed-1.8,jambonrose/DjangoUnleashed-1.8 | organizer/models.py | organizer/models.py | from django.db import models
# Model Field Reference
# https://docs.djangoproject.com/en/1.8/ref/models/fields/
class Tag(models.Model):
name = models.CharField(
max_length=31, unique=True)
slug = models.SlugField(
max_length=31,
unique=True,
help_text='A label for URL config.')
class Startup(models.Model):
name = models.CharField(max_length=31)
slug = models.SlugField()
description = models.TextField()
founded_date = models.DateField()
contact = models.EmailField()
website = models.URLField()
tags = models.ManyToManyField(Tag)
class NewsLink(models.Model):
title = models.CharField(max_length=63)
pub_date = models.DateField()
link = models.URLField()
startup = models.ForeignKey(Startup)
| from django.db import models
# Model Field Reference
# https://docs.djangoproject.com/en/1.8/ref/models/fields/
class Tag(models.Model):
name = models.CharField(max_length=31)
slug = models.SlugField()
class Startup(models.Model):
name = models.CharField(max_length=31)
slug = models.SlugField()
description = models.TextField()
founded_date = models.DateField()
contact = models.EmailField()
website = models.URLField()
tags = models.ManyToManyField(Tag)
class NewsLink(models.Model):
title = models.CharField(max_length=63)
pub_date = models.DateField()
link = models.URLField()
startup = models.ForeignKey(Startup)
| bsd-2-clause | Python |
3de2dc88d6ae25200e4c339cf732c665d1c9826a | fix lint | dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq | corehq/apps/domain/management/commands/update_dimagi_created_self_started_projects.py | corehq/apps/domain/management/commands/update_dimagi_created_self_started_projects.py | from django.core.management.base import BaseCommand
from django.contrib.auth.models import User
from corehq.apps.users.models import CouchUser
from corehq.apps.domain.models import Domain
class Command(BaseCommand):
help = "Sets all dimagi account created projects as non-self-started"
@staticmethod
def get_dimagi_account_users():
return User.objects.filter(username__endswith="@dimagi.com")
@staticmethod
def update_domain_if_self_started(domain_name, username):
project = Domain.get_by_name(domain_name)
if (project
and project.creating_user
and project.creating_user == username
and project.internal.self_started):
print("Updating domain: {domain_name} with username: {username}".format(
domain_name=domain_name, username=username))
project.internal.self_started = False
project.save()
def handle(self, *args, **options):
for dimagi_user in self.get_dimagi_account_users():
couch_user = CouchUser.from_django_user(dimagi_user)
if couch_user:
username = dimagi_user.username
print("username: " + username)
for domain_name in couch_user.get_domains():
print("domain: " + domain_name)
self.update_domain_if_self_started(domain_name, username)
| from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
from corehq.apps.users.models import CouchUser
from corehq.apps.domain.models import Domain
class Command(BaseCommand):
help = "Sets all dimagi account created projects as non-self-started"
@staticmethod
def get_dimagi_account_users():
return User.objects.filter(username__endswith="@dimagi.com")
@staticmethod
def update_domain_if_self_started(domain_name, username):
project = Domain.get_by_name(domain_name)
if (project
and project.creating_user
and project.creating_user == username
and project.internal.self_started):
print("Updating domain: {domain_name} with username: {username}".format(
domain_name=domain_name, username=username))
project.internal.self_started = False
project.save()
def handle(self, *args, **options):
for dimagi_user in self.get_dimagi_account_users():
couch_user = CouchUser.from_django_user(dimagi_user)
if couch_user:
username = dimagi_user.username
print("username: " + username)
for domain_name in couch_user.get_domains():
print("domain: " + domain_name)
self.update_domain_if_self_started(domain_name, username)
| bsd-3-clause | Python |
dbf7ab3ae20dd04bf6ead67ae4328a66d3733875 | Add comments | bowen0701/algorithms_data_structures | lc0040_combination_sum_ii.py | lc0040_combination_sum_ii.py | """Leetcode 40. Combination Sum II
Medium
URL: https://leetcode.com/problems/combination-sum-ii/
Given a collection of candidate numbers (candidates) and a target number (target),
find all unique combinations in candidates where the candidate numbers sums to target.
Each number in candidates may only be used once in the combination.
Note:
- All numbers (including target) will be positive integers.
- The solution set must not contain duplicate combinations.
Example 1:
Input: candidates = [10,1,2,7,6,1,5], target = 8,
A solution set is:
[
[1, 7],
[1, 2, 5],
[2, 6],
[1, 1, 6]
]
Example 2:
Input: candidates = [2,5,2,1,2], target = 5,
A solution set is:
[
[1,2,2],
[5]
]
"""
class SolutionBacktrack(object):
def _backtrack(self, result, temp, start, target, candidates):
if target < 0:
# No way to further combine numbers.
return None
if target == 0:
# Use shallow copy.
result.append(temp[:])
return None
# From start to the end of candidates.
for i in range(start, len(candidates)):
# Avoid duplicate by checking the previous cadidate.
if i == start or candidates[i] != candidates[i - 1]:
temp.append(candidates[i])
# Use next index i+1 since we cannot use same element.
self._backtrack(result, temp, i + 1,
target - candidates[i], candidates)
# Pop for backtracking.
temp.pop()
def combinationSum2(self, candidates, target):
"""
:type candidates: List[int]
:type target: int
:rtype: List[List[int]]
Apply backtracking with sorting to avoid duplicates.
Time complexity: O(2^n).
Space complexity: O(k).
"""
# Sort candidates to avoid duplicates.
candidates.sort()
result = []
temp = []
start = 0
self._backtrack(result, temp, start, target, candidates)
return result
def main():
# Output: [[1, 7],[1, 2, 5],[2, 6],[1, 1, 6]]
candidates = [10,1,2,7,6,1,5]
target = 8
print SolutionBacktrack().combinationSum2(candidates, target)
# Output: [[1, 2, 2],[5]]
candidates = [2,5,2,1,2]
target = 5
print SolutionBacktrack().combinationSum2(candidates, target)
if __name__ == '__main__':
main()
| """Leetcode 40. Combination Sum II
Medium
URL: https://leetcode.com/problems/combination-sum-ii/
Given a collection of candidate numbers (candidates) and a target number (target),
find all unique combinations in candidates where the candidate numbers sums to target.
Each number in candidates may only be used once in the combination.
Note:
- All numbers (including target) will be positive integers.
- The solution set must not contain duplicate combinations.
Example 1:
Input: candidates = [10,1,2,7,6,1,5], target = 8,
A solution set is:
[
[1, 7],
[1, 2, 5],
[2, 6],
[1, 1, 6]
]
Example 2:
Input: candidates = [2,5,2,1,2], target = 5,
A solution set is:
[
[1,2,2],
[5]
]
"""
class SolutionBacktrack(object):
def _backtrack(self, result, temp, start, target, candidates):
if target < 0:
# No way to further combine numbers.
return None
if target == 0:
# Use shallow copy.
result.append(temp[:])
return None
# From start to the end of candidates.
for i in range(start, len(candidates)):
if i == start or candidates[i] != candidates[i - 1]:
temp.append(candidates[i])
# Use next index i+1 since we cannot use same element.
self._backtrack(result, temp, i + 1,
target - candidates[i], candidates)
# Pop for backtracking.
temp.pop()
def combinationSum2(self, candidates, target):
"""
:type candidates: List[int]
:type target: int
:rtype: List[List[int]]
Apply backtracking with sorting to avoid duplicates.
Time complexity: O(2^n).
Space complexity: O(k).
"""
# Sort candidates to avoid duplicates.
candidates.sort()
result = []
temp = []
start = 0
self._backtrack(result, temp, start, target, candidates)
return result
def main():
# Output: [[1, 7],[1, 2, 5],[2, 6],[1, 1, 6]]
candidates = [10,1,2,7,6,1,5]
target = 8
print SolutionBacktrack().combinationSum2(candidates, target)
# Output: [[1, 2, 2],[5]]
candidates = [2,5,2,1,2]
target = 5
print SolutionBacktrack().combinationSum2(candidates, target)
if __name__ == '__main__':
main()
| bsd-2-clause | Python |
1a1b8297c495346e42c32e14202c4655afbfd43e | Fix PyHeufyBot migration tool | DesertBot/DesertBot | migration/pyheufybot/util.py | migration/pyheufybot/util.py | from desertbot.config import Config
from desertbot.datastore import DataStore
import argparse, os, shelve
class PyHeufyBotUtil(object):
def __init__(self, section):
parser = argparse.ArgumentParser(description='PyHeufyBot shelve parsing tool.')
parser.add_argument('-s', '--storage', help='The storage file to use', type=str, default='../../data/heufybot.db')
parser.add_argument('-n', '--network', help='The network name to import from', type=str, required=True)
parser.add_argument('-c', '--config', help='the config file to read from', type=str, required=True)
options = parser.parse_args()
self.config = Config(options.config)
self.config.loadConfig()
with shelve.open(options.storage) as storage:
self.data = storage[section][options.network]
storage.close()
self.rootDir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
self.dataPath = os.path.join(self.rootDir, 'data', self.network)
if not os.path.exists(self.dataPath):
os.makedirs(self.dataPath)
self.storage = DataStore(os.path.join(self.dataPath, 'desertbot.json'))
| from desertbot.config import Config
from desertbot.datastore import DataStore
import argparse, os, shelve
class PyHeufyBotUtil(object):
def __init__(self, section):
parser = argparse.ArgumentParser(description='PyHeufyBot shelve parsing tool.')
parser.add_argument('-s', '--storage', help='The storage file to use', type=str, default='../../data/heufybot.db')
parser.add_argument('-n', '--network', help='The network name to import from', type=str, required=True)
parser.add_argument('-c', '--config', help='the config file to read from', type=str, required=True)
options = parser.parse_args()
self.config = Config(options.config)
self.config.loadConfig()
with shelve.open(options.storage) as storage:
self.data = storage[section][options.network]
storage.close()
self.rootDir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
self.dataPath = os.path.join(self.rootDir, 'data', self.server)
if not os.path.exists(self.dataPath):
os.makedirs(self.dataPath)
self.storage = DataStore(os.path.join(self.dataPath, 'desertbot.json'))
| mit | Python |
2a83bb654d625a1f5d879c00f138f77984c14ce5 | add simple test for bug issue #15 | codeforamerica/straymapper,codeforamerica/straymapper,codeforamerica/straymapper | animals/tests.py | animals/tests.py | from django.core.urlresolvers import reverse
from django.test import TestCase
class AnimalsViewsTestCase(TestCase):
def test_index(self):
resp = self.client.get(reverse('animals_index'))
self.assertEqual(resp.status_code, 200)
self.assertTrue('form' in resp.context)
self.assertTrue('alist' in resp.context)
self.assertTrue('results_count' in resp.context)
def test_cat_search(self):
resp = self.client.post(reverse('animals_index'), {'animal_type': 'CAT'})
self.assertEqual(resp.status_code, 200)
| from django.test import TestCase
class AnimalsViewsTestCase(TestCase):
def test_index(self):
resp = self.client.get('/animals/')
self.assertEqual(resp.status_code, 200)
self.assertTrue('form' in resp.context)
self.assertTrue('alist' in resp.context)
self.assertTrue('results_count' in resp.context)
| bsd-3-clause | Python |
4fedb170469888a20324ca79176f761ca7c5ac1e | Remove rewrap_text from utils.py in favor of textwrap in standard lib | radicalbiscuit/garnish | garnish/utils.py | garnish/utils.py | import sys
def fill_template(temp, args, longname, filename, url):
"""
Takes a template string (temp) and replaces all template keywords with
information from commandline arguments.
"""
temp = temp.replace('OWNER_NAME', args.copyright_holder)
temp = temp.replace('COPYRIGHT_YEAR', args.year)
temp = temp.replace('PROGRAM_NAME', args.program_name)
temp = temp.replace('LICENSE_LONGNAME', longname)
temp = temp.replace('LICENSE_FILENAME', filename)
temp = temp.replace('LICENSE_URL', url)
return temp
def exit(bad=False):
if bad:
print 'The operation was not completed successfully.'
sys.exit(1)
else:
sys.exit(0)
| import sys
def fill_template(temp, args, longname, filename, url):
"""
Takes a template string (temp) and replaces all template keywords with
information from commandline arguments.
"""
temp = temp.replace('OWNER_NAME', args.copyright_holder)
temp = temp.replace('COPYRIGHT_YEAR', args.year)
temp = temp.replace('PROGRAM_NAME', args.program_name)
temp = temp.replace('LICENSE_LONGNAME', longname)
temp = temp.replace('LICENSE_FILENAME', filename)
temp = temp.replace('LICENSE_URL', url)
return temp
def rewrap_text(readlines_list, textwidth=80):
"""
Input: list of strings generated by readlines
Returns: list of strings, adjusted to be less than 80 characters per line.
"""
def tokenize(line):
return line.split(' ')
for line in readlines_list:
if len(line) > textwidth
def exit(bad=False):
if bad:
print 'The operation was not completed successfully.'
sys.exit(1)
else:
sys.exit(0)
| mit | Python |
001206bb9f363b8574187d792a96ee596025ad78 | replace | with : as sep | tttor/csipb-jamu-prj,tttor/csipb-jamu-prj,tttor/csipb-jamu-prj,tttor/csipb-jamu-prj,tttor/csipb-jamu-prj,tttor/csipb-jamu-prj,tttor/csipb-jamu-prj,tttor/csipb-jamu-prj,tttor/csipb-jamu-prj | database/inserter/insert_disease.py | database/inserter/insert_disease.py | # insert_disease.py
import os
import sys
import time
import json
import yaml
import MySQLdb
import pickle
import psycopg2
import postgresql_util as pg
from collections import defaultdict
from bs4 import BeautifulSoup
from urllib2 import urlopen
from datetime import datetime
def main(argv):
assert len(argv)>=7
db = argv[1]
user = argv[2]; passwd = argv[3]
host = argv[4]; port = argv[5]
paths = argv[6:]
conn = psycopg2.connect(database=db, user=user, password=passwd,
host=host, port=port)
csr = conn.cursor()
insertDiseaseUniprot(csr,paths[0])
conn.commit()
conn.close()
def insertDiseaseUniprot(csr,fpath):
diseaseList = None
with open(fpath, 'rb') as handle:
diseaseList = pickle.load(handle)
omimDict = dict()
for d in diseaseList:
name, abbrv, omimId = d
if omimId in omimDict:
omimDict[omimId]['name'].append(name)
omimDict[omimId]['abbrv'].append(abbrv)
else:
omimDict[omimId] = {'name':[name],'abbrv':[abbrv]}
idx = 0; n = len(omimDict)
for omimId,v in omimDict.iteritems():
idx += 1
idStr = str(idx)
idStr = idStr.zfill(8)
idStr = 'DIS'+idStr
print 'inserting ', idx, 'of', n
name = ':'.join(v['name']);
name = name.replace("'","''")
abbrv = ':'.join(v['abbrv'])
dis = [idStr,omimId,name,abbrv]
dis = ["'"+i+"'" for i in dis]
qf = 'INSERT INTO disease (dis_id,dis_omim_id,dis_name,dis_uniprot_abbrv) VALUES ('
qm = ','.join(dis)
qr = ')'
sql = qf+qm+qr
csr.execute(sql)
if __name__ == '__main__':
start_time = time.time()
main(sys.argv)
print("--- %s seconds ---" % (time.time() - start_time))
| # insert_disease.py
import os
import sys
import time
import json
import yaml
import MySQLdb
import pickle
import psycopg2
import postgresql_util as pg
from collections import defaultdict
from bs4 import BeautifulSoup
from urllib2 import urlopen
from datetime import datetime
def main(argv):
assert len(argv)>=7
db = argv[1]
user = argv[2]; passwd = argv[3]
host = argv[4]; port = argv[5]
paths = argv[6:]
conn = psycopg2.connect(database=db, user=user, password=passwd,
host=host, port=port)
csr = conn.cursor()
insertDiseaseUniprot(csr,paths[0])
conn.commit()
conn.close()
def insertDiseaseUniprot(csr,fpath):
diseaseList = None
with open(fpath, 'rb') as handle:
diseaseList = pickle.load(handle)
omimDict = dict()
for d in diseaseList:
name, abbrv, omimId = d
if omimId in omimDict:
omimDict[omimId]['name'].append(name)
omimDict[omimId]['abbrv'].append(abbrv)
else:
omimDict[omimId] = {'name':[name],'abbrv':[abbrv]}
idx = 0; n = len(omimDict)
for omimId,v in omimDict.iteritems():
idx += 1
idStr = str(idx)
idStr = idStr.zfill(8)
idStr = 'DIS'+idStr
print 'inserting ', idx, 'of', n
name = '|'.join(v['name']);
name = name.replace("'","''")
abbrv = '|'.join(v['abbrv'])
dis = [idStr,omimId,name,abbrv]
dis = ["'"+i+"'" for i in dis]
qf = 'INSERT INTO disease (dis_id,dis_omim_id,dis_name,dis_uniprot_abbrv) VALUES ('
qm = ','.join(dis)
qr = ')'
sql = qf+qm+qr
csr.execute(sql)
if __name__ == '__main__':
start_time = time.time()
main(sys.argv)
print("--- %s seconds ---" % (time.time() - start_time))
| mit | Python |
bcf9f5da40ad34276b8be1cbc2d8c9e2fd70e9ac | update the monitors benchmark | enthought/pikos,enthought/pikos,enthought/pikos | pikos/benchmark/monitors.py | pikos/benchmark/monitors.py | # -*- coding: utf-8 -*-
#------------------------------------------------------------------------------
# Package: Pikos toolkit
# File: benchmark/monitors.py
# License: LICENSE.TXT
#
# Copyright (c) 2012, Enthought, Inc.
# All rights reserved.
#------------------------------------------------------------------------------
""" Estimate the overhead cost of using a monitor.
The benchmark runs the pystones benchmark under each monitor and calculates
the overhead.
"""
from test import pystone
from pikos.benchmark.record_counter import RecordCounter
def pymonitors():
""" Pure python monitors """
from pikos.monitors.api import (
FunctionMonitor, LineMonitor,
FunctionMemoryMonitor, LineMemoryMonitor)
return {
'FunctionMonitor': FunctionMonitor,
'LineMonitor': LineMonitor,
'FunctionMemoryMonitor': FunctionMemoryMonitor,
'LineMemoryMonitor': LineMemoryMonitor}
def cmonitors():
""" Cython monitors """
from pikos.monitors.api import FunctionMonitor
return {
'CFunctionMonitor': FunctionMonitor}
def main(monitors, loops=1000):
header = (
"Overhead time | Relative overhead | "
"{:^10} | Per record | {:^{length}}".format(
'Records', 'Name',
length=max(len(key) for key in monitors) - 4))
line = ('{time:>13} | {relative:>17} | {records:>10} '
'| {time_per_record:.6e} | {name}')
print header
print len(header) * '-'
expected_time, _ = pystone.pystones(loops)
for name, monitor in monitors.iteritems():
recorder = RecordCounter()
with monitor(recorder=recorder):
time, _ = pystone.pystones(loops)
time_per_record = (time - expected_time) / recorder.records
print line.format(
name=name,
time='{:2.2f}'.format(time - expected_time),
relative='{:.2%}'.format((time - expected_time) / expected_time),
time_per_record=time_per_record,
records='{:10d}'.format(recorder.records))
if __name__ == '__main__':
monitors = pymonitors()
monitors.update(cmonitors())
main(monitors)
| # -*- coding: utf-8 -*-
#------------------------------------------------------------------------------
# Package: Pikos toolkit
# File: benchmark/monitors.py
# License: LICENSE.TXT
#
# Copyright (c) 2012, Enthought, Inc.
# All rights reserved.
#------------------------------------------------------------------------------
""" Estimate the overhead cost of using a monitor.
The benchmark runs the pystones benchmark under each monitor and calculates
the overhead.
"""
from test import pystone
from pikos.monitors.api import *
from pikos.cmonitors.api import *
from pikos.benchmark.record_counter import RecordCounter
monitors = {
# Pure python monitors
'FunctionMonitor': FunctionMonitor,
'LineMonitor': LineMonitor,
'FunctionMemoryMonitor': FunctionMemoryMonitor,
'LineMemoryMonitor': LineMemoryMonitor,
'CFunctionMonitor': CFunctionMonitor,
}
def main(monitors, loops=1000):
header = (
"Overhead time | Relative overhead | "
"{:^10} | Per record | {:^{length}}".format(
'Records', 'Name',
length=max(len(key) for key in monitors) - 4))
line = ('{time:>13} | {relative:>17} | {records:>10} '
'| {time_per_record:.6e} | {name}')
print header
print len(header) * '-'
expected_time, _ = pystone.pystones(loops)
for name, monitor in monitors.iteritems():
recorder = RecordCounter()
with monitor(recorder=recorder):
time, _ = pystone.pystones(loops)
time_per_record = (time - expected_time) / recorder.records
print line.format(
name=name,
time='{:2.2f}'.format(time - expected_time),
relative='{:.2%}'.format((time - expected_time) / expected_time),
time_per_record=time_per_record,
records='{:10d}'.format(recorder.records))
if __name__ == '__main__':
main(monitors)
| bsd-3-clause | Python |
6df173345c5ac0038a402a53f47a9f1095ecc31c | Add key field to table and autoincrement it. | prcutler/nflpool,prcutler/nflpool | create_player_picks_table.py | create_player_picks_table.py | import sqlite3
import json
# This file creates the table in the database to store each player's picks for every category in NFLPool.
conn = sqlite3.connect('nflpool.sqlite')
cur = conn.cursor()
# Do some setup
cur.executescript('''
DROP TABLE IF EXISTS picks;
CREATE TABLE picks (
firstname TEXT NOT NULL,
lastname TEXT NOT NULL,
id INTEGER NOT NULL PRIMARY KEY UNIQUE,
season TEXT NOT NULL UNIQUE,
email TEXT NOT NULL UNIQUE,
timestamp TEXT NOT NULL,
id INTEGER PRIMARY KEY AUTOINCREMENT,
afc_east_first_pick TEXT NOT NULL,
afc_east_second_pick TEXT NOT NULL,
afc_east_last_pick TEXT NOT NULL,
afc_north_first_pick TEXT NOT NULL,
afc_north_second_pick TEXT NOT NULL,
afc_north_last_pick TEXT NOT NULL,
afc_south_first_pick TEXT NOT NULL,
afc_south_second_pick TEXT NOT NULL,
afc_south_last_pick TEXT NOT NULL,
afc_west_first_pick TEXT NOT NULL,
afc_west_second_pick TEXT NOT NULL,
afc_west_last_pick TEXT NOT NULL,
nfc_east_first_pick TEXT NOT NULL,
nfc_east_second_pick TEXT NOT NULL,
nfc_east_last_pick TEXT NOT NULL,
nfc_north_first_pick TEXT NOT NULL,
nfc_north_second_pick TEXT NOT NULL,
nfc_north_last_pick TEXT NOT NULL,
nfc_south_first_pick TEXT NOT NULL,
nfc_south_second_pick TEXT NOT NULL,
nfc_south_last_pick TEXT NOT NULL,
nfc_west_first_pick TEXT NOT NULL,
nfc_west_second_pick TEXT NOT NULL,
nfc_west_last_pick TEXT NOT NULL,
afc_wildcard1_pick TEXT NOT NULL,
afc_wildcard2_pick TEXT NOT NULL,
nfc_wildcard1_pick TEXT NOT NULL,
nfc_wildcard2_pick TEXT NOT NULL,
afc_rushing_leader_pick TEXT NOT NULL,
afc_passing_leader_pick TEXT NOT NULL,
afc_receiving_leader_pick TEXT NOT NULL,
afc_sacks_leader_pick TEXT NOT NULL,
afc_int_leader_pick TEXT NOT NULL,
nfc_rushing_leader_pick TEXT NOT NULL,
nfc_passing_leader_pick TEXT NOT NULL,
nfc_receiving_leader_pick TEXT NOT NULL,
nfc_sacks_leader_pick TEXT NOT NULL,
nfc_int_leader_pick TEXT NOT NULL,
afc_pf_pick TEXT NOT NULL,
nfc_pf_pick TEXT NOT NULL,
tiebreaker_pick TEXT NOT NULL
)
''')
conn.commit()
conn.close()
| import sqlite3
import json
# This file creates the table in the database to store each player's picks for every category in NFLPool.
conn = sqlite3.connect('nflpool.sqlite')
cur = conn.cursor()
# Do some setup
cur.executescript('''
DROP TABLE IF EXISTS picks;
CREATE TABLE picks (
firstname TEXT NOT NULL,
lastname TEXT NOT NULL,
id INTEGER NOT NULL PRIMARY KEY UNIQUE,
season TEXT NOT NULL UNIQUE,
email TEXT NOT NULL UNIQUE,
timestamp TEXT NOT NULL,
afc_east_first_pick TEXT NOT NULL,
afc_east_second_pick TEXT NOT NULL,
afc_east_last_pick TEXT NOT NULL,
afc_north_first_pick TEXT NOT NULL,
afc_north_second_pick TEXT NOT NULL,
afc_north_last_pick TEXT NOT NULL,
afc_south_first_pick TEXT NOT NULL,
afc_south_second_pick TEXT NOT NULL,
afc_south_last_pick TEXT NOT NULL,
afc_west_first_pick TEXT NOT NULL,
afc_west_second_pick TEXT NOT NULL,
afc_west_last_pick TEXT NOT NULL,
nfc_east_first_pick TEXT NOT NULL,
nfc_east_second_pick TEXT NOT NULL,
nfc_east_last_pick TEXT NOT NULL,
nfc_north_first_pick TEXT NOT NULL,
nfc_north_second_pick TEXT NOT NULL,
nfc_north_last_pick TEXT NOT NULL,
nfc_south_first_pick TEXT NOT NULL,
nfc_south_second_pick TEXT NOT NULL,
nfc_south_last_pick TEXT NOT NULL,
nfc_west_first_pick TEXT NOT NULL,
nfc_west_second_pick TEXT NOT NULL,
nfc_west_last_pick TEXT NOT NULL,
afc_wildcard1_pick TEXT NOT NULL,
afc_wildcard2_pick TEXT NOT NULL,
nfc_wildcard1_pick TEXT NOT NULL,
nfc_wildcard2_pick TEXT NOT NULL,
afc_rushing_leader_pick TEXT NOT NULL,
afc_passing_leader_pick TEXT NOT NULL,
afc_receiving_leader_pick TEXT NOT NULL,
afc_sacks_leader_pick TEXT NOT NULL,
afc_int_leader_pick TEXT NOT NULL,
nfc_rushing_leader_pick TEXT NOT NULL,
nfc_passing_leader_pick TEXT NOT NULL,
nfc_receiving_leader_pick TEXT NOT NULL,
nfc_sacks_leader_pick TEXT NOT NULL,
nfc_int_leader_pick TEXT NOT NULL,
afc_pf_pick TEXT NOT NULL,
nfc_pf_pick TEXT NOT NULL,
tiebreaker_pick TEXT NOT NULL
)
''')
conn.commit()
conn.close()
# TODO Make primary key autoincrement - or can email be used as the key?
| mit | Python |
4d6bbdd538fa92a107a10344bc5c8f8f0ec86de9 | add self link | olneyhymn/westminster-daily,tdhopper/westminster-daily,olneyhymn/westminster-daily,olneyhymn/westminster-daily,tdhopper/westminster-daily,olneyhymn/westminster-daily,tdhopper/westminster-daily | generate_feed.py | generate_feed.py | from feedgen.feed import FeedGenerator
import datetime as dt
import pytz
from premailer import transform
import markdown
from functools import lru_cache
from bs4 import BeautifulSoup
URL = "https://reformedconfessions.com/westminster-daily"
FILENAME = "feed.rss"
NUMBER_OF_DAYS = 30
@lru_cache()
def markdown_parser(month, day):
with open(f"content/{month}/{day}.md", "r") as f:
md = f.read()
markdown_parser = markdown.Markdown(
extensions=["meta", "footnotes"],
extension_configs={"footnotes": {"BACKLINK_TEXT": ""}},
)
return markdown_parser, markdown_parser.convert(md)
def meta(month, day):
return markdown_parser(month, day)[0].Meta
def content(month, day):
md_as_html = markdown_parser(month, day)[1]
c = transform(md_as_html, preserve_internal_links=True)
soup = BeautifulSoup(c)
for a in soup.findAll("a"):
a.replaceWithChildren()
c = str(soup)
c = c[(c.find("body") + len("body>")) : -len("</body></html>")]
c = c.replace("\n", "")
c = c.replace("\xa0", " ")
return c
def main():
fg = FeedGenerator()
fg.id(f"{URL}/{FILENAME}")
fg.title("Westminster Daily")
fg.author({"name": "Westminster Daily"})
fg.subtitle("Read through the Westminster Confession and Catechisms in a year.")
fg.link(href=f"{URL}/")
fg.link(href=f"{URL}/{FILENAME}", rel="self")
fg.language("en")
now = dt.datetime.now(tz=pytz.timezone("US/Eastern"))
for date in (now - dt.timedelta(n) for n in reversed(range(NUMBER_OF_DAYS))):
date = date.replace(hour=0, minute=0, second=0, microsecond=0)
month = date.strftime("%m")
day = date.strftime("%d")
url = f"{URL}/{month}/{day}/"
fe = fg.add_entry()
fe.id(url)
fe.title(meta(month, day)["pagetitle"][0])
fe.link(href=url)
fe.guid(url, permalink=True)
fe.content(content(month, day), type="CDATA")
fe.updated(date)
fe.published(date)
fg.rss_file(FILENAME, pretty=True) # Write the RSS feed to a file
if __name__ == "__main__":
main()
| from feedgen.feed import FeedGenerator
import datetime as dt
import pytz
from premailer import transform
import markdown
from functools import lru_cache
from bs4 import BeautifulSoup
URL = "https://reformedconfessions.com/westminster-daily"
FILENAME = "feed.rss"
NUMBER_OF_DAYS = 30
@lru_cache()
def markdown_parser(month, day):
with open(f"content/{month}/{day}.md", "r") as f:
md = f.read()
markdown_parser = markdown.Markdown(
extensions=["meta", "footnotes"],
extension_configs={"footnotes": {"BACKLINK_TEXT": ""}},
)
return markdown_parser, markdown_parser.convert(md)
def meta(month, day):
return markdown_parser(month, day)[0].Meta
def content(month, day):
md_as_html = markdown_parser(month, day)[1]
c = transform(md_as_html, preserve_internal_links=True)
soup = BeautifulSoup(c)
for a in soup.findAll("a"):
a.replaceWithChildren()
c = str(soup)
c = c[(c.find("body") + len("body>")) : -len("</body></html>")]
c = c.replace("\n", "")
c = c.replace("\xa0", " ")
return c
def main():
fg = FeedGenerator()
fg.id(f"{URL}/{FILENAME}")
fg.title("Westminster Daily")
fg.author({"name": "Westminster Daily"})
fg.subtitle("Read through the Westminster Confession and Catechisms in a year.")
fg.link(href=f"{URL}/")
fg.language("en")
now = dt.datetime.now(tz=pytz.timezone("US/Eastern"))
for date in (now - dt.timedelta(n) for n in reversed(range(NUMBER_OF_DAYS))):
date = date.replace(hour=0, minute=0, second=0, microsecond=0)
month = date.strftime("%m")
day = date.strftime("%d")
url = f"{URL}/{month}/{day}/"
fe = fg.add_entry()
fe.id(url)
fe.title(meta(month, day)["pagetitle"][0])
fe.link(href=url)
fe.guid(url, permalink=True)
fe.content(content(month, day), type="CDATA")
fe.updated(date)
fe.published(date)
fg.rss_file(FILENAME, pretty=True) # Write the RSS feed to a file
if __name__ == "__main__":
main()
| bsd-3-clause | Python |
e2fbf646b193284fc5d01684193b9c5aeb415efe | Fix due to merge conflicts | TalkAboutLocal/local-news-engine,TalkAboutLocal/local-news-engine,TalkAboutLocal/local-news-engine,TalkAboutLocal/local-news-engine | generate_html.py | generate_html.py | from jinja2 import Environment, FileSystemLoader
import datetime
import json
env = Environment(loader=FileSystemLoader('templates'), autoescape=True)
names_template = env.get_template('names.html')
area_template = env.get_template('areas.html')
with open("output/templates.js") as templatesjs:
templates = templatesjs.read()
with open("processed/area_matches.json") as area_matches_file:
area_matches = json.load(area_matches_file)
with open('output/areas.html', 'w+') as name_output:
name_output.write(area_template.render(
templates=templates,
area_matches=area_matches,
date=datetime.date.today().isoformat(),
))
with open("processed/interesting_names.json") as interesting_names_file:
interesting_names = json.load(interesting_names_file)
with open('output/names.html', 'w+') as name_output, open("key_field_names.txt") as key_field_names_file:
key_fields = list(set([key_field_name.strip() for key_field_name in key_field_names_file]))
name_output.write(names_template.render(
templates=templates,
interesting_names=interesting_names,
interesting_names_json=json.dumps(interesting_names),
date=datetime.date.today().isoformat(),
key_fields_json=json.dumps(key_fields),
))
| from jinja2 import Environment, FileSystemLoader
import datetime
import json
env = Environment(loader=FileSystemLoader('templates'), autoescape=True)
names_template = env.get_template('names.html')
area_template = env.get_template('areas.html')
with open("output/templates.js") as templatesjs:
templates = templatesjs.read()
with open("processed/area_matches.json") as area_matches_file:
area_matches = json.load(area_matches_file)
with open('output/areas.html', 'w+') as name_output:
name_output.write(area_template.render(
templates=templates,
area_matches=area_matches,
date=datetime.date.today().isoformat(),
))
with open("processed/interesting_names.json") as interesting_names_file:
interesting_names = json.load(interesting_names_file)
with open('output/names.html', 'w+') as name_output:
name_output.write(names_template.render(
templates=templates,
interesting_names=interesting_names,
interesting_names_json=json.dumps(interesting_names),
date=datetime.date.today().isoformat(),
))
| agpl-3.0 | Python |
df000e47d8d727f10e7f922b21814c025539b8ef | Add url_shortener.validation.NotABlacklistMatch class | piotr-rusin/url-shortener,piotr-rusin/url-shortener | url_shortener/validation.py | url_shortener/validation.py | # -*- coding: utf-8 -*-
from submodules.spam_lists_lib.spam_lists import (
GoogleSafeBrowsing, HpHosts, GeneralizedUrlTester, UrlTesterChain,
SPAMHAUS_DBL, SPAMHAUS_ZEN, SURBL_MULTI
)
from wtforms.validators import ValidationError
from . import app
google_safe_browsing = GoogleSafeBrowsing(
'url-shortener',
'0.9',
app.config['GOOGLE_SAFE_BROWSING_API_KEY']
)
hp_hosts = HpHosts('url-shortener')
spam_tester = GeneralizedUrlTester(
UrlTesterChain(
SPAMHAUS_DBL,
SPAMHAUS_ZEN,
SURBL_MULTI,
hp_hosts,
google_safe_browsing
)
)
class NotABlacklistMatch():
def __init__(self, blacklist, message=None):
self.blacklist = blacklist
self.message = message
def __call__(self, form, field):
if self.is_match(field.data):
raise ValidationError(self.message)
def is_match(self, value):
return self.blacklist.any_match([value])
| # -*- coding: utf-8 -*-
from submodules.spam_lists_lib.spam_lists import (
GoogleSafeBrowsing, HpHosts, GeneralizedUrlTester, UrlTesterChain,
SPAMHAUS_DBL, SPAMHAUS_ZEN, SURBL_MULTI
)
from . import app
google_safe_browsing = GoogleSafeBrowsing(
'url-shortener',
'0.9',
app.config['GOOGLE_SAFE_BROWSING_API_KEY']
)
hp_hosts = HpHosts('url-shortener')
spam_tester = GeneralizedUrlTester(
UrlTesterChain(
SPAMHAUS_DBL,
SPAMHAUS_ZEN,
SURBL_MULTI,
hp_hosts,
google_safe_browsing
)
)
| mit | Python |
0ec1c37beecfbbaa4a1e3b38d36c67359a388cf9 | remove unused imports | fedora-conary/conary,fedora-conary/conary,fedora-conary/conary,fedora-conary/conary,fedora-conary/conary | updatecmd.py | updatecmd.py | #
# Copyright (c) 2004 Specifix, Inc.
# All rights reserved
#
import files
import sys
import versions
def doUpdate(repos, cfg, pkg, mainPackageName):
if cfg.root == "/":
print "using srs to update to your actual system is dumb."
sys.exit(0)
for (fileId, path, version) in pkg.fileList():
infoFile = repos.getFileDB(fileId)
f = infoFile.getVersion(version)
if f.__class__ == files.SourceFile:
d = {}
d['pkgname'] = mainPackageName
path = (cfg.sourcepath) % d + "/" + path
f.restore(repos, cfg.root + path)
def update(repos, cfg, pkg, versionStr = None):
if pkg and pkg[0] != "/":
pkg = cfg.packagenamespace + "/" + pkg
if versionStr and versionStr[0] != "/":
versionStr = cfg.defaultbranch.asString() + "/" + versionStr
if versionStr:
version = versions.VersionFromString(versionStr)
else:
version = None
list = []
bail = 0
mainPackageName = None
for pkgName in repos.getPackageList(pkg):
pkgSet = repos.getPackageSet(pkgName)
if not version:
version = pkgSet.getLatestVersion(cfg.defaultbranch)
if not pkgSet.hasVersion(version):
sys.stderr.write("package %s does not contain version %s\n" %
(pkgName, version.asString()))
bail = 1
else:
pkg = pkgSet.getVersion(version)
list.append(pkg)
# sources are only in source packages, which are always
# named <pkgname>/<source>
#
# this means we can parse a simple name of the package
# out of the full package identifier
if pkgName.endswith('/sources'):
mainPackageName = pkgName.rstrip('/sources')
if bail:
return
for pkg in list:
doUpdate(repos, cfg, pkg, mainPackageName)
| #
# Copyright (c) 2004 Specifix, Inc.
# All rights reserved
#
import package
import files
import shutil
import pwd
import grp
import files
import string
import sys
import versions
def doUpdate(repos, cfg, pkg, mainPackageName):
if cfg.root == "/":
print "using srs to update to your actual system is dumb."
import sys
sys.exit(0)
for (fileId, path, version) in pkg.fileList():
infoFile = repos.getFileDB(fileId)
f = infoFile.getVersion(version)
if f.__class__ == files.SourceFile:
d = {}
d['pkgname'] = mainPackageName
path = (cfg.sourcepath) % d + "/" + path
f.restore(repos, cfg.root + path)
def update(repos, cfg, pkg, versionStr = None):
if pkg and pkg[0] != "/":
pkg = cfg.packagenamespace + "/" + pkg
if versionStr and versionStr[0] != "/":
versionStr = cfg.defaultbranch.asString() + "/" + versionStr
if versionStr:
version = versions.VersionFromString(versionStr)
else:
version = None
list = []
bail = 0
mainPackageName = None
for pkgName in repos.getPackageList(pkg):
pkgSet = repos.getPackageSet(pkgName)
if not version:
version = pkgSet.getLatestVersion(cfg.defaultbranch)
if not pkgSet.hasVersion(version):
sys.stderr.write("package %s does not contain version %s\n" %
(pkgName, version.asString()))
bail = 1
else:
pkg = pkgSet.getVersion(version)
list.append(pkg)
# sources are only in source packages, which are always
# named <pkgname>/<source>
#
# this means we can parse a simple name of the package
# out of the full package identifier
if pkgName.endswith('/sources'):
mainPackageName = pkgName.rstrip('/sources')
if bail:
return
for pkg in list:
doUpdate(repos, cfg, pkg, mainPackageName)
| apache-2.0 | Python |
7b67d1753a678e1a24d400d4c733e82d224c11e5 | fix 19 10 tests | shtalinberg/django-el-pagination,shtalinberg/django-el-pagination,shtalinberg/django-el-pagination,shtalinberg/django-el-pagination | tests/settings.py | tests/settings.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
"""Settings file for the Django project used for tests."""
import os
DEBUG = True
DJANGO_LIVE_TEST_SERVER_ADDRESS = os.getenv('DJANGO_LIVE_TEST_SERVER_ADDRESS',
"localhost:8000-8010,8080,9200-9300")
# Disable 1.9 arguments '--parallel'
DJANGO_TEST_PROCESSES = os.getenv('DJANGO_TEST_PROCESSES', 1) # try exclude “Address already in use” at “setUpClass”
PROJECT_NAME = 'project'
# Base paths.
ROOT = os.path.abspath(os.path.dirname(__file__))
PROJECT = os.path.join(ROOT, PROJECT_NAME)
# Django configuration.
DATABASES = {'default': {'ENGINE': 'django.db.backends.sqlite3'}}
INSTALLED_APPS = (
'django.contrib.staticfiles',
'el_pagination',
'nose',
'django_nose',
PROJECT_NAME,
)
gettext = lambda s: s
LANGUAGES = (('en', gettext('English')),)
LANGUAGE_CODE = os.getenv('EL_PAGINATION_LANGUAGE_CODE', 'en')
ROOT_URLCONF = PROJECT_NAME + '.urls'
SECRET_KEY = os.getenv('EL_PAGINATION_SECRET_KEY', 'secret')
SITE_ID = 1
STATIC_ROOT = os.path.join(PROJECT, 'static')
STATIC_URL = '/static/'
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(PROJECT, 'templates'), ],
'APP_DIRS': True,
'OPTIONS': {
'debug': DEBUG,
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.template.context_processors.media',
'django.template.context_processors.static',
PROJECT_NAME + '.context_processors.navbar',
PROJECT_NAME + '.context_processors.versions',
],
},
},
]
# Testing.
NOSE_ARGS = (
'--verbosity=2',
'--stop',
'-s', # Don't capture stdout (any stdout output will be printed immediately) [NOSE_NOCAPTURE]
# '--nomigrations',
# '--with-coverage',
# '--cover-package=el_pagination',
)
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
| """Settings file for the Django project used for tests."""
import os
DEBUG = True
DJANGO_LIVE_TEST_SERVER_ADDRESS = os.getenv('DJANGO_LIVE_TEST_SERVER_ADDRESS',
"localhost:8000-8010,8080,9200-9300")
# Disable 1.9 arguments '--parallel',
DJANGO_TEST_PROCESSES = os.getenv('DJANGO_TEST_PROCESSES', 1) # try exclude “Address already in use” at “setUpClass”
PROJECT_NAME = 'project'
# Base paths.
ROOT = os.path.abspath(os.path.dirname(__file__))
PROJECT = os.path.join(ROOT, PROJECT_NAME)
# Django configuration.
DATABASES = {'default': {'ENGINE': 'django.db.backends.sqlite3'}}
INSTALLED_APPS = (
'django.contrib.staticfiles',
'el_pagination',
'nose',
'django_nose',
PROJECT_NAME,
)
gettext = lambda s: s
LANGUAGES = (('en', gettext('English')),)
LANGUAGE_CODE = os.getenv('EL_PAGINATION_LANGUAGE_CODE', 'en')
ROOT_URLCONF = PROJECT_NAME + '.urls'
SECRET_KEY = os.getenv('EL_PAGINATION_SECRET_KEY', 'secret')
SITE_ID = 1
STATIC_ROOT = os.path.join(PROJECT, 'static')
STATIC_URL = '/static/'
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(PROJECT, 'templates'), ],
'APP_DIRS': True,
'OPTIONS': {
'debug': DEBUG,
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.template.context_processors.media',
'django.template.context_processors.static',
PROJECT_NAME + '.context_processors.navbar',
PROJECT_NAME + '.context_processors.versions',
],
},
},
]
# Testing.
NOSE_ARGS = (
'--verbosity=2',
'--stop',
'-s', # Don't capture stdout (any stdout output will be printed immediately) [NOSE_NOCAPTURE]
# '--nomigrations',
# '--with-coverage',
# '--cover-package=el_pagination',
)
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
| mit | Python |
8b9b77cc01cfca4dd477f4ba72b4476a6ed95758 | Bump to version 0.2. | signalfx/signalfx-python,signalfx/signalfx-python | signalfx/version.py | signalfx/version.py | # Copyright (C) 2015 SignalFx, Inc. All rights reserved.
name = 'signalfx'
version = '0.2'
| # Copyright (C) 2015 SignalFx, Inc. All rights reserved.
name = 'signalfx'
version = '0.1'
| apache-2.0 | Python |
1d34f417e13164a42470beaee09cc24dbbf8174e | revert back to 32bit python | sgibbes/zonal_stats_app | utilities/zstats_handler.py | utilities/zstats_handler.py | import subprocess
import arcpy
import os
def main_script(layer, raster, method):
# add to this if i'm running average area of zstats
final_aoi = layer.final_aoi
start_id = 0
end_id = int(arcpy.GetCount_management(final_aoi).getOutput(0))
print "Number of features: {}".format(end_id)
zstats_subprocess = None
if method == 'zonal_stats':
zstats_subprocess = os.path.join(layer.root_dir, "utilities", "zstats_subprocess.py")
if method == 'average_area':
print "method is average"
zstats_subprocess = os.path.join(layer.root_dir, "utilities", "average_area.py")
script_cmd = [r"C:\Python27\ArcGIS10.4\python.exe", zstats_subprocess, raster.value,
raster.zone, layer.final_aoi, raster.cellsize, raster.analysis]
cmd = script_cmd + [str(start_id), str(end_id)]
expected_complete_total = len(range(start_id, end_id))
feature_status = {}
while len(feature_status) < expected_complete_total:
# this runs the analysis
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
for line in iter(p.stdout.readline, b''):
arcpy.AddMessage(line.rstrip())
# Each line that comes back from the subprocess represents 1 feature/ID
# We need to keep track of this in case a feature fails so we can skip it
if 'debug' in line:
pass
else:
if 'process succeeded' in line:
feature_status[start_id] = True
start_id += 1
p.wait()
# Since no lines are returned from sub if it fails, add this: get the return code from the failure,
# as long as it isn't 0, its a failure, and increment the counter by 1 so it starts on the next feautre
if p.returncode != 0:
feature_status[start_id] = False
start_id += 1
| import subprocess
import arcpy
import os
def main_script(layer, raster, method):
# add to this if i'm running average area of zstats
final_aoi = layer.final_aoi
start_id = 0
end_id = int(arcpy.GetCount_management(final_aoi).getOutput(0))
print "Number of features: {}".format(end_id)
zstats_subprocess = None
if method == 'zonal_stats':
zstats_subprocess = os.path.join(layer.root_dir, "utilities", "zstats_subprocess.py")
if method == 'average_area':
print "method is average"
zstats_subprocess = os.path.join(layer.root_dir, "utilities", "average_area.py")
script_cmd = [r"C:\Python27\ArcGISx6410.4\python.exe", zstats_subprocess, raster.value,
raster.zone, layer.final_aoi, raster.cellsize, raster.analysis]
cmd = script_cmd + [str(start_id), str(end_id)]
expected_complete_total = len(range(start_id, end_id))
feature_status = {}
while len(feature_status) < expected_complete_total:
# this runs the analysis
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
for line in iter(p.stdout.readline, b''):
arcpy.AddMessage(line.rstrip())
# Each line that comes back from the subprocess represents 1 feature/ID
# We need to keep track of this in case a feature fails so we can skip it
if 'debug' in line:
pass
else:
if 'process succeeded' in line:
feature_status[start_id] = True
start_id += 1
p.wait()
# Since no lines are returned from sub if it fails, add this: get the return code from the failure,
# as long as it isn't 0, its a failure, and increment the counter by 1 so it starts on the next feautre
if p.returncode != 0:
feature_status[start_id] = False
start_id += 1
| apache-2.0 | Python |
3977993b8bc8a846df1a5e5fe792d6f242009b03 | add gzip deflation to HtmlFetcher | AGoodId/python-goose,AGoodId/python-goose | goose/network.py | goose/network.py | # -*- coding: utf-8 -*-
"""\
This is a python port of "Goose" orignialy licensed to Gravity.com
under one or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership.
Python port was written by Xavier Grangier for Recrutae
Gravity.com licenses this file
to you under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import urllib2
import zlib
class HtmlFetcher(object):
def __init__(self, config):
self.config = config
# set header
self.headers = {'User-agent': self.config.browser_user_agent}
def get_url(self):
# if we have a result
# get the final_url
if self.result is not None:
return self.result.geturl()
return None
def get_html(self, url):
# utf-8 encode unicode url
if isinstance(url, unicode):
url = url.encode('utf-8')
# set request
self.request = urllib2.Request(
url,
headers=self.headers)
# do request
try:
self.result = urllib2.urlopen(
self.request,
timeout=self.config.http_timeout)
except Exception:
self.result = None
# read the result content
if self.result is not None:
if self.result.info().get('Content-Encoding') == 'gzip':
return zlib.decompress(self.result.read(), zlib.MAX_WBITS | 16)
return self.result.read()
return None
| # -*- coding: utf-8 -*-
"""\
This is a python port of "Goose" orignialy licensed to Gravity.com
under one or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership.
Python port was written by Xavier Grangier for Recrutae
Gravity.com licenses this file
to you under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import urllib2
class HtmlFetcher(object):
def __init__(self, config):
self.config = config
# set header
self.headers = {'User-agent': self.config.browser_user_agent}
def get_url(self):
# if we have a result
# get the final_url
if self.result is not None:
return self.result.geturl()
return None
def get_html(self, url):
# utf-8 encode unicode url
if isinstance(url, unicode):
url = url.encode('utf-8')
# set request
self.request = urllib2.Request(
url,
headers=self.headers)
# do request
try:
self.result = urllib2.urlopen(
self.request,
timeout=self.config.http_timeout)
except Exception:
self.result = None
# read the result content
if self.result is not None:
return self.result.read()
return None
| apache-2.0 | Python |
266efa9d2e56fe3767111d5df9b8a554bec8a18a | fix attack | Groupe24/CodeInSpace | tools/run_game.py | tools/run_game.py | #!/usr/bin/env python
import os
import sys
os.chdir('../game')
sys.path.insert(0, '../game')
import coders_in_space
coders_in_space.play_game('board/test_board.cis', ('bot3','dumby'), screen_size=(190, 50), no_gui = False, no_splash = True, max_rounds_count = 10)
raw_input('Press Enter to continue...')
| #!/usr/bin/env python
import os
import sys
os.chdir('../game')
sys.path.insert(0, '../game')
import coders_in_space
coders_in_space.play_game('board/test_board.cis', ('dumbinspace','dumby'), screen_size=(190, 50), no_gui = False, no_splash = True, max_rounds_count = 10)
raw_input('Press Enter to continue...')
| mit | Python |
5d998a431ea5c531b78979e4ad69b1729369eab1 | Make all preview video 1080p | cgwire/zou | zou/app/utils/movie_utils.py | zou/app/utils/movie_utils.py | import os
from moviepy.editor import VideoFileClip
def save_file(tmp_folder, instance_id, file_to_save):
"""
Save given file in given path. This function should only be used for
temporary storage.
"""
extension = file_to_save.filename[-4:]
file_name = instance_id + extension.lower() + ".tmp"
file_path = os.path.join(tmp_folder, file_name)
file_to_save.save(file_path)
return file_path
def generate_thumbnail(movie_path):
"""
Generate a thumbnail to represent the movie given at movie path. It
takes a picture at the middle of the movie.
"""
folder_path = os.path.dirname(movie_path)
file_source_name = os.path.basename(movie_path)
file_target_name = "%s.png" % file_source_name[:-4]
file_target_path = os.path.join(folder_path, file_target_name)
movie_clip = VideoFileClip(movie_path)
movie_clip.save_frame(file_target_path, 0)
return file_target_path
def normalize_movie(movie_path, height=1080, qscale='2'):
"""
Turn movie in a 720p movie file.
"""
folder_path = os.path.dirname(movie_path)
file_source_name = os.path.basename(movie_path)
file_target_name = "%s.mp4" % file_source_name[:-8]
file_target_path = os.path.join(folder_path, file_target_name)
movie_clip = VideoFileClip(movie_path)
movie_clip = movie_clip.resize(height=height)
movie_clip.write_videofile(
file_target_path,
ffmpeg_params=['-qscale:v', qscale]
)
return file_target_path
| import os
from moviepy.editor import VideoFileClip
def save_file(tmp_folder, instance_id, file_to_save):
"""
Save given file in given path. This function should only be used for
temporary storage.
"""
extension = file_to_save.filename[-4:]
file_name = instance_id + extension.lower() + ".tmp"
file_path = os.path.join(tmp_folder, file_name)
file_to_save.save(file_path)
return file_path
def generate_thumbnail(movie_path):
"""
Generate a thumbnail to represent the movie given at movie path. It
takes a picture at the middle of the movie.
"""
folder_path = os.path.dirname(movie_path)
file_source_name = os.path.basename(movie_path)
file_target_name = "%s.png" % file_source_name[:-4]
file_target_path = os.path.join(folder_path, file_target_name)
movie_clip = VideoFileClip(movie_path)
movie_clip.save_frame(file_target_path, 0)
return file_target_path
def normalize_movie(movie_path):
"""
Turn movie in a 720p movie file.
"""
folder_path = os.path.dirname(movie_path)
file_source_name = os.path.basename(movie_path)
file_target_name = "%s.mp4" % file_source_name[:-8]
file_target_path = os.path.join(folder_path, file_target_name)
movie_clip = VideoFileClip(movie_path)
movie_clip = movie_clip.resize(height=720)
movie_clip.write_videofile(file_target_path)
return file_target_path
| agpl-3.0 | Python |
5d3e8d7bcd553a1e60803cf7b72c6ed1eaa5ba0b | Optimize imports | joaolrpaulo/eletronic-voting-system,joaolrpaulo/eletronic-voting-system,joaolrpaulo/eletronic-voting-system | voting-server/app/errors.py | voting-server/app/errors.py | from app import app
from flask import jsonify
from werkzeug.exceptions import HTTPException
from werkzeug.exceptions import default_exceptions
def json_error(error):
response = jsonify(message = str(error))
response.status_code = error.code if isinstance(error, HTTPException) else 500
return response
for code in default_exceptions.keys():
app.register_error_handler(code, json_error)
| from app import app
from flask import jsonify
from werkzeug.exceptions import HTTPException, default_exceptions
def json_error(error):
response = jsonify(message = str(error))
print(str(error))
response.status_code = error.code if isinstance(error, HTTPException) else 500
return response
for code in default_exceptions.keys():
app.register_error_handler(code, json_error)
| mit | Python |
3cb40d4dbc7a34050ed563778c215ca1e12e2d90 | Bump version to 0.4dev0 | jacebrowning/gdm,jacebrowning/gitman | gdm/__init__.py | gdm/__init__.py | """Package for GDM."""
import sys
__project__ = 'GDM'
__version__ = '0.4dev0'
CLI = 'gdm'
VERSION = __project__ + '-' + __version__
DESCRIPTION = 'A very basic language-agnostic "dependency manager" using Git.'
PYTHON_VERSION = 3, 3
if not sys.version_info >= PYTHON_VERSION: # pragma: no cover (manual test)
exit("Python {}.{}+ is required.".format(*PYTHON_VERSION))
try:
from .commands import install, uninstall
except ImportError: # pragma: no cover (manual test)
pass
| """Package for GDM."""
import sys
__project__ = 'GDM'
__version__ = '0.3'
CLI = 'gdm'
VERSION = __project__ + '-' + __version__
DESCRIPTION = 'A very basic language-agnostic "dependency manager" using Git.'
PYTHON_VERSION = 3, 3
if not sys.version_info >= PYTHON_VERSION: # pragma: no cover (manual test)
exit("Python {}.{}+ is required.".format(*PYTHON_VERSION))
try:
from .commands import install, uninstall
except ImportError: # pragma: no cover (manual test)
pass
| mit | Python |
7b11ea6792b239fa9930f4fd5bfccef85e50103f | Include date | jml/edn-profiling | gen-much-edn.py | gen-much-edn.py | """Generate some edn records for profiling."""
import datetime
import decimal
import random
import sys
import edn
DICTIONARY_FILE = '/usr/share/dict/words'
def load_words(dictionary):
with open(dictionary, 'r') as dictionary_file:
return [x.strip() for x in dictionary_file.readlines()]
WORDS = load_words(DICTIONARY_FILE)
def random_words(n):
for i in range(n):
word = random.choice(WORDS)
try:
yield word.decode('ascii')
except UnicodeDecodeError:
continue
def random_decimal():
value = random.randint(-500000, 500000) / 100.0
return decimal.Decimal(value).quantize(decimal.Decimal('0.01'))
def random_day():
return datetime.date(2013, 1, 1) + datetime.timedelta(random.randint(0, 365))
def make_element():
return {edn.Keyword('description'): ' '.join(random_words(3)),
edn.Keyword('amount'): random_decimal(),
edn.Keyword('date'): random_day()}
num = int(sys.argv[1])
for i in range(num):
print edn.dumps(
make_element(),
[(datetime.date, edn.Symbol('day'), lambda x: x.strftime('%Y-%m-%d'))],
)
| """Generate some edn records for profiling."""
import decimal
import random
import sys
import edn
DICTIONARY_FILE = '/usr/share/dict/words'
def load_words(dictionary):
with open(dictionary, 'r') as dictionary_file:
return [x.strip() for x in dictionary_file.readlines()]
WORDS = load_words(DICTIONARY_FILE)
def random_words(n):
for i in range(n):
word = random.choice(WORDS)
try:
yield word.decode('ascii')
except UnicodeDecodeError:
continue
def random_decimal():
value = random.randint(-500000, 500000) / 100.0
return decimal.Decimal(value).quantize(decimal.Decimal('0.01'))
def make_element():
return {edn.Keyword('foo'): ' '.join(random_words(3)),
edn.Keyword('bar'): random_decimal()}
num = int(sys.argv[1])
for i in range(num):
print edn.dumps(make_element())
| apache-2.0 | Python |
0ed9e159fa606c9dbdb90dfc64fcb357e9f9cedb | Fix wrong import in test | evernym/zeno,evernym/plenum | plenum/test/test_request.py | plenum/test/test_request.py | from plenum.common.request import Request
def test_request_all_identifiers_returns_empty_list_for_request_without_signatures():
req = Request()
assert req.all_identifiers == [] | from indy_common.types import Request
def test_request_all_identifiers_returns_empty_list_for_request_without_signatures():
req = Request()
assert req.all_identifiers == [] | apache-2.0 | Python |
219c773fcf4fb98201e90aab77a27e090799d2c1 | fix flock-related tests on linux/mac after r1646 | old8xp/gyp_from_google,old8xp/gyp_from_google,old8xp/gyp_from_google,old8xp/gyp_from_google,old8xp/gyp_from_google | test/make_global_settings/basics/gyptest-make_global_settings.py | test/make_global_settings/basics/gyptest-make_global_settings.py | #!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies make_global_settings.
"""
import os
import sys
import TestGyp
test_format = ['ninja']
if sys.platform in ('linux2', 'darwin'):
test_format += ['make']
test = TestGyp.TestGyp(formats=test_format)
test.run_gyp('make_global_settings.gyp')
if test.format == 'make':
cc_expected = """ifneq (,$(filter $(origin CC), undefined default))
CC = $(abspath clang)
endif
"""
if sys.platform == 'linux2':
link_expected = """
LINK ?= flock $(builddir)/linker.lock $(abspath clang++)
"""
elif sys.platform == 'darwin':
link_expected = """
LINK ?= ./gyp-mac-tool flock $(builddir)/linker.lock $(abspath clang++)
"""
test.must_contain('Makefile', cc_expected)
test.must_contain('Makefile', link_expected)
if test.format == 'ninja':
cc_expected = 'cc = ' + os.path.join('..', '..', 'clang')
ld_expected = 'ld = $cxx'
if sys.platform == 'win32':
ld_expected = 'link.exe'
test.must_contain('out/Default/build.ninja', cc_expected)
test.must_contain('out/Default/build.ninja', ld_expected)
test.pass_test()
| #!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies make_global_settings.
"""
import os
import sys
import TestGyp
test_format = ['ninja']
if sys.platform in ('linux2', 'darwin'):
test_format += ['make']
test = TestGyp.TestGyp(formats=test_format)
test.run_gyp('make_global_settings.gyp')
if test.format == 'make':
cc_expected = """ifneq (,$(filter $(origin CC), undefined default))
CC = $(abspath clang)
endif
"""
if sys.platform == 'linux2':
link_expected = """
LINK ?= flock $(builddir)/linker.lock $(abspath clang++)
"""
elif sys.platform == 'darwin':
link_expected = """
LINK ?= ./gyp-mac-tool flock $(builddir)/linker.lock $(abspath clang++)
"""
test.must_contain('Makefile', cc_expected)
test.must_contain('Makefile', link_expected)
if test.format == 'ninja':
cc_expected = 'cc = ' + os.path.join('..', '..', 'clang')
ld_expected = 'ld = flock linker.lock $cxx'
if sys.platform == 'darwin':
ld_expected = './gyp-mac-tool flock linker.lock $cxx'
elif sys.platform == 'win32':
ld_expected = 'link.exe'
test.must_contain('out/Default/build.ninja', cc_expected)
test.must_contain('out/Default/build.ninja', ld_expected)
test.pass_test()
| bsd-3-clause | Python |
da50e920ecfde15b90fe4380a7c31130112342a7 | fix failing unit test: IG error code reinterpreted as 401 | snarfed/oauth-dropins,snarfed/oauth-dropins,snarfed/oauth-dropins | handlers_test.py | handlers_test.py | """Unit tests for handlers.py.
"""
__author__ = ['Ryan Barrett <oauth-dropins@ryanb.org>']
import StringIO
import urllib2
import json
import apiclient.errors
import httplib2
from oauth2client.client import AccessTokenRefreshError
import requests
from webob import exc
import handlers
from webutil import util
from webutil import testutil
class HandlersTest(testutil.HandlerTest):
def test_interpret_http_exception(self):
ihc = handlers.interpret_http_exception
self.assertEquals(('402', '402 Payment Required\n\nmy body'), ihc(
exc.HTTPPaymentRequired(body_template='my body')))
self.assertEquals(('429', 'my body'), ihc(
apiclient.errors.HttpError(httplib2.Response({'status': 429}), 'my body')))
self.assertEquals(('429', 'my body'), ihc(
urllib2.HTTPError('url', 429, 'msg', {}, StringIO.StringIO('my body'))))
self.assertEquals((None, 'foo bar'), ihc(urllib2.URLError('foo bar')))
self.assertEquals(('429', 'my body'), ihc(
requests.HTTPError(response=util.Struct(status_code='429', text='my body'))))
self.assertEquals((None, None), ihc(AccessTokenRefreshError('invalid_foo')))
self.assertEquals(('401', None), ihc(AccessTokenRefreshError('invalid_grant')))
# this is the type of response we get back from instagram.
# because it means the source should be disabled, we convert the status code 400 to 401
ig_token_error = json.dumps({
"meta": {
"error_type": "OAuthAccessTokenException",
"code": 400,
"error_message": "The access_token provided is invalid."
}
})
self.assertEquals(('401', ig_token_error), ihc(urllib2.HTTPError(
'url', 400, 'BAD REQUEST', {}, StringIO.StringIO(ig_token_error))))
| """Unit tests for handlers.py.
"""
__author__ = ['Ryan Barrett <oauth-dropins@ryanb.org>']
import StringIO
import urllib2
import json
import apiclient.errors
import httplib2
from oauth2client.client import AccessTokenRefreshError
import requests
from webob import exc
import handlers
from webutil import util
from webutil import testutil
class HandlersTest(testutil.HandlerTest):
def test_interpret_http_exception(self):
ihc = handlers.interpret_http_exception
self.assertEquals(('402', '402 Payment Required\n\nmy body'), ihc(
exc.HTTPPaymentRequired(body_template='my body')))
self.assertEquals(('429', 'my body'), ihc(
apiclient.errors.HttpError(httplib2.Response({'status': 429}), 'my body')))
self.assertEquals(('429', 'my body'), ihc(
urllib2.HTTPError('url', 429, 'msg', {}, StringIO.StringIO('my body'))))
self.assertEquals((None, 'foo bar'), ihc(urllib2.URLError('foo bar')))
self.assertEquals(('429', 'my body'), ihc(
requests.HTTPError(response=util.Struct(status_code='429', text='my body'))))
self.assertEquals((None, None), ihc(AccessTokenRefreshError('invalid_foo')))
self.assertEquals(('401', None), ihc(AccessTokenRefreshError('invalid_grant')))
# this is the type of response we get back from instagram
ig_token_error = json.dumps({
"meta": {
"error_type": "OAuthAccessTokenException",
"code": 400,
"error_message": "The access_token provided is invalid."
}
})
self.assertEquals(('400', ig_token_error), ihc(urllib2.HTTPError(
'url', 400, 'BAD REQUEST', {}, StringIO.StringIO(ig_token_error))))
| unlicense | Python |
066526af8c7cff956c6a76c64e7f5fcf4ee2ed9b | remove comodel name | luc-demeyer/account-financial-reporting | account_financial_report_qweb/wizard/open_invoice_wizard.py | account_financial_report_qweb/wizard/open_invoice_wizard.py | # -*- coding: utf-8 -*-
# Author: Andrea andrea4ever Gallina
# Author: Francesco OpenCode Apruzzese
# Author: Ciro CiroBoxHub Urselli
# Copyright 2016 Camptocamp SA
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp import models, fields, api, _
from openerp.exceptions import Warning as UserError
from datetime import datetime
class OpenInvoiceWizard(models.TransientModel):
_name = 'open.invoice.wizard'
company_id = fields.Many2one(
'res.company', required=True,
default=lambda s: s.env.user.company_id)
at_date = fields.Date(
required=True,
default=fields.Date.to_string(datetime.today()))
partner_ids = fields.Many2many(
'res.partner', string='Filter partners')
amount_currency = fields.Boolean(
"With Currency", help="It adds the currency column")
group_by_currency = fields.Boolean(
"Group Partner by currency", help="It adds the currency column")
result_selection = fields.Selection([
('customer', 'Receivable Accounts'),
('supplier', 'Payable Accounts'),
('customer_supplier', 'Receivable and Payable Accounts')],
"Partner's", required=True, default='customer')
target_move = fields.Selection([
('posted', 'All Posted Entries'),
('all', 'All Entries')], 'Target Moves',
required=True, default='all')
until_date = fields.Date(
"Clearance date", required=True,
help="""The clearance date is essentially a tool used for debtors
provisionning calculation.
By default, this date is equal to the the end date (
ie: 31/12/2011 if you select fy 2011).
By amending the clearance date, you will be, for instance,
able to answer the question : 'based on my last
year end debtors open invoices, which invoices are still
unpaid today (today is my clearance date)?'""")
@api.onchange('at_date')
def onchange_atdate(self):
self.until_date = self.at_date
@api.onchange('until_date')
def onchange_untildate(self):
# ---- until_date must be always >= of at_date
if self.until_date:
if self.until_date < self.at_date:
raise UserError(
'Until Date must be equal or greater then At Date')
@api.multi
def print_report(self):
pass
| # -*- coding: utf-8 -*-
# Author: Andrea andrea4ever Gallina
# Author: Francesco OpenCode Apruzzese
# Author: Ciro CiroBoxHub Urselli
# Copyright 2016 Camptocamp SA
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp import models, fields, api, _
from openerp.exceptions import Warning as UserError
from datetime import datetime
class OpenInvoiceWizard(models.TransientModel):
_name = 'open.invoice.wizard'
company_id = fields.Many2one(
'res.company', required=True,
default=lambda s: s.env.user.company_id)
at_date = fields.Date(
required=True,
default=fields.Date.to_string(datetime.today()))
partner_ids = fields.Many2many(
'res.partner', comodel_name='res.partner',
string='Filter partners',)
amount_currency = fields.Boolean(
"With Currency", help="It adds the currency column")
group_by_currency = fields.Boolean(
"Group Partner by currency", help="It adds the currency column")
result_selection = fields.Selection([
('customer', 'Receivable Accounts'),
('supplier', 'Payable Accounts'),
('customer_supplier', 'Receivable and Payable Accounts')],
"Partner's", required=True, default='customer')
target_move = fields.Selection([
('posted', 'All Posted Entries'),
('all', 'All Entries')], 'Target Moves',
required=True, default='all')
until_date = fields.Date(
"Clearance date", required=True,
help="""The clearance date is essentially a tool used for debtors
provisionning calculation.
By default, this date is equal to the the end date (
ie: 31/12/2011 if you select fy 2011).
By amending the clearance date, you will be, for instance,
able to answer the question : 'based on my last
year end debtors open invoices, which invoices are still
unpaid today (today is my clearance date)?'""")
@api.onchange('at_date')
def onchange_atdate(self):
self.until_date = self.at_date
@api.onchange('until_date')
def onchange_untildate(self):
# ---- until_date must be always >= of at_date
if self.until_date:
if self.until_date < self.at_date:
raise UserError(
'Until Date must be equal or greater then At Date')
@api.multi
def print_report(self):
pass
| agpl-3.0 | Python |
240ad133b9d80f324ae3613937f04b3c2737128d | bump version | AndrewWalker/glud | glud/version.py | glud/version.py | __version__ = '0.3.4-rc0'
| __version__ = '0.3.3'
| mit | Python |
1f6829d83fcf354e63b4e758a40c199aa466be92 | Change text to be more accurate | GeoSensorWebLab/arctic-biomap-server,GeoSensorWebLab/arctic-biomap-server,GeoSensorWebLab/arctic-biomap-server,johan--/arctic-biomap-server,johan--/arctic-biomap-server,johan--/arctic-biomap-server | backend/server.py | backend/server.py | #!/usr/bin/env python
import time
import os
import sys
import logging
import tornado.httpserver
import tornado.ioloop
import tornado.web
import json
import urllib
import os
import time
import sys
from service.frontend import LoginHandler
from service.frontend import UsersHandler
from service.frontend import SightingsHandler
from service.frontend import ImagesHandler
bind_ip = "0.0.0.0"
settings = {
"static_path": os.path.join(os.path.dirname(__file__), "www/static"),
"template_path": os.path.join(os.path.dirname(__file__), "www/template"),
"cookie_secret": os.getenv('COOKIE', ''),
"debug": True,
}
application = tornado.web.Application([
# (r"/static/(.*)", tornado.web.StaticFileHandler, dict(path=settings['static_path'])),
(r"/biomap/login", LoginHandler),
(r"/biomap/users", UsersHandler),
(r"/biomap/sightings", SightingsHandler),
(r"/biomap/images", ImagesHandler),
], **settings)
def main(ip, port):
http_server = tornado.httpserver.HTTPServer(application)
http_server.listen(port, ip)
print("Loading HTTP Server on %s:%i" % (ip, port))
tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
try:
port = int(sys.argv[1])
except:
port = 8081
main(bind_ip, port)
| #!/usr/bin/env python
import time
import os
import sys
import logging
import tornado.httpserver
import tornado.ioloop
import tornado.web
import json
import urllib
import os
import time
import sys
from service.frontend import LoginHandler
from service.frontend import UsersHandler
from service.frontend import SightingsHandler
from service.frontend import ImagesHandler
bind_ip = "0.0.0.0"
settings = {
"static_path": os.path.join(os.path.dirname(__file__), "www/static"),
"template_path": os.path.join(os.path.dirname(__file__), "www/template"),
"cookie_secret": os.getenv('COOKIE', ''),
"debug": True,
}
application = tornado.web.Application([
# (r"/static/(.*)", tornado.web.StaticFileHandler, dict(path=settings['static_path'])),
(r"/biomap/login", LoginHandler),
(r"/biomap/users", UsersHandler),
(r"/biomap/sightings", SightingsHandler),
(r"/biomap/images", ImagesHandler),
], **settings)
def main(ip, port):
http_server = tornado.httpserver.HTTPServer(application)
http_server.listen(port, ip)
print("Starting HTTP Server on %s:%i" % (ip, port))
tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
try:
port = int(sys.argv[1])
except:
port = 8081
main(bind_ip, port)
| mit | Python |
2d13103ff723e60edcabc2cd3121bc79fd7a4a1f | Check if ltihooks is present before trying to delete it | freedesktop-unofficial-mirror/gstreamer__gst-python,GStreamer/gst-python,lubosz/gst-python,freedesktop-unofficial-mirror/gstreamer__gst-python,pexip/gst-python,freedesktop-unofficial-mirror/gstreamer__gst-python,GStreamer/gst-python,freedesktop-unofficial-mirror/gstreamer-sdk__gst-python,lubosz/gst-python,freedesktop-unofficial-mirror/gstreamer-sdk__gst-python,alessandrod/gst-python,freedesktop-unofficial-mirror/gstreamer-sdk__gst-python,pexip/gst-python,freedesktop-unofficial-mirror/gstreamer-sdk__gst-python,lubosz/gst-python,pexip/gst-python,GStreamer/gst-python,alessandrod/gst-python,alessandrod/gst-python | gst/__init__.py | gst/__init__.py | # -*- Mode: Python -*-
# vi:si:et:sw=4:sts=4:ts=4
#
# gst-python
# Copyright (C) 2002 David I. Lehn
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
# Author: David I. Lehn <dlehn@users.sourceforge.net>
__ltihooks_used__ = False
try:
import ltihooks
__ltihooks_used__ = True
except:
pass
import pygtk
pygtk.require('2.0')
import gobject
del gobject
try:
import sys, DLFCN
sys.setdlopenflags(DLFCN.RTLD_LAZY | DLFCN.RTLD_GLOBAL)
del sys, DLFCN
except ImportError:
pass
class Value:
def __init__(self, type):
assert type in ('fourcc', 'intrange', 'doublerange', 'fraction')
self.type = type
class Fourcc(Value):
def __init__(self, string):
Value.__init__(self, 'fourcc')
self.fourcc = string
def __repr__(self):
return '<gst.Fourcc %s>' % self.fourcc
class IntRange(Value):
def __init__(self, low, high):
Value.__init__(self, 'intrange')
self.low = low
self.high = high
def __repr__(self):
return '<gst.IntRange [%d, %d]>' % (self.low, self.high)
class DoubleRange(Value):
def __init__(self, low, high):
Value.__init__(self, 'doublerange')
self.low = low
self.high = high
def __repr__(self):
return '<gst.DoubleRange [%f, %f]>' % (self.low, self.high)
class Fraction(Value):
def __init__(self, num, denom):
Value.__init__(self, 'fraction')
self.num = num
self.denom = denom
def __repr__(self):
return '<gst.Fraction %d/%d>' % (self.num, self.denom)
from _gst import *
import interfaces
# this restores previously installed importhooks, so we don't interfere
# with other people's module importers
# it also clears out the module completely as if it were never loaded,
# so that if anyone else imports ltihooks the hooks get installed
if __ltihooks_used__:
ltihooks.uninstall()
__ltihooks_used__ = False
del ltihooks
import sys
if 'ltihooks' in sys.modules:
del sys.modules['ltihooks']
| # -*- Mode: Python -*-
# vi:si:et:sw=4:sts=4:ts=4
#
# gst-python
# Copyright (C) 2002 David I. Lehn
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
# Author: David I. Lehn <dlehn@users.sourceforge.net>
__ltihooks_used__ = False
try:
import ltihooks
__ltihooks_used__ = True
except:
pass
import pygtk
pygtk.require('2.0')
import gobject
del gobject
try:
import sys, DLFCN
sys.setdlopenflags(DLFCN.RTLD_LAZY | DLFCN.RTLD_GLOBAL)
del sys, DLFCN
except ImportError:
pass
class Value:
def __init__(self, type):
assert type in ('fourcc', 'intrange', 'doublerange', 'fraction')
self.type = type
class Fourcc(Value):
def __init__(self, string):
Value.__init__(self, 'fourcc')
self.fourcc = string
def __repr__(self):
return '<gst.Fourcc %s>' % self.fourcc
class IntRange(Value):
def __init__(self, low, high):
Value.__init__(self, 'intrange')
self.low = low
self.high = high
def __repr__(self):
return '<gst.IntRange [%d, %d]>' % (self.low, self.high)
class DoubleRange(Value):
def __init__(self, low, high):
Value.__init__(self, 'doublerange')
self.low = low
self.high = high
def __repr__(self):
return '<gst.DoubleRange [%f, %f]>' % (self.low, self.high)
class Fraction(Value):
def __init__(self, num, denom):
Value.__init__(self, 'fraction')
self.num = num
self.denom = denom
def __repr__(self):
return '<gst.Fraction %d/%d>' % (self.num, self.denom)
from _gst import *
import interfaces
# this restores previously installed importhooks, so we don't interfere
# with other people's module importers
# it also clears out the module completely as if it were never loaded,
# so that if anyone else imports ltihooks the hooks get installed
if __ltihooks_used__:
ltihooks.uninstall()
__ltihooks_used__ = False
del ltihooks
import sys
del sys.modules['ltihooks']
| lgpl-2.1 | Python |
fdd4b3c4323499e067a794674a5f72e4764ea81f | Add column extraction for html tables to tools | mzunhammer/hracing | hracing/tools.py | hracing/tools.py | def flatten(l, ltypes=(list, tuple)):
""" Flattens nested lists l to yield a 1-d list"""
ltype = type(l)
l = list(l)
i = 0
while i < len(l):
while isinstance(l[i], ltypes):
if not l[i]:
l.pop(i)
i -= 1
break
else:
l[i:i + 1] = l[i]
i += 1
return ltype(l)
def cols_from_html_tbl(tbl):
""" Extracts columns from html-table tbl and puts columns in a list.
tbl must be a results-object from BeautifulSoup)"""
rows = tbl.find_all('tr')
for row in rows:
cols = row.find_all('td')
for i,cell in enumerate(cols):
if not'col_list' in locals():
col_list=[[] for x in range(len(cols))]
col_list[i].append(cell.text)
return col_list
def isnumber(s):
""" Checks if string can be converted to float and is >0"""
try:
f=float(s.replace(',','.'))
if f > 0:
return True
else:
return False
except ValueError:
return False | def flatten(l, ltypes=(list, tuple)):
ltype = type(l)
l = list(l)
i = 0
while i < len(l):
while isinstance(l[i], ltypes):
if not l[i]:
l.pop(i)
i -= 1
break
else:
l[i:i + 1] = l[i]
i += 1
return ltype(l)
a = []
for i in range(2000):
a = [a, i]
a = flatten(a) | mit | Python |
67e8b41d33360e2fb895e051005e36b95d4187ea | fix inheritance pattern of btax mock classes | OpenSourcePolicyCenter/PolicyBrain,OpenSourcePolicyCenter/webapp-public,OpenSourcePolicyCenter/PolicyBrain,OpenSourcePolicyCenter/webapp-public,OpenSourcePolicyCenter/webapp-public,OpenSourcePolicyCenter/PolicyBrain,OpenSourcePolicyCenter/webapp-public,OpenSourcePolicyCenter/PolicyBrain | webapp/apps/btax/compute.py | webapp/apps/btax/compute.py | import dropq
import os
from ..taxbrain.models import WorkerNodesCounter
import json
import requests
from requests.exceptions import Timeout, RequestException
from .helpers import arrange_totals_by_row
from ..taxbrain.compute import (DropqCompute,
MockCompute,
MockFailedCompute,
NodeDownCompute,
JobFailError,
ENFORCE_REMOTE_VERSION_CHECK,
TIMEOUT_IN_SECONDS,
dropq_version)
import requests_mock
requests_mock.Mocker.TEST_PREFIX = 'dropq'
def package_up_vars(self, user_mods, first_budget_year):
# TODO - is first_budget_year important here?
user_mods = {k: v for k, v in user_mods.iteritems()
if k.startswith(('btax_', 'start_year'))}
user_mods = {k: (v[0] if hasattr(v, '__getitem__') else v)
for k, v in user_mods.iteritems()}
return user_mods
def dropq_get_results(self, job_ids):
ans = self._get_results_base(job_ids)
return ans
class DropqComputeBtax(DropqCompute):
num_budget_years = 1
package_up_vars = package_up_vars
dropq_get_results = dropq_get_results
class MockComputeBtax(MockCompute):
num_budget_years = 1
package_up_vars = package_up_vars
dropq_get_results = dropq_get_results
class MockFailedComputeBtax(MockFailedCompute):
num_budget_years = 1
package_up_vars = package_up_vars
dropq_get_results = dropq_get_results
class NodeDownComputeBtax(NodeDownCompute):
num_budget_years = 1
package_up_vars = package_up_vars
dropq_get_results = dropq_get_results
| import dropq
import os
from ..taxbrain.models import WorkerNodesCounter
import json
import requests
from requests.exceptions import Timeout, RequestException
from .helpers import arrange_totals_by_row
from ..taxbrain.compute import (DropqCompute,
MockCompute,
MockFailedCompute,
JobFailError,
ENFORCE_REMOTE_VERSION_CHECK,
TIMEOUT_IN_SECONDS,
dropq_version)
import requests_mock
requests_mock.Mocker.TEST_PREFIX = 'dropq'
def package_up_vars(self, user_mods, first_budget_year):
# TODO - is first_budget_year important here?
user_mods = {k: v for k, v in user_mods.iteritems()
if k.startswith(('btax_', 'start_year'))}
user_mods = {k: (v[0] if hasattr(v, '__getitem__') else v)
for k, v in user_mods.iteritems()}
return user_mods
def dropq_get_results(self, job_ids):
ans = self._get_results_base(job_ids)
return ans
class DropqComputeBtax(DropqCompute):
num_budget_years = 1
package_up_vars = package_up_vars
dropq_get_results = dropq_get_results
class MockComputeBtax(MockCompute):
num_budget_years = 1
package_up_vars = package_up_vars
dropq_get_results = dropq_get_results
class MockFailedComputeBtax(MockComputeBtax):
num_budget_years = 1
package_up_vars = package_up_vars
dropq_get_results = dropq_get_results
class NodeDownComputeBtax(MockComputeBtax):
num_budget_years = 1
package_up_vars = package_up_vars
dropq_get_results = dropq_get_results
| mit | Python |
abe1872e51fa2beeb2fa7deee87ceb01753ef854 | Remove UTF-8 comment. | dscorbett/pygments,pygments/pygments,dscorbett/pygments,dscorbett/pygments,dscorbett/pygments,pygments/pygments,pygments/pygments,dscorbett/pygments,pygments/pygments,pygments/pygments,pygments/pygments,dscorbett/pygments,pygments/pygments,dscorbett/pygments,pygments/pygments,pygments/pygments,pygments/pygments,pygments/pygments,dscorbett/pygments,pygments/pygments,pygments/pygments,dscorbett/pygments,pygments/pygments,dscorbett/pygments,dscorbett/pygments,dscorbett/pygments,pygments/pygments,dscorbett/pygments,dscorbett/pygments,dscorbett/pygments,dscorbett/pygments,pygments/pygments,pygments/pygments,dscorbett/pygments,dscorbett/pygments,pygments/pygments | pygments/styles/lilypond.py | pygments/styles/lilypond.py | """
pygments.styles.lilypond
~~~~~~~~~~~~~~~~~~~~~~~~
LilyPond-specific style.
:copyright: Copyright 2021-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Token
class LilypondStyle(Style):
"""
Style for the LilyPond language.
.. versionadded:: 2.11
"""
default_style = "#0000ff"
styles = {
Token.Whitespace: "",
Token.Text: "",
Token.Keyword: "bold",
Token.Comment: "italic #A3AAB2",
Token.String: "#AB0909",
Token.String.Escape: "#C46C6C",
Token.String.Symbol: "noinherit",
Token.Pitch: "", #"#911520",
Token.Number: "#976806", # includes durations
# A bare 11 is not distinguishable from a number, so we highlight
# the same.
Token.ChordModifier: "#976806",
Token.Name.Lvalue: "#08547A",
Token.Name.BackslashReference: "#08547A",
Token.Name.Builtin.MusicCommand: "bold #08547A",
Token.Name.Builtin.PaperVariable: "bold #6C5A05",
Token.Name.Builtin.HeaderVariable: "bold #6C5A05",
Token.Name.Builtin.MusicFunction: "bold #08547A",
Token.Name.Builtin.Clef: "bold #08547A",
Token.Name.Builtin.Scale: "bold #08547A",
Token.Name.Builtin.RepeatType: "#08547A",
Token.Name.Builtin.Dynamic: "#68175A",
Token.Name.Builtin.Articulation: "#68175A",
Token.Name.Builtin.SchemeFunction: "bold #A83401",
Token.Name.Builtin.SchemeBuiltin: "bold",
Token.Name.Builtin.MarkupCommand: "bold #831E71",
Token.Name.Builtin.Context: "bold #038B8B",
Token.Name.Builtin.ContextProperty: "#038B8B",
Token.Name.Builtin.Grob: "bold #0C7441",
Token.Name.Builtin.GrobProperty: "#0C7441",
Token.Name.Builtin.Translator: "bold #6200A4",
}
| # -*- coding: utf-8 -*-
"""
pygments.styles.lilypond
~~~~~~~~~~~~~~~~~~~~~~~~
LilyPond-specific style.
:copyright: Copyright 2021-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Token
class LilypondStyle(Style):
"""
Style for the LilyPond language.
.. versionadded:: 2.11
"""
default_style = "#0000ff"
styles = {
Token.Whitespace: "",
Token.Text: "",
Token.Keyword: "bold",
Token.Comment: "italic #A3AAB2",
Token.String: "#AB0909",
Token.String.Escape: "#C46C6C",
Token.String.Symbol: "noinherit",
Token.Pitch: "", #"#911520",
Token.Number: "#976806", # includes durations
# A bare 11 is not distinguishable from a number, so we highlight
# the same.
Token.ChordModifier: "#976806",
Token.Name.Lvalue: "#08547A",
Token.Name.BackslashReference: "#08547A",
Token.Name.Builtin.MusicCommand: "bold #08547A",
Token.Name.Builtin.PaperVariable: "bold #6C5A05",
Token.Name.Builtin.HeaderVariable: "bold #6C5A05",
Token.Name.Builtin.MusicFunction: "bold #08547A",
Token.Name.Builtin.Clef: "bold #08547A",
Token.Name.Builtin.Scale: "bold #08547A",
Token.Name.Builtin.RepeatType: "#08547A",
Token.Name.Builtin.Dynamic: "#68175A",
Token.Name.Builtin.Articulation: "#68175A",
Token.Name.Builtin.SchemeFunction: "bold #A83401",
Token.Name.Builtin.SchemeBuiltin: "bold",
Token.Name.Builtin.MarkupCommand: "bold #831E71",
Token.Name.Builtin.Context: "bold #038B8B",
Token.Name.Builtin.ContextProperty: "#038B8B",
Token.Name.Builtin.Grob: "bold #0C7441",
Token.Name.Builtin.GrobProperty: "#0C7441",
Token.Name.Builtin.Translator: "bold #6200A4",
}
| bsd-2-clause | Python |
0d10b60f73703bf142ad826c897cf6bf7d58ffc3 | Return from Inbox edit view is now based on HTTP referer | Inboxen/Inboxen,Inboxen/Inboxen,Inboxen/Inboxen,Inboxen/Inboxen | website/views/inbox/edit.py | website/views/inbox/edit.py | ##
# Copyright (C) 2013-2015 Jessica Tallon & Matt Molyneaux
#
# This file is part of Inboxen.
#
# Inboxen is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Inboxen is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Inboxen. If not, see <http://www.gnu.org/licenses/>.
##
from django.views import generic
from django.utils.translation import ugettext as _
from django.core.urlresolvers import reverse, resolve, Resolver404
from website import forms
from website.views import base
from inboxen.models import Inbox
__all__ = ["InboxEditView", "FormInboxEditView"]
class InboxEditView(base.CommonContextMixin, base.LoginRequiredMixin, generic.UpdateView):
form_class = forms.InboxEditForm
template_name = "inbox/edit.html"
success_views = ["user-home", "unified-inbox", "single-inbox"]
def get_headline(self):
return _("{inbox}@{domain} Options").format(inbox=self.kwargs["inbox"], domain=self.kwargs["domain"])
def get_form_kwargs(self):
kwargs = super(InboxEditView, self).get_form_kwargs()
kwargs.setdefault("request", self.request)
return kwargs
def get_object(self, *args, **kwargs):
inbox = self.request.user.inbox_set.select_related("domain")
return inbox.get(inbox=self.kwargs["inbox"], domain__domain=self.kwargs["domain"], flags=~Inbox.flags.deleted)
def get_success_url(self):
referer = self.request.META.get("HTTP_REFERER", "/user/home/")
try:
url_name = resolve(referer).url_name
self.success_views.index(url_name)
return referer
except ValueError, Resolver404:
return reverse("user-home")
class FormInboxEditView(InboxEditView):
template_name = "forms/inbox/edit.html"
def form_valid(self, form):
response = super(FormInboxEditView, self).form_valid(form)
response.status_code = 204
return response
| ##
# Copyright (C) 2013-2015 Jessica Tallon & Matt Molyneaux
#
# This file is part of Inboxen.
#
# Inboxen is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Inboxen is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Inboxen. If not, see <http://www.gnu.org/licenses/>.
##
from django.views import generic
from django.utils.translation import ugettext as _
from django.core.urlresolvers import reverse_lazy
from website import forms
from website.views import base
from inboxen.models import Inbox
__all__ = ["InboxEditView", "FormInboxEditView"]
class InboxEditView(base.CommonContextMixin, base.LoginRequiredMixin, generic.UpdateView):
form_class = forms.InboxEditForm
template_name = "inbox/edit.html"
success_url = reverse_lazy('user-home')
def get_headline(self):
return _("{inbox}@{domain} Options").format(inbox=self.kwargs["inbox"], domain=self.kwargs["domain"])
def get_form_kwargs(self):
kwargs = super(InboxEditView, self).get_form_kwargs()
kwargs.setdefault("request", self.request)
return kwargs
def get_object(self, *args, **kwargs):
inbox = self.request.user.inbox_set.select_related("domain")
return inbox.get(inbox=self.kwargs["inbox"], domain__domain=self.kwargs["domain"], flags=~Inbox.flags.deleted)
class FormInboxEditView(InboxEditView):
template_name = "forms/inbox/edit.html"
def form_valid(self, form):
response = super(FormInboxEditView, self).form_valid(form)
response.status_code = 204
return response
| agpl-3.0 | Python |
d2a6e24cb4703bad0c16bffc00936cc014d516af | bump version | danpoland/pyramid-restful-framework | pyramid_restful/__init__.py | pyramid_restful/__init__.py | from .settings import reload_api_settings
__version__ = '0.13.0'
VERSION = __version__
def includeme(config):
reload_api_settings(config.registry.settings)
| from .settings import reload_api_settings
__version__ = '0.12.0'
VERSION = __version__
def includeme(config):
reload_api_settings(config.registry.settings)
| bsd-2-clause | Python |
7a055e12b809110cdb07a642d12e11d909be1376 | Add convenience properties for discovery config items | leth/nose2,leth/nose2,ptthiem/nose2,little-dude/nose2,little-dude/nose2,ojengwa/nose2,ezigman/nose2,ptthiem/nose2,ezigman/nose2,ojengwa/nose2 | nose2/session.py | nose2/session.py | import argparse
from six.moves import configparser
from nose2 import config, events, util
class Session(object):
"""Configuration session.
Encapsulates all configuration for a given test run.
"""
def __init__(self):
self.argparse = argparse.ArgumentParser(prog='nose2')
self.config = configparser.ConfigParser()
self.hooks = events.PluginInterface()
self.plugins = []
def get(self, section):
# FIXME cache these
items = []
if self.config.has_section(section):
items = self.config.items(section)
return config.Config(items)
def loadConfigFiles(self, *filenames):
self.config.read(filenames)
def loadPlugins(self, modules=None):
# plugins set directly
if modules is None:
modules = []
# plugins mentioned in config file(s)
cfg = self.get('unittest')
more_plugins = cfg.as_list('plugins', [])
exclude = set(cfg.as_list('excluded-plugins', []))
all_ = set(sum(modules, more_plugins)) - exclude
for module in all_:
self.loadPluginsFromModule(util.module_from_name(module))
self.hooks.loadedPlugins(events.PluginsLoadedEvent(self.plugins))
def loadPluginsFromModule(self, module):
avail = []
for entry in dir(module):
try:
item = getattr(module, entry)
except AttributeError:
pass
try:
if issubclass(item, events.Plugin):
avail.append(item)
except TypeError:
pass
for cls in avail:
self.plugins.append(cls(session=self))
def registerPlugin(self, plugin):
if plugin not in self.plugins:
self.plugins.append(plugin)
for method in self.hooks.methods:
if hasattr(plugin, method):
self.hooks.register(method, plugin)
# convenience properties
@property
def testFilePattern(self):
return self.get('unittest').as_str('test-file-pattern', 'test*.py')
@property
def testMethodPrefix(self):
return self.get('unittest').as_str('test-method-prefix', 'test')
| import argparse
from six.moves import configparser
from nose2 import config, events, util
class Session(object):
"""Configuration session.
Encapsulates all configuration for a given test run.
"""
def __init__(self):
self.argparse = argparse.ArgumentParser(prog='nose2')
self.config = configparser.ConfigParser()
self.hooks = events.PluginInterface()
self.plugins = []
def get(self, section):
# FIXME cache these
items = []
if self.config.has_section(section):
items = self.config.items(section)
return config.Config(items)
def loadConfigFiles(self, *filenames):
self.config.read(filenames)
def loadPlugins(self, modules=None):
# plugins set directly
if modules is None:
modules = []
# plugins mentioned in config file(s)
cfg = self.get('unittest')
more_plugins = cfg.as_list('plugins', [])
exclude = set(cfg.as_list('excluded-plugins', []))
all_ = set(sum(modules, more_plugins)) - exclude
for module in all_:
self.loadPluginsFromModule(util.module_from_name(module))
self.hooks.loadedPlugins(events.PluginsLoadedEvent(self.plugins))
def loadPluginsFromModule(self, module):
avail = []
for entry in dir(module):
try:
item = getattr(module, entry)
except AttributeError:
pass
try:
if issubclass(item, events.Plugin):
avail.append(item)
except TypeError:
pass
for cls in avail:
self.plugins.append(cls(session=self))
def registerPlugin(self, plugin):
if plugin not in self.plugins:
self.plugins.append(plugin)
for method in self.hooks.methods:
if hasattr(plugin, method):
self.hooks.register(method, plugin)
| bsd-2-clause | Python |
19f2d58c40d56b6d4a30c1bc12ce24af7db9e992 | Fix names used in dft.old refft->rfft | efiring/numpy-work,teoliphant/numpy-refactor,efiring/numpy-work,illume/numpy3k,chadnetzer/numpy-gaurdro,efiring/numpy-work,jasonmccampbell/numpy-refactor-sprint,jasonmccampbell/numpy-refactor-sprint,chadnetzer/numpy-gaurdro,chadnetzer/numpy-gaurdro,efiring/numpy-work,Ademan/NumPy-GSoC,illume/numpy3k,Ademan/NumPy-GSoC,illume/numpy3k,illume/numpy3k,teoliphant/numpy-refactor,teoliphant/numpy-refactor,jasonmccampbell/numpy-refactor-sprint,chadnetzer/numpy-gaurdro,jasonmccampbell/numpy-refactor-sprint,teoliphant/numpy-refactor,Ademan/NumPy-GSoC,teoliphant/numpy-refactor,Ademan/NumPy-GSoC | numpy/dft/old.py | numpy/dft/old.py |
__all__ = ['fft', 'fft2d', 'fftnd', 'hermite_fft', 'inverse_fft', 'inverse_fft2d',
'inverse_fftnd', 'inverse_hermite_fft', 'inverse_real_fft', 'inverse_real_fft2d',
'inverse_real_fftnd', 'real_fft', 'real_fft2d', 'real_fftnd']
from fftpack import fft
from fftpack import fft2 as fft2d
from fftpack import fftn as fftnd
from fftpack import hfft as hermite_fft
from fftpack import ifft as inverse_fft
from fftpack import ifft2 as inverse_fft2d
from fftpack import ifftn as inverse_fftnd
from fftpack import ihfft as inverse_hermite_fft
from fftpack import irfft as inverse_real_fft
from fftpack import irfft2 as inverse_real_fft2d
from fftpack import irfftn as inverse_real_fftnd
from fftpack import rfft as real_fft
from fftpack import rfft2 as real_fft2d
from fftpack import rfftn as real_fftnd
|
__all__ = ['fft', 'fft2d', 'fftnd', 'hermite_fft', 'inverse_fft', 'inverse_fft2d',
'inverse_fftnd', 'inverse_hermite_fft', 'inverse_real_fft', 'inverse_real_fft2d',
'inverse_real_fftnd', 'real_fft', 'real_fft2d', 'real_fftnd']
from fftpack import fft
from fftpack import fft2 as fft2d
from fftpack import fftn as fftnd
from fftpack import hfft as hermite_fft
from fftpack import ifft as inverse_fft
from fftpack import ifft2 as inverse_fft2d
from fftpack import ifftn as inverse_fftnd
from fftpack import ihfft as inverse_hermite_fft
from fftpack import irefft as inverse_real_fft
from fftpack import irefft2 as inverse_real_fft2d
from fftpack import irefftn as inverse_real_fftnd
from fftpack import refft as real_fft
from fftpack import refft2 as real_fft2d
from fftpack import refftn as real_fftnd
| bsd-3-clause | Python |
536605ed5200e87bcac9c4544eadd2a4df373cdc | update font | shunliz/test,shunliz/test,shunliz/test,shunliz/test,shunliz/test,shunliz/test,shunliz/test,shunliz/test | python/pygame/game4/font.py | python/pygame/game4/font.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# 记住上面这行是必须的,而且保存文件的编码要一致!
import pygame
from pygame.locals import *
from sys import exit
pygame.init()
try:
screen = pygame.display.set_mode((640, 480), 0, 32)
except pygame.error, e:
print "Can't create the display :-("
print e
exit()
#screen = pygame.display.set_mode((640, 480), 0, 32)
font = pygame.font.SysFont("宋体", 40)
#上句在Linux可行,在我的Windows 7 64bit上不行,XP不知道行不行
#font = pygame.font.SysFont("simsunnsimsun", 40)
#用get_fonts()查看后看到了这个字体名,在我的机器上可以正常显示了
#font = pygame.font.Font("simsun.ttc", 40)
#这句话总是可以的,所以还是TTF文件保险啊
text_surface = font.render(u"你好", True, (0, 0, 255))
x = 0
y = (480 - text_surface.get_height())/2
background = pygame.image.load("sushiplate.jpg").convert()
while True:
for event in pygame.event.get():
if event.type == QUIT:
exit()
screen.blit(background, (0, 0))
x -= 2 # 文字滚动太快的话,改改这个数字
if x < -text_surface.get_width():
x = 640 - text_surface.get_width()
screen.blit(text_surface, (x, y))
pygame.display.update() | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# 记住上面这行是必须的,而且保存文件的编码要一致!
import pygame
from pygame.locals import *
from sys import exit
pygame.init()
screen = pygame.display.set_mode((640, 480), 0, 32)
font = pygame.font.SysFont("宋体", 40)
#上句在Linux可行,在我的Windows 7 64bit上不行,XP不知道行不行
#font = pygame.font.SysFont("simsunnsimsun", 40)
#用get_fonts()查看后看到了这个字体名,在我的机器上可以正常显示了
#font = pygame.font.Font("simsun.ttc", 40)
#这句话总是可以的,所以还是TTF文件保险啊
text_surface = font.render(u"你好", True, (0, 0, 255))
x = 0
y = (480 - text_surface.get_height())/2
background = pygame.image.load("sushiplate.jpg").convert()
while True:
for event in pygame.event.get():
if event.type == QUIT:
exit()
screen.blit(background, (0, 0))
x -= 2 # 文字滚动太快的话,改改这个数字
if x < -text_surface.get_width():
x = 640 - text_surface.get_width()
screen.blit(text_surface, (x, y))
pygame.display.update() | apache-2.0 | Python |
737b84fa3a7789cb263cb58cab9dcb1858995982 | Add Tensor Flow | kakaba2009/MachineLearning,kakaba2009/MachineLearning,kakaba2009/MachineLearning,kakaba2009/MachineLearning | python/src/fft/FourierEx.py | python/src/fft/FourierEx.py | import numpy as np
import pylab as plt
from numpy import fft
import src.mylib.mlstm as mlstm
def bandpass_filter(x, freq, frequency_of_signal=0, band=0.1):
if (frequency_of_signal - band) < abs(freq) < (frequency_of_signal + band):
return x
else:
return 0
ds = mlstm.loadFXData('JPY=X', '../db/forex.db', 3500)
y = ds[['Close']].values
y = y.flatten()
for k in range(5, 30):
for i in range(0, 3000, 1):
x = y[i:i+k]
N = len(x)
spectrum = fft.fft(x)
feq = fft.fftfreq(N) # frequencies
ampli = np.absolute(spectrum) # amplitude
phase = np.angle(spectrum) # phase
#print(phase)
index = np.argsort(-ampli)
sfreq = feq[index]
sampl = ampli[index]
sfreq = sfreq[sfreq > 0]
#big = list(zip(*sfreq))
if sfreq[0] * N >= k*0.5 - 1:
print(N, sfreq[:2] * N)
#plt.semilogy(ampli, 'o')
#F_filtered = np.asanyarray([bandpass_filter(x, freq) for x, freq in zip(spectrum, feq)])
#filtered_signal = np.fft.ifft(F_filtered)
#plt.semilogy(feq[1:], ampli[1:]), 'o') #zero feq is very large
#plt.semilogy(ampli[1:])
#plt.title(str(N))
#plt.legend()
#plt.show() | import numpy as np
import pylab as plt
from numpy import fft
import src.mylib.mlstm as mlstm
def bandpass_filter(x, freq, frequency_of_signal=0, band=0.1):
if (frequency_of_signal - band) < abs(freq) < (frequency_of_signal + band):
return x
else:
return 0
ds = mlstm.loadFXData('JPY=X', '../db/forex.db', 9000)
x = ds[['Close']].values
x = x.flatten()
L = []
for i in range(1000):
x = x[i:]
N = len(x)
spectrum = fft.fft(x)
feq = fft.fftfreq(N) # frequencies
ampli = np.absolute(spectrum) # amplitude
phase = np.angle(spectrum) # phase
#print(phase)
index = np.argsort(-ampli)
sfreq = feq[index]
sampl = ampli[index]
#sfreq = np.where(sfreq > 0)
#big = list(zip(*sfreq))
print(sfreq[1:10] * N)
#plt.semilogy(sfreq * N, sampl, 'o')
#F_filtered = np.asanyarray([bandpass_filter(x, freq) for x, freq in zip(spectrum, feq)])
#filtered_signal = np.fft.ifft(F_filtered)
#plt.semilogy(feq[1:], ampli[1:]), 'o') #zero feq is very large
#plt.semilogy(ampli[1:])
plt.legend()
plt.show()
| apache-2.0 | Python |
bd93afc0d0b8af145e302d3ff0ffcf7c5c7fc9b6 | Fix versions | mkukielka/oddt,mkukielka/oddt,oddt/oddt,oddt/oddt | oddt/__init__.py | oddt/__init__.py | """Open Drug Discovery Toolkit
==============================
Universal and easy to use resource for various drug discovery tasks, ie docking, virutal screening, rescoring.
Attributes
----------
toolkit : module,
Toolkits backend module, currenlty OpenBabel [ob] and RDKit [rdk].
This setting is toolkit-wide, and sets given toolkit as default
"""
import os, subprocess
from numpy.random import seed as np_seed
from random import seed as python_seed
try:
from .toolkits import ob
except ImportError:
ob = None
try:
from .toolkits import rdk
except ImportError:
rdk = None
if ob:
toolkit = ob
elif rdk:
toolkit = rdk
else:
raise Exception('You need at least one toolkit for ODDT.')
def get_version():
home = os.path.dirname(__file__)
v = None
if os.path.isdir(home + '/../.git'):
try:
v = subprocess.check_output(['git', 'describe', '--tags'], cwd=home).strip()
except CalledProcessError: # catch errors, eg. no git installed
pass
if not v:
v = '0.1.3'
return v
__version__ = get_version()
__all__ = ['toolkit']
def random_seed(i):
"""
Set global random seed for all underlying components. Use 'brute-force' approach, by setting undelying libraries' seeds.
Parameters
----------
i: int
integer used as seed for random number generators
"""
# python's random module
python_seed(i)
# numpy random module
np_seed(i)
| """Open Drug Discovery Toolkit
==============================
Universal and easy to use resource for various drug discovery tasks, ie docking, virutal screening, rescoring.
Attributes
----------
toolkit : module,
Toolkits backend module, currenlty OpenBabel [ob] and RDKit [rdk].
This setting is toolkit-wide, and sets given toolkit as default
"""
import os, subprocess
from numpy.random import seed as np_seed
from random import seed as python_seed
try:
from .toolkits import ob
except ImportError:
ob = None
try:
from .toolkits import rdk
except ImportError:
rdk = None
if ob:
toolkit = ob
elif rdk:
toolkit = rdk
else:
raise Exception('You need at least one toolkit for ODDT.')
def get_version():
home = os.path.dirname(__file__)
v = None
if os.path.isdir(home + '/../.git'):
v = subprocess.check_output(['git', 'describe', '--tags'], cwd=home).strip()
if not v:
v = '0.1.3'
return v
__version__ = get_version()
__all__ = ['toolkit']
def random_seed(i):
"""
Set global random seed for all underlying components. Use 'brute-force' approach, by setting undelying libraries' seeds.
Parameters
----------
i: int
integer used as seed for random number generators
"""
# python's random module
python_seed(i)
# numpy random module
np_seed(i)
| bsd-3-clause | Python |
dc69135d94907049f0c6278e5f09f382ac8abfb2 | remove instead of pop in insert. | JustusW/BetterOrderedDict,therealfakemoot/collections2 | better_od/core.py | better_od/core.py | from collections import MutableMapping
class BetterOrderedDict(MutableMapping):
def __init__(self, **kwargs):
self._d = dict()
self._keys = []
def __len__(self):
return len(self._d)
def __iter__(self):
for key in self._keys:
yield key
def __setitem__(self, key, value):
self._keys.append(key)
self._d[key] = value
def __getitem__(self, key):
return self._d[key]
def __delitem__(self, key):
self._keys.remove(key)
del self._d[key]
def key_index(self, key):
return self._keys.index(key)
def insert(self, key, value, index):
if key in self._keys:
self._keys.remove(key)
self._keys.insert(index, key)
self._d[key] = value
def reorder_keys(self, keys):
if self._keys != self._d:
raise ValueError('Keys do not match.')
self._keys = keys
def __repr__(self):
return str([(key, self[key]) for key in self])
| from collections import MutableMapping
class BetterOrderedDict(MutableMapping):
def __init__(self, **kwargs):
self._d = dict()
self._keys = []
def __len__(self):
return len(self._d)
def __iter__(self):
for key in self._keys:
yield key
def __setitem__(self, key, value):
self._keys.append(key)
self._d[key] = value
def __getitem__(self, key):
return self._d[key]
def __delitem__(self, key):
self._keys.remove(key)
del self._d[key]
def key_index(self, key):
return self._keys.index(key)
def insert(self, key, value, index):
if key in self._d:
self._keys.pop(key)
self._keys.insert(index, key)
self._d[key] = value
def reorder_keys(self, keys):
if self._keys != self._d:
raise ValueError('Keys do not match.')
self._keys = keys
| mit | Python |
8d79cdbc0016faf7e94d0b295842a182e11b8df3 | Update killed_and_injured_count_per_age_group_stacked_widget.py | hasadna/anyway,hasadna/anyway,hasadna/anyway,hasadna/anyway | anyway/widgets/suburban_widgets/killed_and_injured_count_per_age_group_stacked_widget.py | anyway/widgets/suburban_widgets/killed_and_injured_count_per_age_group_stacked_widget.py | from typing import Dict, List
from flask_babel import _
from anyway.backend_constants import InjurySeverity, BE_CONST as BE
from anyway.request_params import RequestParams
from anyway.widgets.suburban_widgets.killed_and_injured_count_per_age_group_widget_utils import (
KilledAndInjuredCountPerAgeGroupWidgetUtils,
AGE_RANGES,
)
from anyway.widgets.suburban_widgets.sub_urban_widget import SubUrbanWidget
from anyway.widgets.widget import register
from anyway.widgets.widget_utils import add_empty_keys_to_gen_two_level_dict, gen_entity_labels
INJURY_ORDER = [InjurySeverity.LIGHT_INJURED, InjurySeverity.SEVERE_INJURED, InjurySeverity.KILLED]
@register
class KilledInjuredCountPerAgeGroupStackedWidget(SubUrbanWidget):
name: str = "killed_and_injured_count_per_age_group_stacked"
def __init__(self, request_params: RequestParams):
super().__init__(request_params, type(self).name)
self.rank = 30
def generate_items(self) -> None:
raw_data = KilledAndInjuredCountPerAgeGroupWidgetUtils.filter_and_group_injured_count_per_age_group(
self.request_params
)
partial_processed = add_empty_keys_to_gen_two_level_dict(
raw_data, self.get_age_range_list(), InjurySeverity.codes()
)
structured_data_list = []
for age_group, severity_dict in partial_processed.items():
ordered_list = [
{BE.LKEY: inj.get_label(), BE.VAL: severity_dict.get(inj.value, 0)}
for inj in INJURY_ORDER
]
structured_data_list.append({BE.LKEY: age_group, BE.SERIES: ordered_list})
self.items = structured_data_list
@staticmethod
def localize_items(request_params: RequestParams, items: Dict) -> Dict:
items["data"]["text"] = {
"title": _("Killed and injury stacked per age group"),
"subtitle": _("In") + " " + request_params.location_info["road_segment_name"],
"labels_map": gen_entity_labels(InjurySeverity),
}
return items
@staticmethod
def get_age_range_list() -> List[str]:
return [f"{item_min_range}-{item_max_range}" if item_max_range < 120 else f"{item_min_range}+"
for item_min_range, item_max_range in zip(AGE_RANGES, AGE_RANGES[1:])]
| from typing import Dict, List
from flask_babel import _
from anyway.backend_constants import InjurySeverity, BE_CONST as BE
from anyway.request_params import RequestParams
from anyway.widgets.suburban_widgets.killed_and_injured_count_per_age_group_widget_utils import (
KilledAndInjuredCountPerAgeGroupWidgetUtils,
AGE_RANGE_DICT,
)
from anyway.widgets.suburban_widgets.sub_urban_widget import SubUrbanWidget
from anyway.widgets.widget import register
from anyway.widgets.widget_utils import add_empty_keys_to_gen_two_level_dict, gen_entity_labels
INJURY_ORDER = [InjurySeverity.LIGHT_INJURED, InjurySeverity.SEVERE_INJURED, InjurySeverity.KILLED]
MAX_AGE = 200
@register
class KilledInjuredCountPerAgeGroupStackedWidget(SubUrbanWidget):
name: str = "killed_and_injured_count_per_age_group_stacked"
def __init__(self, request_params: RequestParams):
super().__init__(request_params, type(self).name)
self.rank = 30
def generate_items(self) -> None:
raw_data = KilledAndInjuredCountPerAgeGroupWidgetUtils.filter_and_group_injured_count_per_age_group(
self.request_params
)
partial_processed = add_empty_keys_to_gen_two_level_dict(
raw_data, self.get_age_range_list(), InjurySeverity.codes()
)
structured_data_list = []
for age_group, severity_dict in partial_processed.items():
ordered_list = [
{BE.LKEY: inj.get_label(), BE.VAL: severity_dict.get(inj.value, 0)}
for inj in INJURY_ORDER
]
structured_data_list.append({BE.LKEY: age_group, BE.SERIES: ordered_list})
self.items = structured_data_list
@staticmethod
def localize_items(request_params: RequestParams, items: Dict) -> Dict:
items["data"]["text"] = {
"title": _("Killed and injury stacked per age group"),
"subtitle": _("In") + " " + request_params.location_info["road_segment_name"],
"labels_map": gen_entity_labels(InjurySeverity),
}
return items
@staticmethod
def get_age_range_list() -> List[str]:
age_list = []
for item_min_range, item_max_range in AGE_RANGE_DICT.items():
if MAX_AGE == item_max_range:
age_list.append("65+")
else:
age_list.append(f"{item_min_range:02}-{item_max_range:02}")
return age_list
| mit | Python |
77cab93fc4ee7d1339a25a22d0b5b99acd3fcbe1 | Fix imports for python 3 | befelix/lyapunov-learning | safe_learning/__init__.py | safe_learning/__init__.py | from __future__ import absolute_import
from .utilities import *
from .safe_learning import *
| from utilities import *
from safe_learning import *
| mit | Python |
b493dd68e85cae43fe45b0d23d863e44c88390a9 | fix date format | ClintonMorrison/personal-website,ClintonMorrison/personal-website,ClintonMorrison/personal-website,ClintonMorrison/personal-website,ClintonMorrison/personal-website,ClintonMorrison/personal-website | core/functions.py | core/functions.py | import config
import importlib
import textwrap
def path_to_url(path = 'index', parameters = [], hash = ''):
if hash == '':
return "http://%s/%s" % (config.base_url, path)
else:
return "http://%s/%s#%s" % (config.base_url, path, hash)
# Gets URL for static resource
def static_to_url(path):
return path_to_url("static/" + path)
def get_func_from_module(path):
module_name, func_name = path.rsplit('.', 1)
module = importlib.import_module(module_name)
func = getattr(module, func_name)
return func
def truncate_text(text, length):
if len(text) <= length:
return text
next_space_index = text.find(' ', length)
return text[:next_space_index].rstrip(',.?,:;" ') + ' ...'
def format_date(date):
return date.strftime('%-B %-d, %-Y') | import config
import importlib
import textwrap
def path_to_url(path = 'index', parameters = [], hash = ''):
if hash == '':
return "http://%s/%s" % (config.base_url, path)
else:
return "http://%s/%s#%s" % (config.base_url, path, hash)
# Gets URL for static resource
def static_to_url(path):
return path_to_url("static/" + path)
def get_func_from_module(path):
module_name, func_name = path.rsplit('.', 1)
module = importlib.import_module(module_name)
func = getattr(module, func_name)
return func
def truncate_text(text, length):
if len(text) <= length:
return text
next_space_index = text.find(' ', length)
return text[:next_space_index].rstrip(',.?,:;" ') + ' ...'
def format_date(date):
return date.strftime('%-B %-m, %-Y') | apache-2.0 | Python |
a16fd23027b5d3f1378f5b9f75958d0f3ef2a124 | Bump version number to reflect dev status. | caktus/django-email-bandit,vericant/django-email-bandit,caktus/django-email-bandit | bandit/__init__.py | bandit/__init__.py | """
django-email-bandit is a Django email backend for hijacking email sending in a test environment.
"""
__version_info__ = {
'major': 1,
'minor': 0,
'micro': 0,
'releaselevel': 'dev',
}
def get_version():
"""
Return the formatted version information
"""
vers = ["%(major)i.%(minor)i" % __version_info__, ]
if __version_info__['micro']:
vers.append(".%(micro)i" % __version_info__)
if __version_info__['releaselevel'] != 'final':
vers.append('%(releaselevel)s' % __version_info__)
return ''.join(vers)
__version__ = get_version()
| """
django-email-bandit is a Django email backend for hijacking email sending in a test environment.
"""
__version_info__ = {
'major': 0,
'minor': 2,
'micro': 0,
'releaselevel': 'final',
}
def get_version():
"""
Return the formatted version information
"""
vers = ["%(major)i.%(minor)i" % __version_info__, ]
if __version_info__['micro']:
vers.append(".%(micro)i" % __version_info__)
if __version_info__['releaselevel'] != 'final':
vers.append('%(releaselevel)s' % __version_info__)
return ''.join(vers)
__version__ = get_version()
| bsd-3-clause | Python |
7dcba1ae4929820dd000bef1571257023a5b0ef0 | Update the stats to include the hostname | gmr/statelessd,gmr/statelessd | statelessd/stats.py | statelessd/stats.py | """
stats
"""
import logging
import resource
import time
from statelessd import base
LOGGER = logging.getLogger(__name__)
class Stats(base.RequestHandler):
"""Gather stats counters from RabbitMQ objects and return as JSON object"""
def initialize(self):
"""Initial the Request Handler making sure that the connection and
channel handlers are held in the application scope for this process.
"""
super(Stats, self).initialize()
if not hasattr(self.application, 'rabbitmq'):
setattr(self.application, 'rabbitmq', dict())
def _base_stats(self):
"""Return base stats including resource utilization for this process.
:rtype: dict
"""
usage = resource.getrusage(resource.RUSAGE_SELF)
return {'host': self.application.host,
'port': self.application.port,
'requests': self.application.counters,
'timestamp': int(time.time()),
'block': {'input': usage.ru_inblock,
'output': usage.ru_oublock},
'context_switches': usage.ru_nvcsw + usage.ru_nivcsw,
'cpu_time': {'user': usage.ru_utime,
'system': usage.ru_stime},
'memory_usage': usage.ru_maxrss,
'page_faults': {'minor': usage.ru_minflt,
'major': usage.ru_majflt},
'page_size': resource.getpagesize(),
'signals_received': usage.ru_nsignals,
'swap_outs': usage.ru_nswap}
def get(self, *args, **kwargs):
"""Get the stats, returning a JSON object with the info.
:param tuple args: positional arguments
:param dict kwargs: keyword arguments
"""
output = self._base_stats()
output['connections'] = dict()
for key in self.application.rabbitmq.keys():
output['connections'][key] = self.application.rabbitmq[key].stats
self.write(output)
| """
stats
"""
import logging
import resource
import time
from statelessd import base
LOGGER = logging.getLogger(__name__)
class Stats(base.RequestHandler):
"""Gather stats counters from RabbitMQ objects and return as JSON object"""
def initialize(self):
"""Initial the Request Handler making sure that the connection and
channel handlers are held in the application scope for this process.
"""
super(Stats, self).initialize()
if not hasattr(self.application, 'rabbitmq'):
setattr(self.application, 'rabbitmq', dict())
def _base_stats(self):
"""Return base stats including resource utilization for this process.
:rtype: dict
"""
usage = resource.getrusage(resource.RUSAGE_SELF)
return {'port': self.application.port,
'requests': self.application.counters,
'timestamp': int(time.time()),
'block': {'input': usage.ru_inblock,
'output': usage.ru_oublock},
'context_switches': usage.ru_nvcsw + usage.ru_nivcsw,
'cpu_time': {'user': usage.ru_utime,
'system': usage.ru_stime},
'memory_usage': usage.ru_maxrss,
'page_faults': {'minor': usage.ru_minflt,
'major': usage.ru_majflt},
'page_size': resource.getpagesize(),
'signals_received': usage.ru_nsignals,
'swap_outs': usage.ru_nswap}
def get(self, *args, **kwargs):
"""Get the stats, returning a JSON object with the info.
:param tuple args: positional arguments
:param dict kwargs: keyword arguments
"""
output = self._base_stats()
output['connections'] = dict()
for key in self.application.rabbitmq.keys():
output['connections'][key] = self.application.rabbitmq[key].stats
self.write(output)
| bsd-3-clause | Python |
fbbc75965487d3f1c2d4dbfb2dc6dc482566a636 | fix query mixin order | fkmclane/web.py | web/query.py | web/query.py | import re
import urllib.parse
import web
regex = '(?:\?([\w=&]*))?'
class QueryMixIn:
group = 0
def respond(self):
self.request.query = dict(urllib.parse.parse_qsl(self.groups[self.group], True))
return super().respond()
def new(base, handler):
class GenQueryHandler(QueryMixIn, handler):
pass
GenQueryHandler.group = re.compile(base).groups + 1
return {base + regex: handler}
| import re
import urllib.parse
import web
regex = '\?([\w=&])'
class QueryMixIn:
group = 0
def respond(self):
self.request.query = dict(urllib.parse.parse_qsl(self.groups[self.group], True))
def new(base, handler):
class GenQueryHandler(handler, QueryMixIn):
pass
GenQueryHandler.group = re.compile(base).groups + 1
return {base + regex: handler}
| mit | Python |
8daa2343ff164c0fa86dee316c0148c83fd4f2b9 | add explicit vim module import for neovim support | rafaqz/citation.vim | python/citation_vim/citation.py | python/citation_vim/citation.py | # -*- coding: utf-8 -*-
import os.path
import sys
import vim
class Citation(object):
@staticmethod
def connect():
"""
Returns source from builder,
printing any errors from python to the vim console.
"""
try:
set_script_path()
from citation_vim.builder import Builder
from citation_vim.loader import Loader
return Builder(Loader().context).build_source()
except:
print_exception()
def set_script_path():
script_path = os.path.join(vim.eval('s:script_path'), '../../../python')
sys.path.insert(0, script_path)
def print_exception():
import traceback
exc_type, exc_value, exc_traceback = sys.exc_info()
lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
print("Citation.vim error:\n" + "".join(line for line in lines))
| # -*- coding: utf-8 -*-
import os.path
import sys
class Citation(object):
@staticmethod
def connect():
"""
Returns source from builder,
printing any errors from python to the vim console.
"""
try:
set_script_path()
from citation_vim.builder import Builder
from citation_vim.loader import Loader
return Builder(Loader().context).build_source()
except:
print_exception()
def set_script_path():
script_path = os.path.join(vim.eval('s:script_path'), '../../../python')
sys.path.insert(0, script_path)
def print_exception():
import traceback
exc_type, exc_value, exc_traceback = sys.exc_info()
lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
print("Citation.vim error:\n" + "".join(line for line in lines))
| mit | Python |
e8db039771cb94987d15af726bfa4212d4f6e9e6 | print headers | hermantai/samples,hermantai/samples,hermantai/samples,hermantai/samples,hermantai/samples,hermantai/samples,hermantai/samples,hermantai/samples,hermantai/samples,hermantai/samples,hermantai/samples,hermantai/samples | python/sqlalchemy/basic/main.py | python/sqlalchemy/basic/main.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
import models
def print_out(obj):
print("\n\n", ">>> ", obj, "\n\n", sep="")
def print_header(msg):
print("\n\n", "{0:=^80}".format(" " + msg + " "), "\n\n", sep="")
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--interactive",
action="store_true",
help="Start an ipython session immediately after adding some objects",
)
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
engine = create_engine("sqlite://", echo=True)
models.Base.metadata.create_all(bind=engine)
Session = sessionmaker(bind=engine)
# add some objects
print_header("Add a user and its profile")
session1 = Session()
user1 = models.User(username="user1")
profile1 = models.Profile(
firstname="peter",
age=12,
user=user1,
)
session1.add(user1)
session1.commit()
session1.close()
# query objects and related objects
print_header("Query the user and the profile")
session2 = Session()
print_out(session2.query(models.User).all())
print_out(session2.query(models.Profile).all())
print_out(session2.query(models.User).first().profile)
print_out(session2.query(models.Profile).one().user)
session2.close()
# update an object
print_header("Update user1 with a new username")
session3 = Session()
user1.username = "newname"
session3.add(user1)
session3.commit()
print_out(session3.query(models.User).all())
session3.close()
if args.interactive:
from IPython import embed
embed()
| from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import models
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
def print_out(obj):
print("\n\n", ">>> ", obj, "\n\n", sep="")
if __name__ == '__main__':
engine = create_engine("sqlite://", echo=True)
models.Base.metadata.create_all(bind=engine)
Session = sessionmaker(bind=engine)
# add some objects
session1 = Session()
user1 = models.User(username="user1")
session1.add(user1)
session1.commit()
profile1 = models.Profile(
firstname="peter",
age=12,
user=user1,
)
session1.add(profile1)
session1.commit()
session1.close()
# query objects and related objects
session2 = Session()
print_out(session2.query(models.User).all())
print_out(session2.query(models.Profile).all())
print_out(session2.query(models.User).first().profile)
print_out(session2.query(models.Profile).one().user)
session2.close()
# update an object
session3 = Session()
user1.username = "newname"
session3.add(user1)
session3.commit()
print_out(session3.query(models.User).all())
session3.close()
| apache-2.0 | Python |
4f170397acac08c6fd8a4573ead1f66d631ac8dc | Update dsub version to 0.3.2.dev0 | DataBiosphere/dsub,DataBiosphere/dsub | dsub/_dsub_version.py | dsub/_dsub_version.py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Single source of truth for dsub's version.
This must remain small and dependency-free so that any dsub module may
import it without creating circular dependencies. Note that this module
is parsed as a text file by setup.py and changes to the format of this
file could break setup.py.
The version should follow formatting requirements specified in PEP-440.
- https://www.python.org/dev/peps/pep-0440
A typical release sequence will be versioned as:
0.1.3.dev0 -> 0.1.3 -> 0.1.4.dev0 -> ...
"""
DSUB_VERSION = '0.3.2.dev0'
| # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Single source of truth for dsub's version.
This must remain small and dependency-free so that any dsub module may
import it without creating circular dependencies. Note that this module
is parsed as a text file by setup.py and changes to the format of this
file could break setup.py.
The version should follow formatting requirements specified in PEP-440.
- https://www.python.org/dev/peps/pep-0440
A typical release sequence will be versioned as:
0.1.3.dev0 -> 0.1.3 -> 0.1.4.dev0 -> ...
"""
DSUB_VERSION = '0.3.1'
| apache-2.0 | Python |
d9b47e8ab8ddc024704f01cc307aa3539ec4e22a | mark links to inexistant wikipages with a css class. | jerem/django-rcsfield | rcs/wiki/templatetags/wikify.py | rcs/wiki/templatetags/wikify.py | # from http://open.e-scribe.com/browser/python/django/apps/protowiki/templatetags/wikitags.py
# copyright Paul Bissex, MIT license
from django.template import Library
from django.conf import settings
from rcs.wiki.models import WikiPage
register = Library()
@register.filter
def wikify(value):
"""Makes WikiWords"""
import re
#wikifier = re.compile(r'\b(([A-Z]+[a-z]+){2,})\b')
def replace_wikiword(m):
slug = m.group(1)
try:
WikiPage.objects.get(slug=slug)
return r' <a href="/wiki/%s/">%s</a>' % (slug, slug)
except WikiPage.DoesNotExist:
return r' <a class="doesnotexist" href="/wiki/%s/">%s</a>' % (slug, slug)
wikifier = re.compile(r'[^;/+-]\b(([A-Z]+[a-z]+){2,})\b')
return wikifier.sub(replace_wikiword, value) | # from http://open.e-scribe.com/browser/python/django/apps/protowiki/templatetags/wikitags.py
# copyright Paul Bissex, MIT license
from django.template import Library
from django.conf import settings
register = Library()
@register.filter
def wikify(value):
"""Makes WikiWords"""
import re
#wikifier = re.compile(r'\b(([A-Z]+[a-z]+){2,})\b')
wikifier = re.compile(r'[^;/+-]\b(([A-Z]+[a-z]+){2,})\b')
return wikifier.sub(r' <a href="/wiki/\1/">\1</a>', value) | bsd-3-clause | Python |
9bbf179008ef653ea1969a61d034f6392af14ec3 | send heartbeat while waiting result | cenkalti/kuyruk,cenkalti/kuyruk | kuyruk/result.py | kuyruk/result.py | import time
import json
import errno
import socket
import logging
from kuyruk.exceptions import ResultTimeout, RemoteException
logger = logging.getLogger(__name__)
class Result(object):
def __init__(self, channel):
self._channel = channel
def _process_message(self, message):
logger.debug("Reply received: %s", message.body)
d = json.loads(message.body)
self.result = d['result']
self.exception = d.get('exception')
def wait(self, timeout):
logger.debug("Waiting for task result")
start = time.time()
while True:
try:
if self.exception:
raise RemoteException(self.exception['type'],
self.exception['value'],
self.exception['traceback'])
return self.result
except AttributeError:
pass
try:
self._channel.connection.heartbeat_tick()
self._channel.connection.drain_events(timeout=1)
except socket.timeout:
if time.time() - start > timeout:
raise ResultTimeout
except socket.error as e:
if e.errno != errno.EINTR:
raise
| import time
import json
import errno
import socket
import logging
from kuyruk.exceptions import ResultTimeout, RemoteException
logger = logging.getLogger(__name__)
class Result(object):
def __init__(self, channel):
self._channel = channel
def _process_message(self, message):
logger.debug("Reply received: %s", message.body)
d = json.loads(message.body)
self.result = d['result']
self.exception = d.get('exception')
def wait(self, timeout):
logger.debug("Waiting for task result")
start = time.time()
while True:
try:
if self.exception:
raise RemoteException(self.exception['type'],
self.exception['value'],
self.exception['traceback'])
return self.result
except AttributeError:
pass
try:
self._channel.connection.drain_events(timeout=1)
except socket.timeout:
if time.time() - start > timeout:
raise ResultTimeout
except socket.error as e:
if e.errno != errno.EINTR:
raise
| mit | Python |
802667e1b64b68b80b81bee14990a98a10f4696d | Make sure that we generate a valid BSSID, thanks Estefania :) | Panagiotis-Kon/empower-runtime,Panagiotis-Kon/empower-runtime,Panagiotis-Kon/empower-runtime | empower/core/utils.py | empower/core/utils.py | #!/usr/bin/env python3
#
# Copyright (c) 2016 Roberto Riggio, Estefania Coronado
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""EmPOWER utils."""
from empower.datatypes.etheraddress import EtherAddress
def hex_to_ether(in_hex):
"""Convert Int to EtherAddress."""
str_hex_value = format(in_hex, 'x')
padding = '0' * (12 - len(str_hex_value))
mac_string = padding + str_hex_value
mac_string_array = \
[mac_string[i:i+2] for i in range(0, len(mac_string), 2)]
return EtherAddress(":".join(mac_string_array))
def ether_to_hex(ether):
"""Convert EtherAddress to Int."""
return int.from_bytes(ether.to_raw(), byteorder='big')
def generate_bssid(base_mac, sta_mac):
""" Generate a new BSSID address. """
base = str(base_mac).split(":")[0:3]
unicast_addr_mask = int(base[0], 16) & 0xFE
base[0] = str(format(unicast_addr_mask, 'X'))
sta = str(sta_mac).split(":")[3:6]
return EtherAddress(":".join(base + sta))
| #!/usr/bin/env python3
#
# Copyright (c) 2016 Roberto Riggio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""EmPOWER utils."""
from empower.datatypes.etheraddress import EtherAddress
def hex_to_ether(in_hex):
"""Convert Int to EtherAddress."""
str_hex_value = format(in_hex, 'x')
padding = '0' * (12 - len(str_hex_value))
mac_string = padding + str_hex_value
mac_string_array = \
[mac_string[i:i+2] for i in range(0, len(mac_string), 2)]
return EtherAddress(":".join(mac_string_array))
def ether_to_hex(ether):
"""Convert EtherAddress to Int."""
return int.from_bytes(ether.to_raw(), byteorder='big')
def generate_bssid(base_mac, sta_mac):
""" Generate a new BSSID address. """
base = str(base_mac).split(":")[0:3]
sta = str(sta_mac).split(":")[3:6]
return EtherAddress(":".join(base + sta))
| apache-2.0 | Python |
f84e6ca8afad2700f2c38c1ab1f229c01331257b | Fix test_config.py | ahal/active-data-recipes,ahal/active-data-recipes | test/test_config.py | test/test_config.py | import os
from copy import deepcopy
from pathlib import Path
import pytest
from appdirs import user_config_dir
from tomlkit import dumps
from adr.configuration import (
Configuration,
merge_to,
)
here = Path(__file__).parent.resolve()
@pytest.fixture
def create_config(tmpdir):
def inner(data):
config_path = tmpdir.join('config.toml')
with open(config_path, 'w') as fh:
fh.write(dumps({'adr': data}))
return Configuration(config_path.strpath)
return inner
def test_config(create_config):
config = Configuration()
path = os.environ.get('ADR_CONFIG_PATH', Path(user_config_dir('adr')) / 'config.toml')
assert config.path.as_posix == Path(path).as_posix
config = create_config({})
assert config['verbose'] is False
assert config.debug is False
defaults = Configuration.DEFAULTS
defaults['sources'] = set(defaults['sources'])
assert config._config == defaults
config = create_config({'verbose': True})
assert config.verbose is True
assert config.debug is False
config = create_config({'alist': ['foo']})
config.merge({'alist': ['bar']})
assert set(config.alist) == set(['foo', 'bar'])
def test_merge_to():
a = {'l': [1], 'd': {'one': 1}}
b = {'l': 1, 'd': 1}
assert merge_to(a, deepcopy(b)) == {'d': {'one': 1}, 'l': [1]}
assert merge_to(b, deepcopy(a)) == {'d': 1, 'l': 1}
a = {'one': 1, 'l1': [1], 'l2': [1, 2], 'd1': {'d2': {'foo': 'bar'}}, 'd3': None}
b = {'one': 2, 'two': 2, 'l1': False, 'l2': [3, 4], 'd1': {'d2': {'baz': True}}, 'd3': {}}
assert merge_to(a, deepcopy(b)) == {
'one': 1,
'two': 2,
'l1': [1],
'l2': [3, 4, 1, 2],
'd1': {'d2': {'foo': 'bar', 'baz': True}},
'd3': None,
}
assert merge_to(b, deepcopy(a)) == {
'one': 2,
'two': 2,
'l1': False,
'l2': [1, 2, 3, 4],
'd1': {'d2': {'foo': 'bar', 'baz': True}},
'd3': {},
}
| import os
from copy import deepcopy
from pathlib import Path
import pytest
from appdirs import user_config_dir
from tomlkit import dumps
from adr.configuration import (
Configuration,
merge_to,
)
here = Path(__file__).parent.resolve()
@pytest.fixture
def create_config(tmpdir):
def inner(data):
config_path = tmpdir.join('config.toml')
with open(config_path, 'w') as fh:
fh.write(dumps({'adr': data}))
return Configuration(config_path.strpath)
return inner
def test_config(create_config):
config = Configuration()
path = os.environ.get('ADR_CONFIG_PATH', Path(user_config_dir('adr')) / 'config.toml')
assert config.path.as_posix == Path(path).as_posix
config = create_config({})
assert config['verbose'] is False
assert config.debug is False
assert config._config == Configuration.DEFAULTS
config = create_config({'verbose': True})
assert config.verbose is True
assert config.debug is False
config = create_config({'alist': ['foo']})
config.merge({'alist': ['bar']})
assert set(config.alist) == set(['foo', 'bar'])
def test_merge_to():
a = {'l': [1], 'd': {'one': 1}}
b = {'l': 1, 'd': 1}
assert merge_to(a, deepcopy(b)) == {'d': {'one': 1}, 'l': [1]}
assert merge_to(b, deepcopy(a)) == {'d': 1, 'l': 1}
a = {'one': 1, 'l1': [1], 'l2': [1, 2], 'd1': {'d2': {'foo': 'bar'}}, 'd3': None}
b = {'one': 2, 'two': 2, 'l1': False, 'l2': [3, 4], 'd1': {'d2': {'baz': True}}, 'd3': {}}
assert merge_to(a, deepcopy(b)) == {
'one': 1,
'two': 2,
'l1': [1],
'l2': [3, 4, 1, 2],
'd1': {'d2': {'foo': 'bar', 'baz': True}},
'd3': None,
}
assert merge_to(b, deepcopy(a)) == {
'one': 2,
'two': 2,
'l1': False,
'l2': [1, 2, 3, 4],
'd1': {'d2': {'foo': 'bar', 'baz': True}},
'd3': {},
}
| mpl-2.0 | Python |
b2c67e3733dacaaf2150913b8f3809c7f25814cf | Add function for file overwrite prompt | vividvilla/csvtotable | csvtotable/cli.py | csvtotable/cli.py | import os
import click
from csvtotable import convert
# Prompt for file overwrite
def prompt_overwrite(file_name):
# Skip if file doesn't exist
if not os.path.exists(file_name):
return
# Prompt for file overwrite if outfile already exists
fmt = "File ({}) already exists. Do you want to overwrite? (y/n): "
message = fmt.format(file_name)
click.secho(message, nl=False, fg="red")
choice = click.getchar()
click.echo()
if choice not in ("y", "Y"):
return False
return True
@click.command()
@click.argument("input_file", type=click.Path(exists=True))
@click.argument("output_file", type=click.Path())
@click.option("-c", "--caption", type=str, help="Table caption")
@click.option("-d", "--delimiter", type=str, default=",", help="CSV delimiter")
@click.option("-q", "--quotechar", type=str, default="|",
help="String used to quote fields containing special characters")
@click.option("-dl", "--display-length", type=int, default=-1,
help=("Number of rows to show by default. "
"Defaults to -1 (show all rows)"))
@click.option("-o", "--overwrite", type=bool, default=False, is_flag=True,
help="Overwrite the output file if exisits.")
def cli(input_file, output_file, caption, delimiter, quotechar,
display_length, overwrite):
"""
CSVtoTable commandline utility.
"""
# Prompt for file overwrite if outfile already exists
if not overwrite and os.path.exists(output_file):
fmt = "File ({}) already exists. Do you want to overwrite? (y/n): "
message = fmt.format(output_file)
click.secho(message, nl=False, fg="red")
choice = click.getchar()
click.echo()
if choice not in ("y", "Y"):
return True
# Convert CSV file
convert.convert(input_file, output_file, caption=caption,
delimiter=delimiter, quotechar=quotechar,
display_length=display_length)
if not overwrite and not prompt_overwrite(output_file):
raise click.Abort()
click.secho("File converted successfully: {}".format(
output_file), fg="green")
| import os
import click
from csvtotable import convert
@click.command()
@click.argument("input_file", type=click.Path(exists=True))
@click.argument("output_file", type=click.Path())
@click.option("-c", "--caption", type=str, help="Table caption")
@click.option("-d", "--delimiter", type=str, default=",", help="CSV delimiter")
@click.option("-q", "--quotechar", type=str, default="|",
help="String used to quote fields containing special characters")
@click.option("-dl", "--display-length", type=int, default=-1,
help=("Number of rows to show by default. "
"Defaults to -1 (show all rows)"))
@click.option("-o", "--overwrite", type=bool, default=False, is_flag=True,
help="Overwrite the output file if exisits.")
def cli(input_file, output_file, caption, delimiter, quotechar,
display_length, overwrite):
"""
CSVtoTable commandline utility.
"""
# Prompt for file overwrite if outfile already exists
if not overwrite and os.path.exists(output_file):
fmt = "File ({}) already exists. Do you want to overwrite? (y/n): "
message = fmt.format(output_file)
click.secho(message, nl=False, fg="red")
choice = click.getchar()
click.echo()
if choice not in ("y", "Y"):
return True
# Convert CSV file
convert.convert(input_file, output_file, caption=caption,
delimiter=delimiter, quotechar=quotechar,
display_length=display_length)
click.secho("File converted successfully: {}".format(
output_file), fg="green")
| mit | Python |
75a4d0ccff1af1aefa30efbb6fddc76f8e9db2fb | Add a quick interpolator. | ezekial4/atomic_neu,ezekial4/atomic_neu | xxdata_11.py | xxdata_11.py | import os
import _xxdata_11
parameters = {
'isdimd' : 200,
'iddimd' : 40,
'itdimd' : 50,
'ndptnl' : 4,
'ndptn' : 128,
'ndptnc' : 256,
'ndcnct' : 100
}
def read_scd(filename):
fd = open(filename, 'r')
fortran_filename = 'fort.%d' % fd.fileno()
os.symlink(filename, fortran_filename)
iclass = 2 # class number for scd files
ret = _xxdata_11.xxdata_11(fd.fileno(), iclass, **parameters)
os.unlink(fortran_filename)
return ret
def convert_to_dictionary(out):
iz0, is1min, is1max, nptnl, nptn, nptnc, iptnla, iptna, iptnca, ncnct,\
icnctv, iblmx, ismax, dnr_ele, dnr_ams, isppr, ispbr, isstgr, idmax,\
itmax, ddens, dtev, drcof, lres, lstan, lptn = out
d = {}
d['charge'] = iz0
d['density'] = ddens[:idmax]
d['temperature'] = dtev[:itmax]
d['number_of_charge_states'] = ismax
d['coeff_table'] = drcof[:ismax, :itmax, :idmax]
return d
if __name__ == '__main__':
out = read_scd('scd96_c.dat')
d = convert_to_dictionary(out)
from scipy.interpolate import RectBivariateSpline
coeffs = RectBivariateSpline(d['temperature'], d['density'], d['coeff_table'][0])
| import os
import _xxdata_11
parameters = {
'isdimd' : 200,
'iddimd' : 40,
'itdimd' : 50,
'ndptnl' : 4,
'ndptn' : 128,
'ndptnc' : 256,
'ndcnct' : 100
}
def read_scd(filename):
fd = open(filename, 'r')
fortran_filename = 'fort.%d' % fd.fileno()
os.symlink(filename, fortran_filename)
iclass = 2 # class number for scd files
ret = _xxdata_11.xxdata_11(fd.fileno(), iclass, **parameters)
os.unlink(fortran_filename)
return ret
def convert_to_dictionary(out):
iz0, is1min, is1max, nptnl, nptn, nptnc, iptnla, iptna, iptnca, ncnct,\
icnctv, iblmx, ismax, dnr_ele, dnr_ams, isppr, ispbr, isstgr, idmax,\
itmax, ddens, dtev, drcof, lres, lstan, lptn = out
d = {}
d['charge'] = iz0
d['density'] = ddens[:idmax]
d['temperature'] = dtev[:itmax]
d['number_of_charge_states'] = ismax
d['coeff_table'] = drcof[:ismax, :itmax, :idmax]
return d
if __name__ == '__main__':
out = read_scd('scd96_c.dat')
d = convert_to_dictionary(out)
| mit | Python |
44faee89580ab301e82d6fc5dea290da8fc63741 | Use regex based search for links. | punchagan/parktain,punchagan/parktain,punchagan/parktain | parktain/main.py | parktain/main.py | #!/usr/bin/env python
# Standard library
from datetime import datetime
from os.path import abspath, dirname, join
import re
# 3rd party library
from gendo import Gendo
from sqlalchemy.orm import sessionmaker
# Local library
from parktain.models import Base, engine, Message
Session = sessionmaker(bind=engine)
session = Session()
HERE = dirname(abspath(__file__))
config_path = join(HERE, 'config.yaml')
bot = Gendo.config_from_yaml(config_path)
def is_mention(f):
"""Decorator to check if bot is mentioned."""
def wrapped(user, channel, message):
BOT_ID_RE = re.compile('<@{}>'.format(bot.id))
mention = BOT_ID_RE.search(message) is not None
if mention:
return f(user, channel, message)
return wrapped
def all_messages(user, channel, message):
return True
URL_RE = re.compile('<(https{0,1}://.*?)>')
def message_has_url(user, channel, message):
return URL_RE.search(message) is not None
#### Bot Functions ############################################################
@bot.listen_for('where do you live')
@is_mention
def source_code(user, channel, message):
repo_url = 'https://github.com/punchagan/parktain'
message = 'Well, I live in your hearts...\nYou can change me from here {}, though.'
return message.format(repo_url)
@bot.cron('0 0 * * *')
def checkins_reminder():
date = datetime.now().strftime('%d %B, %Y')
bot.speak('Morning! What are you doing on {}!'.format(date), "#checkins")
@bot.listen_for(all_messages)
def logger(user, channel, message):
message_log = Message(user_id=user, channel_id=channel, message=message, timestamp=datetime.now())
session.add(message_log)
session.commit()
@bot.listen_for(message_has_url, target_channel='clickbaits', ignore_channels=['clickbaits'])
def link_repost(user, channel, message):
"""Repost links in any channel to target_channel."""
return '@{user.username} shared "%s"' % message
def main():
Base.metadata.create_all(engine)
bot.run()
if __name__ == '__main__':
main()
| #!/usr/bin/env python
# Standard library
from datetime import datetime
from os.path import abspath, dirname, join
import re
from urllib.parse import urlparse
# 3rd party library
from gendo import Gendo
from sqlalchemy.orm import sessionmaker
# Local library
from parktain.models import Base, engine, Message
Session = sessionmaker(bind=engine)
session = Session()
HERE = dirname(abspath(__file__))
config_path = join(HERE, 'config.yaml')
bot = Gendo.config_from_yaml(config_path)
def is_mention(f):
"""Decorator to check if bot is mentioned."""
def wrapped(user, channel, message):
BOT_ID_RE = re.compile('<@{}>'.format(bot.id))
mention = BOT_ID_RE.search(message) is not None
if mention:
return f(user, channel, message)
return wrapped
#### Bot Functions ############################################################
@bot.listen_for('where do you live')
@is_mention
def source_code(user, channel, message):
repo_url = 'https://github.com/punchagan/parktain'
message = 'Well, I live in your hearts...\nYou can change me from here {}, though.'
return message.format(repo_url)
@bot.cron('0 0 * * *')
def checkins_reminder():
date = datetime.now().strftime('%d %B, %Y')
bot.speak('Morning! What are you doing on {}!'.format(date), "#checkins")
@bot.listen_for(lambda user, channel, message: True, target_channel='clickbaits')
def logger(user, channel, message):
message_log = Message(user_id=user, channel_id=channel, message=message, timestamp=datetime.now())
session.add(message_log)
session.commit()
# Check for presence of HyperLink in message
if user == bot.id:
return
for word in message.split():
try:
o = urlparse(word[1:-1])
if o.netloc:
return '@{user.username} shared "%s"' %message
except IndexError:
pass
def main():
Base.metadata.create_all(engine)
bot.run()
if __name__ == '__main__':
main()
| bsd-3-clause | Python |
2654bab5f8e57224395ddfaa1b0917dd98d0ad10 | Apply isort | thombashi/pathvalidate | test/test_symbol.py | test/test_symbol.py | # encoding: utf-8
"""
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
from __future__ import absolute_import, unicode_literals
import itertools
import pytest
from pathvalidate import (
InvalidCharError, ascii_symbol_list, replace_symbol, unprintable_ascii_char_list,
validate_symbol)
from ._common import alphanum_char_list
class Test_validate_symbol(object):
VALID_CHAR_LIST = alphanum_char_list
INVALID_CHAR_LIST = ascii_symbol_list
@pytest.mark.parametrize(["value"], [
["abc" + valid_char + "hoge123"] for valid_char in VALID_CHAR_LIST
])
def test_normal(self, value):
validate_symbol(value)
@pytest.mark.parametrize(["value"], [
["あいうえお"],
["シート"],
])
def test_normal_multibyte(self, value):
pytest.skip("TODO")
validate_symbol(value)
@pytest.mark.parametrize(["value"], [
["abc" + invalid_char + "hoge123"]
for invalid_char in INVALID_CHAR_LIST + unprintable_ascii_char_list
])
def test_exception_invalid_char(self, value):
with pytest.raises(InvalidCharError):
validate_symbol(value)
class Test_replace_symbol(object):
TARGET_CHAR_LIST = ascii_symbol_list
NOT_TARGET_CHAR_LIST = alphanum_char_list
REPLACE_TEXT_LIST = ["", "_"]
@pytest.mark.parametrize(
["value", "replace_text", "expected"],
[
["A" + c + "B", rep, "A" + rep + "B"]
for c, rep in itertools.product(TARGET_CHAR_LIST, REPLACE_TEXT_LIST)
] + [
["A" + c + "B", rep, "A" + c + "B"]
for c, rep in itertools.product(NOT_TARGET_CHAR_LIST, REPLACE_TEXT_LIST)
])
def test_normal(self, value, replace_text, expected):
assert replace_symbol(value, replace_text) == expected
@pytest.mark.parametrize(["value", "expected"], [
[None, TypeError],
[1, TypeError],
[True, TypeError],
])
def test_abnormal(self, value, expected):
with pytest.raises(expected):
replace_symbol(value)
| # encoding: utf-8
"""
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
from __future__ import absolute_import, unicode_literals
import itertools
import pytest
from pathvalidate import (
InvalidCharError, replace_symbol, unprintable_ascii_char_list, ascii_symbol_list, validate_symbol)
from ._common import alphanum_char_list
class Test_validate_symbol(object):
VALID_CHAR_LIST = alphanum_char_list
INVALID_CHAR_LIST = ascii_symbol_list
@pytest.mark.parametrize(["value"], [
["abc" + valid_char + "hoge123"] for valid_char in VALID_CHAR_LIST
])
def test_normal(self, value):
validate_symbol(value)
@pytest.mark.parametrize(["value"], [
["あいうえお"],
["シート"],
])
def test_normal_multibyte(self, value):
pytest.skip("TODO")
validate_symbol(value)
@pytest.mark.parametrize(["value"], [
["abc" + invalid_char + "hoge123"]
for invalid_char in INVALID_CHAR_LIST + unprintable_ascii_char_list
])
def test_exception_invalid_char(self, value):
with pytest.raises(InvalidCharError):
validate_symbol(value)
class Test_replace_symbol(object):
TARGET_CHAR_LIST = ascii_symbol_list
NOT_TARGET_CHAR_LIST = alphanum_char_list
REPLACE_TEXT_LIST = ["", "_"]
@pytest.mark.parametrize(
["value", "replace_text", "expected"],
[
["A" + c + "B", rep, "A" + rep + "B"]
for c, rep in itertools.product(TARGET_CHAR_LIST, REPLACE_TEXT_LIST)
] + [
["A" + c + "B", rep, "A" + c + "B"]
for c, rep in itertools.product(NOT_TARGET_CHAR_LIST, REPLACE_TEXT_LIST)
])
def test_normal(self, value, replace_text, expected):
assert replace_symbol(value, replace_text) == expected
@pytest.mark.parametrize(["value", "expected"], [
[None, TypeError],
[1, TypeError],
[True, TypeError],
])
def test_abnormal(self, value, expected):
with pytest.raises(expected):
replace_symbol(value)
| mit | Python |
21edc71daaa7f681988528f797437210fff58a3a | set DRUID_IS_ACTIVE = False | dennisobrien/caravel-mysql-docker-example,dennisobrien/caravel-mysql-docker-example | caravel_config.py | caravel_config.py | """Minimal config to set SQLALCHEMY_DATABASE_URI from an environment variable.
"""
import logging
import os
# The SQLAlchemy connection string.
if os.environ.get('SQLALCHEMY_DATABASE_URI', None):
SQLALCHEMY_DATABASE_URI = os.environ.get('SQLALCHEMY_DATABASE_URI')
else:
logging.getLogger().warn('Falling back to SQLite database.')
SQLALCHEMY_DATABASE_URI = 'sqlite:////home/caravel/db/caravel.db'
logging.getLogger().info('Using SQLALCHEMY_DATABASE_URI: {}'.format(SQLALCHEMY_DATABASE_URI))
DRUID_IS_ACTIVE = False
| """Minimal config to set SQLALCHEMY_DATABASE_URI from an environment variable.
"""
import logging
import os
# The SQLAlchemy connection string.
if os.environ.get('SQLALCHEMY_DATABASE_URI', None):
SQLALCHEMY_DATABASE_URI = os.environ.get('SQLALCHEMY_DATABASE_URI')
else:
logging.getLogger().warn('Falling back to SQLite database.')
SQLALCHEMY_DATABASE_URI = 'sqlite:////home/caravel/db/caravel.db'
logging.getLogger().info('Using SQLALCHEMY_DATABASE_URI: {}'.format(SQLALCHEMY_DATABASE_URI))
| mit | Python |
136b9777b49be0e70f0e3c82662595285421a306 | add mktoken command to generate tokens for users | rjw57/cubbie | cubbie/manager.py | cubbie/manager.py | """
Command-line manager utility for cubbie.
"""
import logging
from flask import current_app
from flask.ext.migrate import MigrateCommand, Migrate
from flask.ext.script import Manager, Command
from mixer.backend.flask import mixer
from cubbie.webapp import create_app
from cubbie.model import db
from cubbie.model import User, Production, Performance, SalesDatum, Capability
from cubbie.fixture import (
create_user_fixtures, create_production_fixtures,
create_performance_fixtures, create_sales_fixtures, create_capability_fixtures
)
from cubbie.auth import make_user_token
def create_manager_app(config=None):
app = create_app()
if config is not None:
app.config.from_pyfile(config)
migrate = Migrate(app, db)
return app
manager = Manager(create_manager_app)
@manager.command
def genfake():
if not current_app.debug:
logging.error(
'genfake command is only available in deebug modfe. '
'Ensure that DEBUG is True in app config.'
)
return
db.create_all()
mixer.init_app(current_app)
create_user_fixtures(10)
create_production_fixtures(5)
create_capability_fixtures(10)
create_performance_fixtures(25)
create_sales_fixtures(200)
@manager.command
def mktoken(userid):
u = User.query.get(userid)
if u is None:
logging.error('No such user: %s' % userid)
return
token = make_user_token(u)
print(token)
manager.add_command('db', MigrateCommand)
manager.add_option('-c', '--config', dest='config', required=False)
def main():
"""Utility entry point."""
manager.run()
if __name__ == '__main__':
main()
| """
Command-line manager utility for cubbie.
"""
import logging
from flask import current_app
from flask.ext.migrate import MigrateCommand, Migrate
from flask.ext.script import Manager, Command
from mixer.backend.flask import mixer
from cubbie.webapp import create_app
from cubbie.model import db
from cubbie.model import User, Production, Performance, SalesDatum, Capability
from cubbie.fixture import (
create_user_fixtures, create_production_fixtures,
create_performance_fixtures, create_sales_fixtures, create_capability_fixtures
)
def create_manager_app(config=None):
app = create_app()
if config is not None:
app.config.from_pyfile(config)
migrate = Migrate(app, db)
return app
class GenFakeData(Command):
"generates fake data in the database"
def run(self):
if not current_app.debug:
logging.error(
'genfake command is only available in deebug modfe. '
'Ensure that DEBUG is True in app config.'
)
return
db.create_all()
mixer.init_app(current_app)
create_user_fixtures(10)
create_production_fixtures(5)
create_capability_fixtures(10)
create_performance_fixtures(25)
create_sales_fixtures(200)
manager = Manager(create_manager_app)
manager.add_command('db', MigrateCommand)
manager.add_command('genfake', GenFakeData)
manager.add_option('-c', '--config', dest='config', required=False)
def main():
"""Utility entry point."""
manager.run()
if __name__ == '__main__':
main()
| mit | Python |
f13b891a539546dfa810119b2da8738e6e0d6aaa | Update tests | evansloan082/sports.py | tests/score_test.py | tests/score_test.py | import json
import unittest
import sports_py
from sports_py.models import Match
class TestScores(unittest.TestCase):
match_data = {
'league': 'NHL',
'home_team': 'Pittsburgh Penguins',
'away_team': 'Nashville Predators',
'match_score': '2-0',
'match_date': 'Sat, 19 Aug 2017 02:12:05 GMT',
'match_time': 'Game Finished',
'match_link': 'test',
}
match = Match('hockey', match_data)
matches = sports_py.get_sport_scores('baseball')
def test_match(self):
self.assertIsNotNone(self.match)
def test_teams(self):
self.assertEqual(self.match.home_team, 'Pittsburgh Penguins')
self.assertEqual(self.match.away_team, 'Nashville Predators')
def test_score(self):
self.assertEqual(self.match.home_score, '2')
self.assertEqual(self.match.away_score, '0')
def test_date(self):
self.assertIsNotNone(self.match.match_date)
def test_sport(self):
self.assertEqual(self.match.sport, 'hockey')
def test_json(self):
try:
json.loads(self.match.to_json())
for match in self.matches:
json.loads(match.to_json())
self.test = True
except ValueError:
self.test = False
self.assertEqual(self.test, True)
if __name__ == '__main__':
unittest.main()
| import json
import unittest
import sports_py
from sports_py.models import Match
class TestScores(unittest.TestCase):
match_data = {
'league': 'NHL',
'home_team': 'Pittsburgh Penguins',
'away_team': 'Nashville Predators',
'match_score': '2-0',
'match_date': 'Sat, 19 Aug 2017 02:12:05 GMT',
'match_time': 'Game Finished',
'match_link': 'test',
}
match = Match('hockey', match_data)
matches = sports_py.get_sport_scores('baseball')
test = False
def test_match(self):
if self.match is not None:
self.test = True
self.assertEqual(self.test, True)
def test_teams(self):
self.assertEqual(self.match.home_team, 'Pittsburgh Penguins')
self.assertEqual(self.match.away_team, 'Nashville Predators')
def test_score(self):
self.assertEqual(self.match.home_score, '2')
self.assertEqual(self.match.away_score, '0')
def test_date(self):
if self.match.match_date is not None:
self.test = True
self.assertEqual(self.test, True)
def test_sport(self):
self.assertEqual(self.match.sport, 'hockey')
def test_json(self):
try:
json.loads(self.match.to_json())
for match in self.matches:
json.loads(match.to_json())
self.test = True
except ValueError:
self.test = False
self.assertEqual(self.test, True)
if __name__ == '__main__':
unittest.main()
| mit | Python |
64d75f6eabd526d7dc47c5f3c980831ab59a604e | Expand test suite | zenhack/python-gtkclassbuilder | tests/test_build.py | tests/test_build.py | from gtkclassbuilder import from_string
from gi.repository import Gtk
input = """<?xml version="1.0" encoding="UTF-8"?>
<!-- Generated with glade 3.18.3 -->
<interface>
<requires lib="gtk+" version="3.12"/>
<object class="GtkWindow" id="MainWindow">
<property name="can_focus">False</property>
<child>
<object class="GtkBox" id="box1">
<property name="visible">True</property>
<property name="can_focus">False</property>
<property name="orientation">vertical</property>
<child>
<object class="GtkLabel" id="label1">
<property name="visible">True</property>
<property name="can_focus">False</property>
<property name="label" translatable="yes">Hello, World!</property>
</object>
<packing>
<property name="expand">True</property>
<property name="fill">True</property>
<property name="position">0</property>
</packing>
</child>
<child>
<object class="GtkButton" id="button1">
<property name="label" translatable="yes">Goodbye</property>
<property name="visible">True</property>
<property name="can_focus">True</property>
<property name="receives_default">True</property>
<property name="relief">none</property>
<signal name="clicked" handler="goodbye" swapped="no"/>
</object>
<packing>
<property name="expand">False</property>
<property name="fill">True</property>
<property name="position">1</property>
</packing>
</child>
</object>
</child>
</object>
</interface>
"""
classes = from_string(input)
w = classes['MainWindow']()
assert isinstance(w, Gtk.Window)
assert w.get_object('MainWindow') is w
assert isinstance(w.get_object('box1'), classes['box1'])
w2 = classes['MainWindow']()
assert w is not w2
assert w.get_object('box1') is not w2.get_object('box1')
| from gtkclassbuilder import from_string
from gi.repository import Gtk
input = """<?xml version="1.0" encoding="UTF-8"?>
<!-- Generated with glade 3.18.3 -->
<interface>
<requires lib="gtk+" version="3.12"/>
<object class="GtkWindow" id="MainWindow">
<property name="can_focus">False</property>
<child>
<object class="GtkBox" id="box1">
<property name="visible">True</property>
<property name="can_focus">False</property>
<property name="orientation">vertical</property>
<child>
<object class="GtkLabel" id="label1">
<property name="visible">True</property>
<property name="can_focus">False</property>
<property name="label" translatable="yes">Hello, World!</property>
</object>
<packing>
<property name="expand">True</property>
<property name="fill">True</property>
<property name="position">0</property>
</packing>
</child>
<child>
<object class="GtkButton" id="button1">
<property name="label" translatable="yes">Goodbye</property>
<property name="visible">True</property>
<property name="can_focus">True</property>
<property name="receives_default">True</property>
<property name="relief">none</property>
<signal name="clicked" handler="goodbye" swapped="no"/>
</object>
<packing>
<property name="expand">False</property>
<property name="fill">True</property>
<property name="position">1</property>
</packing>
</child>
</object>
</child>
</object>
</interface>
"""
classes = from_string(input)
assert isinstance(classes['MainWindow'](), Gtk.Window)
| lgpl-2.1 | Python |
33e8538e1dad39be1f8bff3a1340f0ecb32f0648 | Add a test case for copying EXIF data | kontza/sigal,kontza/sigal,cbosdo/sigal,jdn06/sigal,t-animal/sigal,franek/sigal,kontza/sigal,xouillet/sigal,Ferada/sigal,cbosdo/sigal,saimn/sigal,jdn06/sigal,jasuarez/sigal,elaOnMars/sigal,saimn/sigal,xouillet/sigal,jasuarez/sigal,jasuarez/sigal,cbosdo/sigal,elaOnMars/sigal,t-animal/sigal,xouillet/sigal,jdn06/sigal,Ferada/sigal,franek/sigal,t-animal/sigal,saimn/sigal,Ferada/sigal,muggenhor/sigal,muggenhor/sigal | tests/test_image.py | tests/test_image.py | # -*- coding:utf-8 -*-
import os
import pytest
from PIL import Image
from sigal import init_logging
from sigal.image import generate_image, generate_thumbnail, get_exif_tags
CURRENT_DIR = os.path.dirname(__file__)
TEST_IMAGE = 'exo20101028-b-full.jpg'
SRCFILE = os.path.join(CURRENT_DIR, 'sample', 'pictures', 'dir2', TEST_IMAGE)
def test_generate_image(tmpdir):
"Test the generate_image function."
dstfile = str(tmpdir.join(TEST_IMAGE))
for size in [(600, 600), (300, 200)]:
generate_image(SRCFILE, dstfile, size, None, method='ResizeToFill')
im = Image.open(dstfile)
assert im.size == size
def test_generate_image_processor(tmpdir):
"Test generate_image with a wrong processor name."
init_logging()
dstfile = str(tmpdir.join(TEST_IMAGE))
with pytest.raises(SystemExit):
generate_image(SRCFILE, dstfile, (200, 200), None,
method='WrongMethod')
def test_generate_thumbnail(tmpdir):
"Test the generate_thumbnail function."
dstfile = str(tmpdir.join(TEST_IMAGE))
for size in [(200, 150), (150, 200)]:
generate_thumbnail(SRCFILE, dstfile, size, None)
im = Image.open(dstfile)
assert im.size == size
for size, thumb_size in [((200, 150), (185, 150)),
((150, 200), (150, 122))]:
generate_thumbnail(SRCFILE, dstfile, size, None, fit=False)
im = Image.open(dstfile)
assert im.size == thumb_size
def test_exif_copy(tmpdir):
"Test if EXIF data can transfered copied to the resized image."
test_image = '11.jpg'
src_file = os.path.join(CURRENT_DIR, 'sample', 'pictures', 'dir1', 'test1',
test_image)
dst_file = str(tmpdir.join(test_image))
generate_image(src_file, dst_file, (300, 400), None, copy_exif_data=True)
tags = get_exif_tags(dst_file)
assert tags['simple']['iso'] == 50
generate_image(src_file, dst_file, (300, 400), None, copy_exif_data=False)
assert not get_exif_tags(dst_file)
| # -*- coding:utf-8 -*-
import os
import pytest
from PIL import Image
from sigal import init_logging
from sigal.image import generate_image, generate_thumbnail
CURRENT_DIR = os.path.dirname(__file__)
TEST_IMAGE = 'exo20101028-b-full.jpg'
SRCFILE = os.path.join(CURRENT_DIR, 'sample', 'pictures', 'dir2', TEST_IMAGE)
def test_generate_image(tmpdir):
"Test the generate_image function."
dstfile = str(tmpdir.join(TEST_IMAGE))
for size in [(600, 600), (300, 200)]:
generate_image(SRCFILE, dstfile, size, None, method='ResizeToFill')
im = Image.open(dstfile)
assert im.size == size
def test_generate_image_processor(tmpdir):
"Test generate_image with a wrong processor name."
init_logging()
dstfile = str(tmpdir.join(TEST_IMAGE))
with pytest.raises(SystemExit):
generate_image(SRCFILE, dstfile, (200, 200), None,
method='WrongMethod')
def test_generate_thumbnail(tmpdir):
"Test the generate_thumbnail function."
dstfile = str(tmpdir.join(TEST_IMAGE))
for size in [(200, 150), (150, 200)]:
generate_thumbnail(SRCFILE, dstfile, size, None)
im = Image.open(dstfile)
assert im.size == size
for size, thumb_size in [((200, 150), (185, 150)),
((150, 200), (150, 122))]:
generate_thumbnail(SRCFILE, dstfile, size, None, fit=False)
im = Image.open(dstfile)
assert im.size == thumb_size
| mit | Python |
c71d578400fcd943db8115b9ef4eeef93adfaaa6 | Add more plotting tests | TheBB/badger | tests/test_plots.py | tests/test_plots.py | import csv
from pathlib import Path
import numpy as np
from badger import Case
DATADIR = Path(__file__).parent / 'data'
def read_csv(path: Path):
with open(path, 'r', newline='') as f:
reader = csv.reader(f)
headers = next(reader)
data = np.array([list(map(float, row)) for row in reader])
return headers, data
def test_plots():
case = Case(DATADIR / 'run' / 'plot')
case.clear_cache()
case.run()
root = DATADIR / 'run' / 'plot' / '.badgerdata'
headers, data = read_csv(root / 'i-vs-isq.csv')
assert headers == ['isq (x-axis)', 'isq']
np.testing.assert_array_equal(data, [[1, 1], [2, 4], [3, 9], [4, 16], [5, 25]])
headers, data = read_csv(root / 'misc-vs-i-a.csv')
assert headers == ['misc (x-axis)', 'misc']
np.testing.assert_array_equal(data, [
[1, 98 + 31/5],
[2, 99 + 31/5],
[3, 100 + 31/5],
[4, 101 + 31/5],
[5, 102 + 31/5],
])
headers, data = read_csv(root / 'misc-vs-i-b.csv')
assert headers == ['misc (x-axis)', 'misc']
np.testing.assert_array_equal(data, [
[1, 99 + 31/5],
[2, 100 + 31/5],
[3, 101 + 31/5],
[4, 102 + 31/5],
[5, 103 + 31/5],
])
headers, data = read_csv(root / 'misc-vs-i-c.csv')
assert headers == ['misc (x-axis)', 'misc']
np.testing.assert_array_equal(data, [
[1, 100 + 31/5],
[2, 101 + 31/5],
[3, 102 + 31/5],
[4, 103 + 31/5],
[5, 104 + 31/5],
])
headers, data = read_csv(root / 'fresult.csv')
assert headers == ['fresult (x-axis)', 'fresult']
np.testing.assert_array_equal(data.T, [
np.arange(1, 11),
np.arange(1, 11),
])
| import csv
from pathlib import Path
import numpy as np
from badger import Case
DATADIR = Path(__file__).parent / 'data'
def read_csv(path: Path):
with open(path, 'r', newline='') as f:
reader = csv.reader(f)
headers = next(reader)
data = np.array([list(map(float, row)) for row in reader])
return headers, data
def test_plots():
case = Case(DATADIR / 'run' / 'plot')
case.clear_cache()
case.run()
root = DATADIR / 'run' / 'plot' / '.badgerdata'
headers, data = read_csv(root / 'i-vs-isq.csv')
assert headers == ['isq (x-axis)', 'isq']
np.testing.assert_array_equal(data, [[1, 1], [2, 4], [3, 9], [4, 16], [5, 25]])
| agpl-3.0 | Python |
5d32f809685b34b296d98c9a218019f341ce3eab | Add scene classification test | TUT-ARG/sed_eval | tests/test_scene.py | tests/test_scene.py | '''
Unit tests for scene classification
'''
import numpy
import nose.tools
import sed_eval
def test_direct_use():
reference = sed_eval.util.SceneList([
{
'scene_label': 'supermarket',
'file': 'supermarket09.wav'
},
{
'scene_label': 'tubestation',
'file': 'tubestation10.wav'
},
{
'scene_label': 'quietstreet',
'file': 'quietstreet08.wav'
},
{
'scene_label': 'office',
'file': 'office10.wav'
},
{
'scene_label': 'bus',
'file': 'bus01.wav'
},
])
estimated = sed_eval.util.SceneList([
{
'scene_label': 'supermarket',
'file': 'supermarket09.wav'
},
{
'scene_label': 'bus',
'file': 'tubestation10.wav'
},
{
'scene_label': 'quietstreet',
'file': 'quietstreet08.wav'
},
{
'scene_label': 'park',
'file': 'office10.wav'
},
{
'scene_label': 'car',
'file': 'bus01.wav'
},
])
scene_labels = sed_eval.sound_event.util.unique_scene_labels(reference)
scene_metrics = sed_eval.scene.SceneClassificationMetrics(scene_labels)
scene_metrics.evaluate(
reference_scene_list=reference,
estimated_scene_list=estimated
)
results = scene_metrics.results()
nose.tools.assert_almost_equals(results['overall']['accuracy'], 0.4)
| '''
Unit tests for scene classification
'''
import numpy
import nose.tools
import sed_eval
| mit | Python |
29234853c58eee19455a0a2e69cb7afdf0432bcc | improve test_utils.py | aioworkers/aioworkers | tests/test_utils.py | tests/test_utils.py | import os
from unittest import mock
from aioworkers import utils
def test_urandom_seed():
utils.random_seed()
def test_random_seed():
def not_implemented_urandom(*args):
raise NotImplementedError
with mock.patch.object(os, "urandom", not_implemented_urandom):
utils.random_seed()
| from aioworkers import utils
def test_random_seed():
utils.random_seed()
| apache-2.0 | Python |
0fc647adfd60d1f926f1e12d3c18cc238564392b | bump version 0.0.9 >> 0.1.1 | abe-winter/pg13-py | pg13/__init__.py | pg13/__init__.py | import misc,diff,pg,syncschema
# don't import pgmock and stubredis -- they're only useful for test mode or nonstandard env (i.e. stubredis on windows)
# don't import redismodel by default -- it has an extra msgpack dependency
__version__ = '0.1.1'
| import misc,diff,pg,syncschema
# don't import pgmock and stubredis -- they're only useful for test mode or nonstandard env (i.e. stubredis on windows)
# don't import redismodel by default -- it has an extra msgpack dependency
__version__ = '0.0.9'
| mit | Python |
1ebd836c890bc55013f16ccd1443e6c6daab6b65 | create frist version and set number to 0.0.1 | byteweaver/django-tickets,Christophe31/django-tickets,byteweaver/django-tickets,Christophe31/django-tickets | tickets/__init__.py | tickets/__init__.py | __version__ = '0.0.1'
| __version__ = '0.0.0'
| bsd-3-clause | Python |
d746f7dae64bb75420d39b9e54b4df53b2f91cdd | Update script.py | TingPing/plugins,TingPing/plugins | HexChat/script.py | HexChat/script.py | import os
import sys
if sys.version_info[0] < 3:
import urllib as request
else:
import urllib.request as request
import hexchat
__module_name__ = 'Script'
__module_author__ = 'TingPing'
__module_version__ = '3'
__module_description__ = 'Download scripts'
script_help = 'Script: Valid commands are:\n \
INSTALL script\n \
UPDATE script\n \
REMOVE script'
addon_dir = os.path.join(hexchat.get_info('configdir'), 'addons')
# Store as preference?
addon_types = ['py', 'pl', 'lua', 'tcl', 'js']
addon_sites = ['http://raw.github.com/TingPing/plugins/master/HexChat/',
'https://raw.github.com/Arnavion/random/master/hexchat/',
'http://orvp.net/xchat/']
def expand_script(script):
return os.path.join(addon_dir, script)
def download(script):
if script.partition('.')[2] not in addon_types:
print('Script: Not a valid script file type.')
return False
for site in addon_sites:
if request.urlopen(site + script).getcode() == 200:
print('Script: Downloading {}...'.format(script))
request.urlretrieve(site + script, expand_script(script))
return True
print('Script: Could not find {}'.format(script))
def script_cb(word, word_eol, userdata):
if len(word) > 2:
cmd = word[1].lower()
arg = word[2]
else:
hexchat.command('help script')
return hexchat.EAT_ALL
if cmd == 'install':
if os.path.exists(expand_script(arg)):
print('Script: {} is already installed.'.format(arg))
return hexchat.EAT_ALL
if download(arg):
hexchat.command('timer 5 load ' + expand_script(arg))
elif cmd == 'update':
if arg == 'script.py':
print('Script: I cannot update myself.')
return hexchat.EAT_ALL
if os.path.exists(expand_script(arg)) and download(arg):
hexchat.command('timer 5 reload ' + arg)
elif cmd == 'remove':
if arg == 'script.py':
print('Script: I refuse.')
return hexchat.EAT_ALL
if os.path.exists(expand_script(arg)):
hexchat.command('unload ' + expand_script(arg))
os.remove(expand_script(arg))
else:
print('Script: {} is not installed.'.format(arg))
else:
hexchat.command('help script')
return hexchat.EAT_ALL
def unload_callback(userdata):
print('{} version {} unloaded'.format(__module_name__, __module_version__))
hexchat.hook_command('script', script_cb, help=script_help)
hexchat.hook_unload(unload_callback)
print('{} version {} loaded'.format(__module_name__, __module_version__))
| import os
import sys
if sys.version_info[0] < 3:
import urllib as request
else:
import urllib.request as request
import hexchat
__module_name__ = 'Script'
__module_author__ = 'TingPing'
__module_version__ = '3'
__module_description__ = 'Download scripts'
script_help = 'Script: Valid commands are:\n \
INSTALL script\n \
UPDATE script\n \
REMOVE script'
addon_dir = os.path.join(hexchat.get_info('configdir'), 'addons')
# Store as preference?
addon_types = ['py', 'pl', 'lua', 'tcl']
addon_sites = ['http://raw.github.com/TingPing/plugins/master/HexChat/',
'https://raw.github.com/Arnavion/random/master/hexchat/',
'http://orvp.net/xchat/']
def expand_script(script):
return os.path.join(addon_dir, script)
def download(script):
if script.partition('.')[2] not in addon_types:
print('Script: Not a valid script file type.')
return False
for site in addon_sites:
if request.urlopen(site + script).getcode() == 200:
print('Script: Downloading {}...'.format(script))
request.urlretrieve(site + script, expand_script(script))
return True
print('Script: Could not find {}'.format(script))
def script_cb(word, word_eol, userdata):
if len(word) > 2:
cmd = word[1].lower()
arg = word[2]
else:
hexchat.command('help script')
return hexchat.EAT_ALL
if cmd == 'install':
if os.path.exists(expand_script(arg)):
print('Script: {} is already installed.'.format(arg))
return hexchat.EAT_ALL
if download(arg):
hexchat.command('timer 5 load ' + expand_script(arg))
elif cmd == 'update':
if arg == 'script.py':
print('Script: I cannot update myself.')
return hexchat.EAT_ALL
if os.path.exists(expand_script(arg)) and download(arg):
hexchat.command('timer 5 reload ' + arg)
elif cmd == 'remove':
if arg == 'script.py':
print('Script: I refuse.')
return hexchat.EAT_ALL
if os.path.exists(expand_script(arg)):
hexchat.command('unload ' + expand_script(arg))
os.remove(expand_script(arg))
else:
print('Script: {} is not installed.'.format(arg))
else:
hexchat.command('help script')
return hexchat.EAT_ALL
def unload_callback(userdata):
print('{} version {} unloaded'.format(__module_name__, __module_version__))
hexchat.hook_command('script', script_cb, help=script_help)
hexchat.hook_unload(unload_callback)
print('{} version {} loaded'.format(__module_name__, __module_version__))
| mit | Python |
138c963d55bda492a523411fe24aa076f2910b37 | Update AFRL_demo.py | dm6718/RITSAR | examples/AFRL_demo.py | examples/AFRL_demo.py | ##############################################################################
# #
# This is a demonstration of the ritsar toolset using AFRL Gotcha data. #
# Algorithms can be switched in and out by commenting/uncommenting #
# the lines of code below. #
# #
##############################################################################
#Add include directories to default path list
from sys import path
path.append('../')
#Include standard library dependencies
import numpy as np
import matplotlib.pylab as plt
from matplotlib import cm
cmap = cm.Greys_r
#Include SARIT toolset
from ritsar import phsRead
from ritsar import imgTools
#Define top level directory containing *.mat file
#and choose polarization and starting azimuth
pol = 'HH'
directory = './data/AFRL/pass1'
start_az = 1
#Import phase history and create platform dictionary
[phs, platform] = phsRead.AFRL(directory, pol, start_az, n_az = 4)
#Create image plane dictionary
img_plane = imgTools.img_plane_dict(platform, res_factor = 1.5, upsample = True, aspect = 1.0)
#Apply algorithm of choice to phase history data
img_bp = imgTools.backprojection(phs, platform, img_plane, taylor = 43, upsample = 6)
img_pf = imgTools.polar_format(phs, platform, img_plane, taylor = 43)
#Output image
plt.imshow(np.abs(img_bp)**0.5, cmap = cm.Greys_r)
plt.title('Backprojection')
#Autofocus image
print('autofocusing')
img_af, af_ph = imgTools.autoFocus(img_bp, win = 0, win_params = [300,0.8])
#img_af, af_ph = imgTools.autoFocus(img_pf, win = 0, win_params = [300,0.8])
#Output autofocused image
plt.figure()
plt.imshow(np.abs(img_af)**0.5, cmap = cm.Greys_r)
plt.title('Autofocused Polar Format')
| ##############################################################################
# #
# This is a demonstration of the ritsar toolset using AFRL Gotcha data. #
# Algorithms can be switched in and out by commenting/uncommenting #
# the lines of code below. #
# #
##############################################################################
#Add include directories to default path list
from sys import path
path.append('../')
#Include standard library dependencies
import numpy as np
import matplotlib.pylab as plt
from matplotlib import cm
cmap = cm.Greys_r
#Include SARIT toolset
from ritsar import phsRead
from ritsar import phsTools
from ritsar import imgTools
#Define top level directory containing *.mat file
#and choose polarization and starting azimuth
pol = 'HH'
directory = './data/AFRL/pass1'
start_az = 1
#Import phase history and create platform dictionary
[phs, platform] = phsRead.AFRL(directory, pol, start_az, n_az = 4)
#Correct for reisdual video phase
phs_corr = phsTools.RVP_correct(phs, platform)
#Create image plane dictionary
img_plane = imgTools.img_plane_dict(platform, res_factor = 1.5, upsample = True, aspect = 1.0)
#Apply algorithm of choice to phase history data
img_bp = imgTools.backprojection(phs_corr, platform, img_plane, taylor = 43, upsample = 6)
img_pf = imgTools.polar_format(phs_corr, platform, img_plane, taylor = 43)
#Output image
plt.imshow(np.abs(img_bp)**0.5, cmap = cm.Greys_r)
plt.title('Backprojection')
#Autofocus image
print('autofocusing')
img_af, af_ph = imgTools.autoFocus(img_bp, win = 0, win_params = [300,0.8])
#img_af, af_ph = imgTools.autoFocus(img_pf, win = 0, win_params = [300,0.8])
#Output autofocused image
plt.figure()
plt.imshow(np.abs(img_af)**0.5, cmap = cm.Greys_r)
plt.title('Autofocused Polar Format')
| mit | Python |
99e97a2e658591401e95898fb32b13a12503190e | fix test conflict with webservice in enterprise | frePPLe/frePPLe,frePPLe/frePPLe,frePPLe/frePPLe,frePPLe/frePPLe,frePPLe/frePPLe,frePPLe/frePPLe | contrib/django/freppledb/execute/tests/test_importexportfolder.py | contrib/django/freppledb/execute/tests/test_importexportfolder.py | #
# Copyright (C) 2007-2016 by frePPLe bvba
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero
# General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
import os.path
import unittest
from django.conf import settings
from django.core import management
from django.db import DEFAULT_DB_ALIAS
from django.test import TransactionTestCase
from django.test.utils import override_settings
import freppledb.input as input
@override_settings(INSTALLED_APPS=settings.INSTALLED_APPS + ('django.contrib.sessions',))
class execute_with_commands(TransactionTestCase):
fixtures = ["demo"]
def setUp(self):
# Make sure the test database is used
os.environ['FREPPLE_TEST'] = "YES"
@unittest.skipUnless(
os.path.isdir(settings.DATABASES[DEFAULT_DB_ALIAS].get('FILEUPLOADFOLDER', '')),
"Requires FILEUPLOADFOLDER to be configured"
)
def test_exportimportfromfolder(self):
# Run frePPLe on the test database. No longer needed because records are already in the fixture, in Enterprise conficts with webservice
#management.call_command('frepple_run', plantype=1, constraint=15, env='supply')
self.assertTrue(input.models.ManufacturingOrder.objects.count() > 30)
self.assertTrue(input.models.PurchaseOrder.objects.count() > 20)
self.assertTrue(input.models.DistributionOrder.objects.count() > 0)
#the exporttofolder filters by status so the count must also filter
countMO = input.models.ManufacturingOrder.objects.filter(status = 'proposed').count()
countPO = input.models.PurchaseOrder.objects.filter(status = 'proposed').count()
countDO = input.models.DistributionOrder.objects.filter(status = 'proposed').count()
management.call_command('frepple_exporttofolder', )
input.models.ManufacturingOrder.objects.all().delete()
input.models.DistributionOrder.objects.all().delete()
input.models.PurchaseOrder.objects.all().delete()
self.assertEqual(input.models.DistributionOrder.objects.count(), 0)
self.assertEqual(input.models.PurchaseOrder.objects.count(),0)
self.assertEqual(input.models.ManufacturingOrder.objects.count(), 0)
management.call_command('frepple_importfromfolder', )
self.assertEqual(input.models.DistributionOrder.objects.count(), countDO)
self.assertEqual(input.models.PurchaseOrder.objects.count(), countPO)
self.assertEqual(input.models.ManufacturingOrder.objects.count(), countMO)
| #
# Copyright (C) 2007-2016 by frePPLe bvba
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero
# General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
import os.path
import unittest
from django.conf import settings
from django.core import management
from django.db import DEFAULT_DB_ALIAS
from django.test import TransactionTestCase
from django.test.utils import override_settings
import freppledb.input as input
@override_settings(INSTALLED_APPS=settings.INSTALLED_APPS + ('django.contrib.sessions',))
class execute_with_commands(TransactionTestCase):
fixtures = ["demo"]
def setUp(self):
# Make sure the test database is used
os.environ['FREPPLE_TEST'] = "YES"
@unittest.skipUnless(
os.path.isdir(settings.DATABASES[DEFAULT_DB_ALIAS].get('FILEUPLOADFOLDER', '')),
"Requires FILEUPLOADFOLDER to be configured"
)
def test_exportimportfromfolder(self):
# Run frePPLe on the test database.
management.call_command('frepple_run', plantype=1, constraint=15, env='supply')
self.assertTrue(input.models.ManufacturingOrder.objects.count() > 30)
self.assertTrue(input.models.PurchaseOrder.objects.count() > 20)
self.assertTrue(input.models.DistributionOrder.objects.count() > 0)
#the exporttofolder filters by status so the count must also filter
countMO = input.models.ManufacturingOrder.objects.filter(status = 'proposed').count()
countPO = input.models.PurchaseOrder.objects.filter(status = 'proposed').count()
countDO = input.models.DistributionOrder.objects.filter(status = 'proposed').count()
management.call_command('frepple_exporttofolder', )
input.models.ManufacturingOrder.objects.all().delete()
input.models.DistributionOrder.objects.all().delete()
input.models.PurchaseOrder.objects.all().delete()
self.assertEqual(input.models.DistributionOrder.objects.count(), 0)
self.assertEqual(input.models.PurchaseOrder.objects.count(),0)
self.assertEqual(input.models.ManufacturingOrder.objects.count(), 0)
management.call_command('frepple_importfromfolder', )
self.assertEqual(input.models.DistributionOrder.objects.count(), countDO)
self.assertEqual(input.models.PurchaseOrder.objects.count(), countPO)
self.assertEqual(input.models.ManufacturingOrder.objects.count(), countMO)
| agpl-3.0 | Python |
d3ff4084fcc8492398e87e2c7d5bb99132a133db | Add the tools/console (execute launch.py to run it) | v-legoff/pa-poc1 | launch.py | launch.py | # Copyright (c) 2012 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""This temporary script contains a light demonstration of the POC ability.
It needs the 'datas' subdirectory, created in this directory
and the user.yml file, included in the repository.
"""
from model import *
from dc.sqlite3 import Sqlite3Connector
from tools.console import Console
class User(Model):
"""A user model."""
username = String()
def __repr__(self):
return "<user id={}, username={}>".format(self.id, repr(self.username))
# Load the stored datas
connector = Sqlite3Connector()
Model.data_connector = connector
connector.setup("data.db")
connector.record_tables([User])
console = Console({"Model": Model, "User": User})
console.launch()
| # Copyright (c) 2012 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""This temporary script contains a light demonstration of the POC ability.
It needs the 'datas' subdirectory, created in this directory
and the user.yml file, included in the repository.
"""
from model import *
from dc.sqlite3 import Sqlite3Connector
class User(Model):
"""A user model."""
username = String()
def __repr__(self):
return "<user id={}, username={}>".format(self.id, repr(self.username))
# Load the stored datas
connector = Sqlite3Connector()
Model.data_connector = connector
connector.setup("data.db")
connector.record_tables([User])
names = ("Kredh", "Nitrate", "I don't have a clue", "When I'll get older",
"green mouse", "to destroy")
for name in names:
User(username=name)
user = User.find(4)
user.username = "I will be stronger"
u = User.find(6)
u.delete()
connector.connection.commit()
print(User.get_all())
# Before closing the program, we must stop the data connector
Model.data_connector = None
| bsd-3-clause | Python |
522084f5de4246b3b98293e9fa7320419a88d3a0 | Add --stdin-filename option support (fixes #40) | roadhump/SublimeLinter-eslint,joeybaker/SublimeLinter-textlint,AndBicScadMedia/SublimeLinter-eslint | linter.py | linter.py | #
# linter.py
# Linter for SublimeLinter3, a code checking framework for Sublime Text 3
#
# Written by roadhump
# Copyright (c) 2014 roadhump
#
# License: MIT
#
"""This module exports the ESLint plugin class."""
from SublimeLinter.lint import NodeLinter
class ESLint(NodeLinter):
"""Provides an interface to the eslint executable."""
syntax = ('javascript', 'html', 'javascriptnext', 'javascript (babel)', 'javascript (jsx)')
npm_name = 'eslint'
cmd = ('eslint', '--format', 'compact', '--stdin', '--stdin-filename', '@')
version_args = '--version'
version_re = r'v(?P<version>\d+\.\d+\.\d+)'
version_requirement = '>= 0.12.0'
regex = (
r'^.+?: line (?P<line>\d+), col (?P<col>\d+), '
r'(?:(?P<error>Error)|(?P<warning>Warning)) - '
r'(?P<message>.+)'
)
line_col_base = (1, 0)
selectors = {
'html': 'source.js.embedded.html'
}
| #
# linter.py
# Linter for SublimeLinter3, a code checking framework for Sublime Text 3
#
# Written by roadhump
# Copyright (c) 2014 roadhump
#
# License: MIT
#
"""This module exports the ESLint plugin class."""
from SublimeLinter.lint import NodeLinter
class ESLint(NodeLinter):
"""Provides an interface to the eslint executable."""
syntax = ('javascript', 'html', 'javascriptnext', 'javascript (babel)', 'javascript (jsx)')
npm_name = 'eslint'
cmd = 'eslint --format=compact --stdin'
version_args = '--version'
version_re = r'v(?P<version>\d+\.\d+\.\d+)'
version_requirement = '>= 0.12.0'
regex = (
r'^.+?: line (?P<line>\d+), col (?P<col>\d+), '
r'(?:(?P<error>Error)|(?P<warning>Warning)) - '
r'(?P<message>.+)'
)
line_col_base = (1, 0)
selectors = {
'html': 'source.js.embedded.html'
}
| mit | Python |
f5f77eedda3dc68c686796257bb53ddde82c6a78 | fix version parsing | maxgalbu/SublimeLinter-contrib-twiglint | linter.py | linter.py | #
# linter.py
# Linter for SublimeLinter3, a code checking framework for Sublime Text 3
#
# Written by m.galbusera
# Copyright (c) 2014 m.galbusera
#
# License: MIT
#
"""This module exports the Twiglint plugin class."""
from SublimeLinter.lint import Linter
class Twiglint(Linter):
"""Provides an interface to twiglint."""
syntax = ('twig', 'html (twig)', 'html')
cmd = 'twig-lint lint'
version_args = '--version'
version_re = r'v(?P<version>\d+\.\d+\.\d+)'
version_requirement = '>= 1.0.1'
regex = (
r'(?ism)KO in [^(]+\(line (?P<line>\d+)\).+?>> \d+ .+?>> (?P<message>[^\r\n]+)'
)
multiline = True
line_col_base = (1, 1)
tempfile_suffix = 'twig'
| #
# linter.py
# Linter for SublimeLinter3, a code checking framework for Sublime Text 3
#
# Written by m.galbusera
# Copyright (c) 2014 m.galbusera
#
# License: MIT
#
"""This module exports the Twiglint plugin class."""
from SublimeLinter.lint import Linter
class Twiglint(Linter):
"""Provides an interface to twiglint."""
syntax = ('twig', 'html (twig)', 'html')
cmd = 'twig-lint lint'
version_args = '--version'
version_re = r'version v(?P<version>\d+\.\d+\.\d+)'
version_requirement = '>= 1.0.1'
regex = (
r'(?ism)KO in [^(]+\(line (?P<line>\d+)\).+?>> \d+ .+?>> (?P<message>[^\r\n]+)'
)
multiline = True
line_col_base = (1, 1)
tempfile_suffix = 'twig'
| mit | Python |
daff8c9c95cdd2f1f85162a1c1608a4da78b834d | use self.settings not self.get_view_settings | SublimeLinter/SublimeLinter-json | linter.py | linter.py | import json
import os.path
import re
import sublime
from SublimeLinter.lint import Linter
class JSON(Linter):
cmd = None
loose_regex = re.compile(r'^.+: (?P<message>.+) in \(data\):(?P<line>\d+):(?P<col>\d+)')
strict_regex = re.compile(r'^(?P<message>.+):\s*line (?P<line>\d+) column (?P<col>\d+)')
regex = loose_regex
defaults = {
'selector': 'source.json',
'strict': True
}
def run(self, cmd, code):
"""
Attempt to parse code as JSON.
Returns '' if it succeeds, the error message if it fails.
Use ST's loose parser for its setting files, or when specified.
"""
is_sublime_file = os.path.splitext(self.filename)[1].startswith('.sublime-')
if self.settings.get('strict') and not is_sublime_file:
strict = True
else:
strict = False
try:
if strict:
self.regex = self.strict_regex
json.loads(code)
else:
self.regex = self.loose_regex
sublime.decode_value(code)
return ''
except ValueError as err:
return str(err)
| import json
import os.path
import re
import sublime
from SublimeLinter.lint import Linter
class JSON(Linter):
cmd = None
loose_regex = re.compile(r'^.+: (?P<message>.+) in \(data\):(?P<line>\d+):(?P<col>\d+)')
strict_regex = re.compile(r'^(?P<message>.+):\s*line (?P<line>\d+) column (?P<col>\d+)')
regex = loose_regex
defaults = {
'selector': 'source.json',
'strict': True
}
def run(self, cmd, code):
"""
Attempt to parse code as JSON.
Returns '' if it succeeds, the error message if it fails.
Use ST's loose parser for its setting files, or when specified.
"""
is_sublime_file = os.path.splitext(self.filename)[1].startswith('.sublime-')
if self.get_view_settings().get('strict') and not is_sublime_file:
strict = True
else:
strict = False
try:
if strict:
self.regex = self.strict_regex
json.loads(code)
else:
self.regex = self.loose_regex
sublime.decode_value(code)
return ''
except ValueError as err:
return str(err)
| mit | Python |
cfab5f78fba799d62fb6952bcca1984b2175df37 | Remove commented out version attributes and redundant auxdir arg | jawshooah/SublimeLinter-contrib-scalastyle | linter.py | linter.py | #
# linter.py
# Linter for SublimeLinter3, a code checking framework for Sublime Text 3
#
# Written by Josh Hagins
# Copyright (c) 2014 Josh Hagins
#
# License: MIT
#
"""This module exports the Scalastyle plugin class."""
from os import path
from SublimeLinter.lint import Linter, util
class Scalastyle(Linter):
"""Provides an interface to scalastyle."""
syntax = 'scala'
executable = 'java'
cmd = None
config_file = ('--config', 'scalastyle-config.xml')
regex = (
r'^(?:(?P<error>error)|(?P<warning>warning)) '
r'(?:file=(?P<file>.+?)) '
r'(?:message=(?P<message>[^\r\n]+)) '
r'(?:line=(?P<line>\d+)) '
r'(?:column=(?P<col>\d+))$'
)
multiline = False
line_col_base = (1, 0)
tempfile_suffix = '-'
error_stream = util.STREAM_BOTH
selectors = {}
word_re = r'^([-\w]+|([\'"])[-\w ]+\2)'
defaults = {
'jarfile': ''
}
inline_settings = None
inline_overrides = None
comment_re = None
def cmd(self):
"""Return the command line to execute."""
jarfile = self.get_jarfile_path()
return [self.executable_path, '-jar', jarfile]
def get_jarfile_path(self):
"""
Return the absolute path to the scalastyle jarfile.
Expand user shortcut (~) and environment variables.
"""
settings = self.get_view_settings()
jarfile = settings.get('jarfile')
# Expand user directory shortcuts
jarfile = path.expanduser(jarfile)
# Expand environment variables
jarfile = path.expandvars(jarfile)
return jarfile
| #
# linter.py
# Linter for SublimeLinter3, a code checking framework for Sublime Text 3
#
# Written by Josh Hagins
# Copyright (c) 2014 Josh Hagins
#
# License: MIT
#
"""This module exports the Scalastyle plugin class."""
from os import path
from SublimeLinter.lint import Linter, util
class Scalastyle(Linter):
"""Provides an interface to scalastyle."""
syntax = 'scala'
executable = 'java'
cmd = None
config_file = ('--config', 'scalastyle-config.xml', '~')
# version_args = '--version'
# version_re = r'^scalastyle (?P<version>\d+\.\d+\.\d+)$'
# version_requirement = '>= 0.5'
regex = (
r'^(?:(?P<error>error)|(?P<warning>warning)) '
r'(?:file=(?P<file>.+?)) '
r'(?:message=(?P<message>[^\r\n]+)) '
r'(?:line=(?P<line>\d+)) '
r'(?:column=(?P<col>\d+))$'
)
multiline = False
line_col_base = (1, 0)
tempfile_suffix = '-'
error_stream = util.STREAM_BOTH
selectors = {}
word_re = r'^([-\w]+|([\'"])[-\w ]+\2)'
defaults = {
'jarfile': ''
}
inline_settings = None
inline_overrides = None
comment_re = None
def cmd(self):
"""Return the command line to execute."""
jarfile = self.get_jarfile_path()
return [self.executable_path, '-jar', jarfile]
def get_jarfile_path(self):
"""
Return the absolute path to the scalastyle jarfile.
Expand user shortcut (~) and environment variables.
"""
settings = self.get_view_settings()
jarfile = settings.get('jarfile')
# Expand user directory shortcuts
jarfile = path.expanduser(jarfile)
# Expand environment variables
jarfile = path.expandvars(jarfile)
return jarfile
| mit | Python |
fa98d97bca529d80dd6b5f29502a5618280a0a87 | Remove unused import | ReactiveX/RxPY,ReactiveX/RxPY | rx/concurrency/scheduleditem.py | rx/concurrency/scheduleditem.py | from typing import Any
from rx.core import Scheduler, typing
from rx.disposables import SingleAssignmentDisposable
def default_sub_comparer(x, y):
return 0 if x == y else 1 if x > y else -1
class ScheduledItem:
def __init__(self, scheduler: Scheduler, state: Any, action: typing.ScheduledAction, duetime: typing.AbsoluteTime):
self.scheduler = scheduler
self.state = state
self.action = action
self.duetime = duetime
self.disposable: typing.Disposable = SingleAssignmentDisposable()
def invoke(self):
ret = self.scheduler.invoke_action(self.action, self.state)
self.disposable.disposable = ret
def cancel(self):
"""Cancels the work item by disposing the resource returned by
invoke_core as soon as possible."""
self.disposable.dispose()
def is_cancelled(self):
return self.disposable.is_disposed
def __lt__(self, other):
return self.duetime < other.duetime
def __gt__(self, other):
return self.duetime > other.duetime
def __eq__(self, other):
return self.duetime == other.duetime
| from typing import Any
from datetime import datetime
from rx.core import Scheduler, typing
from rx.disposables import SingleAssignmentDisposable
def default_sub_comparer(x, y):
return 0 if x == y else 1 if x > y else -1
class ScheduledItem:
def __init__(self, scheduler: Scheduler, state: Any, action: typing.ScheduledAction, duetime: typing.AbsoluteTime):
self.scheduler = scheduler
self.state = state
self.action = action
self.duetime = duetime
self.disposable: typing.Disposable = SingleAssignmentDisposable()
def invoke(self):
ret = self.scheduler.invoke_action(self.action, self.state)
self.disposable.disposable = ret
def cancel(self):
"""Cancels the work item by disposing the resource returned by
invoke_core as soon as possible."""
self.disposable.dispose()
def is_cancelled(self):
return self.disposable.is_disposed
def __lt__(self, other):
return self.duetime < other.duetime
def __gt__(self, other):
return self.duetime > other.duetime
def __eq__(self, other):
return self.duetime == other.duetime
| mit | Python |
8b612e6801eb43a19b50dbdc82437a90d0872132 | prepend _ to objects not for the user | jwkvam/plotlywrapper | plotlywrapper.py | plotlywrapper.py | from tempfile import NamedTemporaryFile
import plotly.offline as py
import plotly.graph_objs as go
from IPython import get_ipython
from ipykernel import zmqshell
def _detect_notebook():
"""
this isn't 100% correct but seems good enough
"""
kernel = get_ipython()
return isinstance(kernel, zmqshell.ZMQInteractiveShell)
def _merge_dicts(d1, d2):
d = d2.copy()
d.update(d1)
return d
class _Plot(object):
def __init__(self, data=None, layout=None, **kargs):
self.data = data
if data is None:
self.data = []
self.layout = layout
if layout is None:
self.layout = {}
def __add__(self, other):
self.data += other.data
self.layout = _merge_dicts(self.layout, other.layout)
return self
def __radd__(self, other):
return self.__add__(other)
def group(self):
self.layout['barmode'] = 'group'
return self
def stack(self):
self.layout['barmode'] = 'stack'
return self
def xlabel(self, label):
self.layout['xaxis'] = {'title': label}
return self
def ylabel(self, label):
self.layout['yaxis'] = {'title': label}
return self
def show(self, filename=None):
is_notebook = _detect_notebook()
kargs = {}
if is_notebook:
py.init_notebook_mode()
plot = py.iplot
else:
plot = py.plot
if filename is None:
filename = NamedTemporaryFile(prefix='plotly', suffix='.html', delete=False).name
kargs['filename'] = filename
fig = go.Figure(data=self.data, layout=go.Layout(**self.layout))
plot(fig, **kargs)
class Scatter(_Plot):
def __init__(self, x, y, label=None, **kargs):
data = [go.Scatter(x=x, y=y, name=label)]
super(Scatter, self).__init__(data=data)
class Bar(_Plot):
def __init__(self, x, y, label=None, mode='group', **kargs):
data = [go.Bar(x=x, y=y, name=label)]
layout = {'barmode': 'group'}
super(Bar, self).__init__(data=data, layout=layout)
| from tempfile import NamedTemporaryFile
import plotly.offline as py
import plotly.graph_objs as go
from IPython import get_ipython
from ipykernel import zmqshell
def detect_notebook():
"""
this isn't 100% correct but seems good enough
"""
kernel = get_ipython()
return isinstance(kernel, zmqshell.ZMQInteractiveShell)
def _merge_dicts(d1, d2):
d = d2.copy()
d.update(d1)
return d
class Plot(object):
def __init__(self, data=None, layout=None, **kargs):
self.data = data
if data is None:
self.data = []
self.layout = layout
if layout is None:
self.layout = {}
def __add__(self, other):
self.data += other.data
self.layout = _merge_dicts(self.layout, other.layout)
return self
def __radd__(self, other):
return self.__add__(other)
def group(self):
self.layout['barmode'] = 'group'
return self
def stack(self):
self.layout['barmode'] = 'stack'
return self
def xlabel(self, label):
self.layout['xaxis'] = {'title': label}
return self
def ylabel(self, label):
self.layout['yaxis'] = {'title': label}
return self
def show(self, filename=None):
is_notebook = detect_notebook()
kargs = {}
if is_notebook:
py.init_notebook_mode()
plot = py.iplot
else:
plot = py.plot
if filename is None:
filename = NamedTemporaryFile(prefix='plotly', suffix='.html', delete=False).name
kargs['filename'] = filename
fig = go.Figure(data=self.data, layout=go.Layout(**self.layout))
plot(fig, **kargs)
class Scatter(Plot):
def __init__(self, x, y, label=None, **kargs):
data = [go.Scatter(x=x, y=y, name=label)]
super(Scatter, self).__init__(data=data)
class Bar(Plot):
def __init__(self, x, y, label=None, mode='group', **kargs):
data = [go.Bar(x=x, y=y, name=label)]
layout = {'barmode': 'group'}
super(Bar, self).__init__(data=data, layout=layout)
| mit | Python |
06a8f5221e1b352151af576612dd0b52a5f7b7b7 | Make loader bypass missing data groups, add control blocks to set class parameters | soccermetrics/marcotti | loader.py | loader.py | from etl import get_local_handles, ingest_feeds
from etl.ecsv import CSV_ETL_CLASSES
from local import LocalConfig
from interface import Marcotti
if __name__ == "__main__":
settings = LocalConfig()
marcotti = Marcotti(settings)
with marcotti.create_session() as sess:
for group in ['Supplier', 'Overview', 'Personnel', 'Match']:
for entity, datafile in settings.CSV_DATA.get(group, []):
if group in ['Supplier', 'Overview', 'Personnel']:
if entity == 'Venues':
params = (sess, settings.VENUE_EFF_DATE)
elif entity in ['Competitions', 'Clubs', 'Players', 'Positions']:
params = (sess, settings.DATA_SUPPLIER)
else:
params = (sess,)
else:
params = (sess, settings.COMPETITION_NAME, settings.SEASON_NAME)
if entity == 'PlayerStats':
params += (settings.DATA_SUPPLIER,)
if type(CSV_ETL_CLASSES[group][entity]) is list:
for etl_class in CSV_ETL_CLASSES[group][entity]:
ingest_feeds(get_local_handles, settings.CSV_DATA_DIR, datafile, etl_class(*params))
else:
ingest_feeds(get_local_handles, settings.CSV_DATA_DIR, datafile,
CSV_ETL_CLASSES[group][entity](*params))
| from etl import get_local_handles, ingest_feeds
from etl.ecsv import CSV_ETL_CLASSES
from local import LocalConfig
from interface import Marcotti
if __name__ == "__main__":
settings = LocalConfig()
marcotti = Marcotti(settings)
with marcotti.create_session() as sess:
for group in ['Overview', 'Personnel', 'Match']:
for entity, datafile in settings.CSV_DATA[group]:
if group in ['Overview', 'Personnel']:
if entity == 'Venues':
params = (sess, settings.VENUE_EFF_DATE)
else:
params = (sess,)
else:
params = (sess, settings.COMPETITION_NAME, settings.SEASON_NAME)
if CSV_ETL_CLASSES[group][entity] is list:
for etl_class in CSV_ETL_CLASSES[group][entity]:
ingest_feeds(get_local_handles, settings.CSV_DATA_DIR, datafile, etl_class(*params))
else:
ingest_feeds(get_local_handles, settings.CSV_DATA_DIR, datafile,
CSV_ETL_CLASSES[group][entity](*params))
| mit | Python |
14cc5e93b347813f57cd0dfb99b735a384649bfa | remove facebook rss hack | rascul/botwot | plugins/feeds.py | plugins/feeds.py | """ feed Plugin (botwot plugins.feeds) """
# Copyright 2014 Ray Schulz <https://rascul.io>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import pprint
import time
import feedparser
import requests
import tldextract
from pyaib.plugins import every, plugin_class
from pyaib.db import db_driver
@plugin_class
@plugin_class.requires('db')
class Feeds(object):
def __init__(self, context, config):
self.context = context
def submit_link(self, link):
url = self.context.config.plugin.feeds.short_url
data = json.dumps({'url': link})
headers = {'Content-Type': 'application/json'}
r = requests.post(url, data=data, headers=headers)
j = json.loads(r.text)
if j and 'status' in j:
if j['status'] == 200:
return j['message']
return None
@every(60, name='feeds')
def feeds(self, context, name):
for feed_url in context.config.plugin.feeds.feeds:
feed = feedparser.parse(feed_url)
for entry in reversed(feed.entries):
link = self.submit_link(entry['link'])
#link = entry['link']
if link:
domain = tldextract.extract(link).domain
message = ""
if 'author_detail' in entry and 'name' in entry['author_detail']:
title = entry['title']
if len(title) > 200:
title = "%s..." % title[:200]
author = entry['author_detail']['name']
message = "%s on %s: %s - %s" % (author, domain, title, link)
elif 'summary' in entry:
summary = entry['summary']
if len(summary) > 200:
summary = "%s..." % summary[:200]
message = "%s - %s" % (summary, link)
context.PRIVMSG(context.config.plugin.feeds.channel, message)
# don't flood too hard!
time.sleep(1)
| """ feed Plugin (botwot plugins.feeds) """
# Copyright 2014 Ray Schulz <https://rascul.io>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import pprint
import time
import feedparser
import requests
import tldextract
from pyaib.plugins import every, plugin_class
from pyaib.db import db_driver
@plugin_class
@plugin_class.requires('db')
class Feeds(object):
def __init__(self, context, config):
self.context = context
def submit_link(self, link):
url = self.context.config.plugin.feeds.short_url
data = json.dumps({'url': link})
headers = {'Content-Type': 'application/json'}
r = requests.post(url, data=data, headers=headers)
j = json.loads(r.text)
if j and 'status' in j:
if j['status'] == 200:
return j['message']
return None
@every(60, name='feeds')
def feeds(self, context, name):
for feed_url in context.config.plugin.feeds.feeds:
feed = feedparser.parse(feed_url)
for entry in reversed(feed.entries):
link = self.submit_link(entry['link'])
#link = entry['link']
if link:
domain = tldextract.extract(link).domain
if domain != "wallflux":
message = ""
if 'author_detail' in entry and 'name' in entry['author_detail']:
title = entry['title']
if len(title) > 200:
title = "%s..." % title[:200]
author = entry['author_detail']['name']
message = "%s on %s: %s - %s" % (author, domain, title, link)
elif 'summary' in entry:
summary = entry['summary']
if len(summary) > 200:
summary = "%s..." % summary[:200]
message = "%s - %s" % (summary, link)
context.PRIVMSG(context.config.plugin.feeds.channel, message)
# don't flood too hard!
time.sleep(1)
| apache-2.0 | Python |
4f6ed556cbbffd85c4698b30a3ae38ddf6849563 | change logger variablenames | enricoba/eems-box,enricoba/eems-box | logger.py | logger.py | from messagebus import Bus
import logging
# Message to Log:
# level: DEBUG, INFO, WARNING, ERROR, CRITICAL
# source: str() z.B. Display-Job, GSM-Job, Core-Job, ...
# msg: str()
# Define Functions for Logger
class Logger(object):
def __init__(self):
# Define Logger
log_format = '[*] %(asctime)s %(levelname)s %(message)s'
log_date_format = '%Y-%m-%d %H:%M:%S'
log_file = 'eems.log'
logging.basicConfig(level=logging.INFO,
format=log_format,
datefmt=log_date_format,
filename=log_file)
self.logger = logging.getLogger('eems')
self.log_lvl_info = {'DEBUG': 10,
'INFO': 20,
'WARNING': 30,
'ERROR': 40,
'CRITICAL': 50}
def write_log(self, msg_as_dict):
"""Public function *write_log* writes messages to the log file.
:param msg_as_dict: *dict*
:return: *None*
"""
self.logger.log(self.log_lvl_info[msg_as_dict[u'level']],
'{}: {}'.format(msg_as_dict[u'source'],
msg_as_dict[u'msg']))
def main():
logger = Logger()
while True:
message = Bus.receive('logger')
logger.write_log(message)
if __name__ == '__main__':
main()
| from messagebus import Bus
import logging
# VARIABLES
# STR
# con_str_01: log format
# con_str_02: log date format
# con_str_03: log file
# DICT
# con_dic_01: Log-level information
# Message to Log:
# level: DEBUG, INFO, WARNING, ERROR, CRITICAL
# source: str() z.B. Display-Job, GSM-Job, Core-Job, ...
# msg: str()
# Define Functions for Logger
class Logger(object):
def __init__(self):
# Define Logger
con_str_01 = '[*] %(asctime)s %(levelname)s %(message)s'
con_str_02 = '%Y-%m-%d %H:%M:%S'
con_str_03 = 'eems.log'
logging.basicConfig(level=logging.INFO,
format=con_str_01,
datefmt=con_str_02,
filename=con_str_03)
self.logger = logging.getLogger('eems')
self.con_dic_01 = {'DEBUG': 10,
'INFO': 20,
'WARNING': 30,
'ERROR': 40,
'CRITICAL': 50}
def write_log(self, msg_as_dict):
"""Public function *write_log* writes messages to the log file.
:param msg_as_dict: *dict*
:return: *None*
"""
self.logger.log(self.con_dic_01[msg_as_dict[u'level']],
'{}: {}'.format(msg_as_dict[u'source'],
msg_as_dict[u'msg']))
def main():
logger = Logger()
while True:
message = Bus.receive('logger')
logger.write_log(message)
if __name__ == '__main__':
main()
| mit | Python |
81f9f1b6f1d8790d71b2d17ffc9d03ab3285781d | Fix json() from json | rcoh/modata,rcoh/modata,rcoh/modata,rcoh/modata | client/restlib.py | client/restlib.py | import requests
import hashlib
SERVER = "http://localhost:1234/"
def store(chunk, digest):
return requests.post(SERVER + "store", data={'key': digest, 'data': chunk}).json()
def store_auto(chunk):
return store(chunk, digest_for_chunk(chunk))
def findvalue(digest):
resp = requests.get(SERVER + "find-value/" + digest).json()
if resp['status'] == 'OK':
return resp['data']
else:
return None
def digest_for_chunk(chunk):
return hashlib.sha1(chunk).hexdigest()
| import requests
import hashlib
SERVER = "http://localhost:1234/"
def store(chunk, digest):
return requests.post(SERVER + "store", data={'key': digest, 'data': chunk}).json
def store_auto(chunk):
return store(chunk, digest_for_chunk(chunk))
def findvalue(digest):
resp = requests.get(SERVER + "find-value/" + digest).json
if resp['status'] == 'OK':
return resp['data']
else:
return None
def digest_for_chunk(chunk):
return hashlib.sha1(chunk).hexdigest()
| mit | Python |
bf81ddba9e9516c08fa9c6b259013d17b9039ec2 | Call commands more explicitly | tobymccann/flask-base,hack4impact/asylum-connect-catalog,AsylumConnect/asylum-connect-catalog,hack4impact/asylum-connect-catalog,ColinHaley/Konsole,AsylumConnect/asylum-connect-catalog,hack4impact/asylum-connect-catalog,hack4impact/asylum-connect-catalog,AsylumConnect/asylum-connect-catalog,tobymccann/flask-base,ColinHaley/Konsole,AsylumConnect/asylum-connect-catalog,ColinHaley/Konsole,tobymccann/flask-base,hack4impact/flask-base,hack4impact/flask-base,hack4impact/flask-base | manage.py | manage.py | #!/usr/bin/env python
import os
import subprocess
from app import create_app, db
from app.models import Role, User
from flask.ext.migrate import Migrate, MigrateCommand
from flask.ext.script import Manager, Shell
from redis import Redis
from rq import Connection, Queue, Worker
if os.path.exists('.env'):
print('Importing environment from .env file')
for line in open('.env'):
var = line.strip().split('=')
if len(var) == 2:
os.environ[var[0]] = var[1]
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
manager = Manager(app)
migrate = Migrate(app, db)
def make_shell_context():
return dict(app=app, db=db, User=User, Role=Role)
manager.add_command('shell', Shell(make_context=make_shell_context))
manager.add_command('db', MigrateCommand)
@manager.command
def test():
"""Run the unit tests."""
import unittest
tests = unittest.TestLoader().discover('tests')
unittest.TextTestRunner(verbosity=2).run(tests)
@manager.command
def recreate_db():
"""
Recreates a local database. You probably should not use this on
production.
"""
db.drop_all()
db.create_all()
db.session.commit()
@manager.option(
'-n',
'--number-users',
default=10,
type=int,
help='Number of each model type to create',
dest='number_users')
def add_fake_data(number_users):
"""
Adds fake data to the database.
"""
User.generate_fake(count=number_users)
@manager.command
def setup_dev():
"""Runs the set-up needed for local development."""
setup_general()
@manager.command
def setup_prod():
"""Runs the set-up needed for production."""
setup_general()
def setup_general():
"""Runs the set-up needed for both local development and production."""
Role.insert_roles()
@manager.command
def run_worker():
"""Initializes a slim rq task queue."""
listen = ['default']
conn = Redis(
host=app.config['RQ_DEFAULT_HOST'],
port=app.config['RQ_DEFAULT_PORT'],
db=0,
password=app.config['RQ_DEFAULT_PASSWORD'])
with Connection(conn):
worker = Worker(map(Queue, listen))
worker.work()
@manager.command
def format():
"""Runs the yapf and isort formatters over the project."""
isort = 'isort -rc *.py app/'
yapf = 'yapf -r -i *.py app/'
print 'Running {}'.format(isort)
subprocess.call(isort, shell=True)
print 'Running {}'.format(yapf)
subprocess.call(yapf, shell=True)
if __name__ == '__main__':
manager.run()
| #!/usr/bin/env python
import os
import subprocess
from app import create_app, db
from app.models import Role, User
from flask.ext.migrate import Migrate, MigrateCommand
from flask.ext.script import Manager, Shell
from redis import Redis
from rq import Connection, Queue, Worker
if os.path.exists('.env'):
print('Importing environment from .env file')
for line in open('.env'):
var = line.strip().split('=')
if len(var) == 2:
os.environ[var[0]] = var[1]
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
manager = Manager(app)
migrate = Migrate(app, db)
def make_shell_context():
return dict(app=app, db=db, User=User, Role=Role)
manager.add_command('shell', Shell(make_context=make_shell_context))
manager.add_command('db', MigrateCommand)
@manager.command
def test():
"""Run the unit tests."""
import unittest
tests = unittest.TestLoader().discover('tests')
unittest.TextTestRunner(verbosity=2).run(tests)
@manager.command
def recreate_db():
"""
Recreates a local database. You probably should not use this on
production.
"""
db.drop_all()
db.create_all()
db.session.commit()
@manager.option(
'-n',
'--number-users',
default=10,
type=int,
help='Number of each model type to create',
dest='number_users')
def add_fake_data(number_users):
"""
Adds fake data to the database.
"""
User.generate_fake(count=number_users)
@manager.command
def setup_dev():
"""Runs the set-up needed for local development."""
setup_general()
@manager.command
def setup_prod():
"""Runs the set-up needed for production."""
setup_general()
def setup_general():
"""Runs the set-up needed for both local development and production."""
Role.insert_roles()
@manager.command
def run_worker():
"""Initializes a slim rq task queue."""
listen = ['default']
conn = Redis(
host=app.config['RQ_DEFAULT_HOST'],
port=app.config['RQ_DEFAULT_PORT'],
db=0,
password=app.config['RQ_DEFAULT_PASSWORD'])
with Connection(conn):
worker = Worker(map(Queue, listen))
worker.work()
@manager.command
def format():
"""Runs the yapf and isort formatters over the project."""
isort = 'isort -rc --skip env .'
yapf = 'yapf -e "./env/*" -r -i .'
print 'Running {}'.format(isort)
subprocess.call(isort, shell=True)
print 'Running {}'.format(yapf)
subprocess.call(yapf, shell=True)
if __name__ == '__main__':
manager.run()
| mit | Python |
6c56a22dc353dcf09d2f6f87f8220a0c20921688 | allow all | if1live/importd-boilerplate,if1live/importd-boilerplate | manage.py | manage.py | #!/usr/bin/env python
#-*- coding: utf-8 -*-
from importd import d
import os
import sys
def get_sentry_apps():
if 'SENTRY_DSN' in os.environ:
return ('raven.contrib.django.raven_compat',)
else:
return ()
if 'gunicorn' in sys.argv[0]:
DEBUG = False
else:
DEBUG = True
d(
DEBUG=DEBUG,
INSTALLED_APPS=(
# external library
'django_nose',
# django rest framework
'rest_framework',
'rest_framework.authtoken',
'sella',
'demo',
'api',
) + get_sentry_apps(),
# django-jinja
DEFAULT_JINJA2_TEMPLATE_EXTENSION='.jinja2',
TEMPLATE_LOADERS=(
# django-jinja
'django_jinja.loaders.AppLoader',
'django_jinja.loaders.FileSystemLoader',
# django
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
),
# django-nose
TEST_RUNNER='django_nose.NoseTestSuiteRunner',
# sentry
RAVEN_CONFIG={
'dsn': os.environ['SENTRY_DSN'] if 'SENTRY_DSN' in os.environ else '',
},
# for heroku deploy
# '*' or '127.0.0.1' or 'importd-boilerplate.herokuapp.com'
ALLOWED_HOSTS=['*'],
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend',
mounts={"demo": "/demo/", 'rest_framework': '/api/'}
)
if __name__ == "__main__":
d.main()
| #!/usr/bin/env python
#-*- coding: utf-8 -*-
from importd import d
import os
import sys
def get_sentry_apps():
if 'SENTRY_DSN' in os.environ:
return ('raven.contrib.django.raven_compat',)
else:
return ()
if 'gunicorn' in sys.argv[0]:
DEBUG = False
else:
DEBUG = True
d(
DEBUG=DEBUG,
INSTALLED_APPS=(
# external library
'django_nose',
# django rest framework
'rest_framework',
'rest_framework.authtoken',
'sella',
'demo',
'api',
) + get_sentry_apps(),
# django-jinja
DEFAULT_JINJA2_TEMPLATE_EXTENSION='.jinja2',
TEMPLATE_LOADERS=(
# django-jinja
'django_jinja.loaders.AppLoader',
'django_jinja.loaders.FileSystemLoader',
# django
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
),
# django-nose
TEST_RUNNER='django_nose.NoseTestSuiteRunner',
# sentry
RAVEN_CONFIG={
'dsn': os.environ['SENTRY_DSN'] if 'SENTRY_DSN' in os.environ else '',
},
# for heroku deploy
# '*' or '127.0.0.1'
ALLOWED_HOSTS=['127.0.0.1'],
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend',
mounts={"demo": "/demo/", 'rest_framework': '/api/'}
)
if __name__ == "__main__":
d.main()
| mit | Python |
9922f630a22df312ef4cacc80a998d2d71feebfb | Use python 3.6 new choices weights feature | tortxof/flask-password,tortxof/flask-password,tortxof/flask-password | markov.py | markov.py | import string
from collections import defaultdict
import random
def pairwise(iterable):
"""
Yield pairs of consecutive elements in iterable.
>>> list(pairwise('abcd'))
[('a', 'b'), ('b', 'c'), ('c', 'd')]
"""
iterator = iter(iterable)
try:
a = iterator.__next__()
except StopIteration:
return
for b in iterator:
yield a, b
a = b
class Markov(object):
def __init__(self, corpus_file='corpus.txt'):
with open(corpus_file) as f:
corpus = f.read()
corpus = ''.join(c.lower() if c in string.ascii_letters else ' ' for c in corpus).split()
corpus = [word for word in corpus if len(word) > 1]
self.counts = defaultdict(lambda: defaultdict(int))
for word in corpus:
for a, b in pairwise(word):
self.counts[a][b] += 1
self.sys_rand = random.SystemRandom()
def gen_password(self, l=16):
password = []
password.append(self.sys_rand.choice(string.ascii_lowercase))
while len(password) < l:
choices, weights = zip(*self.counts[password[-1]].items())
password.append(self.sys_rand.choices(choices, weights=weights)[0])
cap, num = self.sys_rand.sample(range(len(password)), 2)
password[num] = str(self.sys_rand.randrange(10))
password[cap] = password[cap].upper()
return ''.join(password)
| import string
from collections import defaultdict
import random
import itertools
import bisect
def pairwise(iterable):
"""
Yield pairs of consecutive elements in iterable.
>>> list(pairwise('abcd'))
[('a', 'b'), ('b', 'c'), ('c', 'd')]
"""
iterator = iter(iterable)
try:
a = iterator.__next__()
except StopIteration:
return
for b in iterator:
yield a, b
a = b
class Markov(object):
def __init__(self, corpus_file='corpus.txt'):
with open(corpus_file) as f:
corpus = f.read()
corpus = ''.join(c.lower() if c in string.ascii_letters else ' ' for c in corpus).split()
corpus = [word for word in corpus if len(word) > 1]
self.counts = defaultdict(lambda: defaultdict(int))
for word in corpus:
for a, b in pairwise(word):
self.counts[a][b] += 1
self.sys_rand = random.SystemRandom()
def gen_password(self, l=16):
password = []
state = self.sys_rand.choice(string.ascii_lowercase)
password.append(state)
while len(password) < l:
choices, weights = zip(*self.counts[state].items())
cumdist = list(itertools.accumulate(weights))
x = self.sys_rand.random() * cumdist[-1]
state = choices[bisect.bisect(cumdist, x)]
password.append(state)
cap, num = self.sys_rand.sample(range(len(password)), 2)
password[num] = str(self.sys_rand.randrange(10))
password[cap] = password[cap].upper()
return ''.join(password)
| mit | Python |
a1294571b049bf0e81e997816ad038e4fb56aa40 | Allow specifying multiple times | kulawczukmarcin/mypox,adusia/pox,chenyuntc/pox,adusia/pox,jacobq/csci5221-viro-project,noxrepo/pox,carlye566/IoT-POX,denovogroup/pox,xAKLx/pox,pthien92/sdn,chenyuntc/pox,PrincetonUniversity/pox,noxrepo/pox,diogommartins/pox,chenyuntc/pox,PrincetonUniversity/pox,diogommartins/pox,adusia/pox,denovogroup/pox,xAKLx/pox,kulawczukmarcin/mypox,waltznetworks/pox,MurphyMc/pox,PrincetonUniversity/pox,pthien92/sdn,xAKLx/pox,noxrepo/pox,carlye566/IoT-POX,kulawczukmarcin/mypox,jacobq/csci5221-viro-project,carlye566/IoT-POX,waltznetworks/pox,VamsikrishnaNallabothu/pox,jacobq/csci5221-viro-project,denovogroup/pox,waltznetworks/pox,kavitshah8/SDNDeveloper,VamsikrishnaNallabothu/pox,andiwundsam/_of_normalize,kulawczukmarcin/mypox,MurphyMc/pox,PrincetonUniversity/pox,kavitshah8/SDNDeveloper,denovogroup/pox,kavitshah8/SDNDeveloper,VamsikrishnaNallabothu/pox,jacobq/csci5221-viro-project,VamsikrishnaNallabothu/pox,andiwundsam/_of_normalize,waltznetworks/pox,diogommartins/pox,kpengboy/pox-exercise,jacobq/csci5221-viro-project,pthien92/sdn,carlye566/IoT-POX,pthien92/sdn,andiwundsam/_of_normalize,pthien92/sdn,adusia/pox,MurphyMc/pox,carlye566/IoT-POX,kpengboy/pox-exercise,kpengboy/pox-exercise,denovogroup/pox,kpengboy/pox-exercise,kulawczukmarcin/mypox,chenyuntc/pox,diogommartins/pox,MurphyMc/pox,kpengboy/pox-exercise,diogommartins/pox,chenyuntc/pox,adusia/pox,PrincetonUniversity/pox,VamsikrishnaNallabothu/pox,noxrepo/pox,xAKLx/pox,kavitshah8/SDNDeveloper,MurphyMc/pox,xAKLx/pox,waltznetworks/pox,andiwundsam/_of_normalize | pox/log/level.py | pox/log/level.py | # Copyright 2011 James McCauley
#
# This file is part of POX.
#
# POX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
from pox.core import core
def launch (__INSTANCE__=None, **kw):
"""
Allows configuring log levels from the commandline.
For example, to turn off the verbose web logging, try:
pox.py web.webcore log.level --web.webcore=INFO
"""
for k,v in kw.iteritems():
if v is True:
# This means they did something like log.level --DEBUG
v = k
k = "" # Root logger
core.getLogger(k).setLevel(v)
| # Copyright 2011 James McCauley
#
# This file is part of POX.
#
# POX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
from pox.core import core
def launch (**kw):
"""
Allows configuring log levels from the commandline.
For example, to turn off the verbose web logging, try:
pox.py web.webcore log.level --web.webcore=INFO
"""
for k,v in kw.iteritems():
if v is True:
# This means they did something like log.level --DEBUG
v = k
k = "" # Root logger
core.getLogger(k).setLevel(v)
| apache-2.0 | Python |
d05fdde673551c054c776bfc2d51fe0bc28ceda7 | refactor class to MetPyMapFeature | jrleeman/MetPy,ShawnMurd/MetPy,dopplershift/MetPy,Unidata/MetPy,jrleeman/MetPy,dopplershift/MetPy,ahaberlie/MetPy,Unidata/MetPy,ahaberlie/MetPy | metpy/plots/cartopy_utils.py | metpy/plots/cartopy_utils.py | # Copyright (c) 2018 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Cartopy specific mapping utilities."""
import cartopy.feature as cfeat
import cartopy.io.shapereader as shpreader
from ..cbook import get_test_data
class MetPyMapFeature(cfeat.NaturalEarthFeature):
"""A simple interface to US County shapefiles."""
def __init__(self, name, scale, **kwargs):
"""Create USCountiesFeature instance."""
super(MetPyMapFeature, self).__init__('', name, scale, **kwargs)
def geometries(self):
"""Return an iterator of (shapely) geometries for this feature."""
# Ensure that the associated files are in the cache
fname = '{}_{}'.format(self.name, self.scale)
for extension in ['.dbf', '.shx']:
get_test_data(fname + extension)
path = get_test_data(fname + '.shp', as_file_obj=False)
return iter(tuple(shpreader.Reader(path).geometries()))
def with_scale(self, new_scale):
"""
Return a copy of the feature with a new scale.
Parameters
----------
new_scale
The new dataset scale, i.e. one of '500k', '5m', or '20m'.
Corresponding to 1:500,000, 1:5,000,000, and 1:20,000,000
respectively.
"""
return MetPyMapFeature(self.name, new_scale, **self.kwargs)
USCOUNTIES = MetPyMapFeature('us_counties', '20m', facecolor='None', edgecolor='black')
USSTATES = MetPyMapFeature('us_states', '20m', facecolor='None', edgecolor='black')
| # Copyright (c) 2018 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Cartopy specific mapping utilities."""
import cartopy.feature as cfeat
import cartopy.io.shapereader as shpreader
from ..cbook import get_test_data
class USCountiesFeature(cfeat.NaturalEarthFeature):
"""A simple interface to US County shapefiles."""
def __init__(self, scale, **kwargs):
"""Create USCountiesFeature instance."""
super(USCountiesFeature, self).__init__('', 'us_counties', scale, **kwargs)
def geometries(self):
"""Return an iterator of (shapely) geometries for this feature."""
# Ensure that the associated files are in the cache
fname = 'us_counties_{}'.format(self.scale)
for extension in ['.dbf', '.shx']:
get_test_data(fname + extension)
path = get_test_data(fname + '.shp', as_file_obj=False)
return iter(tuple(shpreader.Reader(path).geometries()))
def with_scale(self, new_scale):
"""
Return a copy of the feature with a new scale.
Parameters
----------
new_scale
The new dataset scale, i.e. one of '500k', '5m', or '20m'.
Corresponding to 1:500,000, 1:5,000,000, and 1:20,000,000
respectively.
"""
return USCountiesFeature(new_scale, **self.kwargs)
USCOUNTIES = USCountiesFeature('20m', facecolor='None', edgecolor='black')
class USStatesFeature(cfeat.NaturalEarthFeature):
"""A simple interface to US State shapefiles."""
def __init__(self, scale, **kwargs):
"""Create USStatesFeature instance."""
super(USStatesFeature, self).__init__('', 'us_states', scale, **kwargs)
def geometries(self):
"""Return an iterator of (shapely) geometries for this feature."""
# Ensure that the associated files are in the cache
fname = 'us_states_{}'.format(self.scale)
for extension in ['.dbf', '.shx']:
get_test_data(fname + extension)
path = get_test_data(fname + '.shp', as_file_obj=False)
return iter(tuple(shpreader.Reader(path).geometries()))
def with_scale(self, new_scale):
"""
Return a copy of the feature with a new scale.
Parameters
----------
new_scale
The new dataset scale, i.e. one of '500k', '5m', or '20m'.
Corresponding to 1:500,000, 1:5,000,000, and 1:20,000,000
respectively.
"""
return USStatesFeature(new_scale, **self.kwargs)
USSTATES = USStatesFeature('20m', facecolor='None', edgecolor='black')
| bsd-3-clause | Python |
f428fe4bd0eaabd47f7e97724eb9e28702249c30 | Add settings | indico/indico-plugins,ThiefMaster/indico-plugins,indico/indico-plugins,indico/indico-plugins,ThiefMaster/indico-plugins,ThiefMaster/indico-plugins,indico/indico-plugins,ThiefMaster/indico-plugins | payment_manual/indico_payment_manual/plugin.py | payment_manual/indico_payment_manual/plugin.py | # This file is part of Indico.
# Copyright (C) 2002 - 2014 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from wtforms.fields.simple import TextAreaField
from wtforms.validators import DataRequired
from indico.core.plugins import IndicoPlugin
from indico.modules.payment import PaymentPluginMixin, PaymentPluginSettingsFormBase, PaymentEventSettingsFormBase
from indico.util.i18n import _
from indico.web.forms.validators import UsedIf
DETAILS_DESC = _('The details the user needs to make their payment. This usually includes the bank account details '
'the IBAN and payment reference.')
class PluginSettingsForm(PaymentPluginSettingsFormBase):
details = TextAreaField(_('Payment details'), [], description=DETAILS_DESC)
class EventSettingsForm(PaymentEventSettingsFormBase):
details = TextAreaField(_('Payment details'), [UsedIf(lambda form, _: form.enabled.data), DataRequired()],
description=DETAILS_DESC)
class ManualPaymentPlugin(PaymentPluginMixin, IndicoPlugin):
"""Manual Payment
Provides a payment method where bank details etc. are shown to the user
who then pays manually using e.g. a wire transfer. Marking the registrant
as paid is then done manually by a manager of the event.
"""
settings_form = PluginSettingsForm
event_settings_form = EventSettingsForm
| # This file is part of Indico.
# Copyright (C) 2002 - 2014 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from indico.core.plugins import IndicoPlugin
class ManualPaymentPlugin(IndicoPlugin):
"""Manual Payment
Provides a payment method where bank details etc. are shown to the user
who then pays manually using e.g. a wire transfer. Marking the registrant
as paid is then done manually by a manager of the event.
"""
| mit | Python |
4deaf02bd3e8e0d617559ad8035350f735866788 | Fix url | colajam93/aurpackager,colajam93/aurpackager,colajam93/aurpackager,colajam93/aurpackager | lib/aur/query.py | lib/aur/query.py | from urllib.request import urlopen
from contextlib import closing
import json
AUR_URL = 'https://aur.archlinux.org'
BASE_URL = AUR_URL + '/rpc/?v=5&'
INFO_URL = BASE_URL + 'type=info&'
class PackageNotFoundError(Exception):
pass
class AttrDict(dict):
"""
http://stackoverflow.com/questions/4984647/accessing-dict-keys-like-an-attribute-in-python
"""
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
class AURInfo(AttrDict):
def __init__(self, package_dict):
super().__init__(package_dict)
self.tar_url = AUR_URL + self.URLPath
def __aur_query(url):
with closing(urlopen(url)) as request:
result = json.loads(request.read().decode())
return result
def info(package):
url = INFO_URL + 'arg[]={}'.format(package)
result = __aur_query(url)
if result['resultcount'] == 0:
raise PackageNotFoundError
return AURInfo(result['results'][0])
def multiple_info(packages):
url = INFO_URL + '&'.join(map(lambda x: 'arg[]={}'.format(x), packages))
result = __aur_query(url)
# dict which key is the package name
ret = dict()
for package in (AURInfo(x) for x in result['results']):
ret[package.Name] = package
packages.remove(package.Name)
# if package is not found, insert None instead
for package in packages:
if package not in ret:
ret[package] = None
return ret
| from urllib.request import urlopen
from contextlib import closing
import json
AUR_URL = 'https://aur.archlinux.org'
BASE_URL = AUR_URL + '/rpc/?v=5&'
INFO_URL = BASE_URL + 'type=info&'
class PackageNotFoundError(Exception):
pass
class AttrDict(dict):
"""
http://stackoverflow.com/questions/4984647/accessing-dict-keys-like-an-attribute-in-python
"""
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
class AURInfo(AttrDict):
def __init__(self, package_dict):
super().__init__(package_dict)
self.tar_path = AUR_URL + self.URLPath
def info(package):
url = INFO_URL + '&arg[]={}'.format(package)
with closing(urlopen(url)) as request:
result = json.loads(request.read().decode())
if result['resultcount'] == 0:
raise PackageNotFoundError
return AURInfo(result['results'][0])
def multiple_info(packages):
url = INFO_URL + '&'.join(map(lambda x: 'arg[]={}'.format(x), packages))
with closing(urlopen(url)) as request:
result = json.loads(request.read().decode())
# dict which key is the package name
ret = dict()
for package in (AURInfo(x) for x in result['results']):
ret[package.Name] = package
packages.remove(package.Name)
# if package is not found, insert None instead
for package in packages:
if package not in ret:
ret[package] = None
return ret
| mit | Python |
52f2f2ec04bb99b83f02e5172ccd7a1180af9260 | Fix animated images test | missionpinball/mpf-mc,missionpinball/mpf-mc,missionpinball/mpf-mc | mpfmc/tests/test_AnimatedImages.py | mpfmc/tests/test_AnimatedImages.py | from mpfmc.tests.MpfMcTestCase import MpfMcTestCase
class TestAnimatedImages(MpfMcTestCase):
def get_machine_path(self):
return 'tests/machine_files/animated_images'
def get_config_file(self):
return 'test_animated_images.yaml'
def test_animated_images_loading(self):
self.assertEqual(self.mc.images['ball'].image._anim_index, 0)
self.assertEqual(self.mc.images['ball'].image._anim_delay, -1)
self.assertEqual(self.mc.images['busy-stick-figures-animated'].
image._anim_index, 0)
self.assertEqual(self.mc.images['busy-stick-figures-animated'].
image._anim_delay, -1)
self.assertEqual(len(self.mc.images['busy-stick-figures-animated'].
image.image.textures), 10)
def test_animated_images(self):
self.mc.events.post('slide1')
self.advance_time()
ball = self.mc.targets['default'].current_slide.widgets[0].widget
stick_figures = self.mc.targets['default'].current_slide.widgets[1].widget
self.advance_time()
# make sure they're playing as they should
self.assertEqual(ball.fps, 30)
self.assertEqual(ball.loops, -1)
self.assertEqual(stick_figures.fps, 10)
self.assertEqual(stick_figures.loops, -1)
# test stopping
stick_figures.stop()
self.advance_time()
stopped_frame = stick_figures.current_frame
for x in range(10):
self.assertEqual(stick_figures.current_frame, stopped_frame)
self.advance_time()
# test jumping to a new frame
stick_figures.current_frame = 5
self.assertEqual(stick_figures.current_frame, 6)
# test starting
stick_figures.play()
self.advance_time()
| from mpfmc.tests.MpfMcTestCase import MpfMcTestCase
class TestAnimatedImages(MpfMcTestCase):
def get_machine_path(self):
return 'tests/machine_files/animated_images'
def get_config_file(self):
return 'test_animated_images.yaml'
def test_animated_images_loading(self):
self.assertEqual(self.mc.images['ball'].image._anim_index, 0)
self.assertEqual(self.mc.images['ball'].image._anim_delay, -1)
self.assertEqual(self.mc.images['busy-stick-figures-animated'].
image._anim_index, 0)
self.assertEqual(self.mc.images['busy-stick-figures-animated'].
image._anim_delay, -1)
self.assertEqual(len(self.mc.images['busy-stick-figures-animated'].
image.image.textures), 10)
def test_animated_images(self):
self.mc.events.post('slide1')
self.advance_time()
ball = self.mc.targets['default'].current_slide.widgets[0]
stick_figures = self.mc.targets['default'].current_slide.widgets[1]
self.advance_time()
# make sure they're playing as they should
self.assertEqual(ball.fps, 30)
self.assertEqual(ball.loops, -1)
self.assertEqual(stick_figures.fps, 10)
self.assertEqual(stick_figures.loops, -1)
# test stopping
stick_figures.stop()
self.advance_time()
stopped_frame = stick_figures.current_frame
for x in range(10):
self.assertEqual(stick_figures.current_frame, stopped_frame)
self.advance_time()
# test jumping to a new frame
stick_figures.current_frame = 5
self.assertEqual(stick_figures.current_frame, 6)
# test starting
stick_figures.play()
self.advance_time()
| mit | Python |
f849acc0012ef08248a27f41af77bc57831b2829 | Fix invocation. | aequitas/python-rflink | rflink/__main__.py | rflink/__main__.py | """Command line interface for rflink library.
Usage:
rflink [-v] [--port=<port> [--baud=<baud>] | --host=<host> --port=<port>]
rflink (-h | --help)
rflink --version
Options:
-p --port=<port> Serial port to connect to [default: /dev/ttyACM0],
or TCP port in TCP mode.
--baud=<baud> Serial baud rate [default: 57600].
--host=<host> TCP mode, connect to host instead of serial port.
-h --help Show this screen.
-v --verbose Increase verbosity
--version Show version.
"""
import asyncio
import logging
import sys
from functools import partial
import pkg_resources
from docopt import docopt
from serial_asyncio import create_serial_connection
from .protocol import RflinkProtocol
def main(argv=sys.argv[1:], loop=None):
"""Parse argument and setup main program loop."""
args = docopt(__doc__, argv=argv, version=pkg_resources.require('rflink')[0].version)
if args['--verbose']:
level = logging.DEBUG
else:
level = logging.INFO
logging.basicConfig(level=level)
if not loop:
loop = asyncio.get_event_loop()
protocol = partial(RflinkProtocol, loop)
if args.get('--host'):
conn = loop.create_connection(protocol,
args['--host'], args['--port'])
else:
conn = create_serial_connection(loop, protocol,
args['--port'], args['--baud'])
try:
loop.run_until_complete(conn)
loop.run_forever()
finally:
loop.close()
| """Command line interface for rflink library.
Usage:
rflink [-v] [--port=<port> [--baud=<baud>] | --host=<host> --port=<port>]
rflink (-h | --help)
rflink --version
Options:
-p --port=<port> Serial port to connect to [default: /dev/ttyACM0],
or TCP port in TCP mode.
--baud=<baud> Serial baud rate [default: 57600].
--host=<host> TCP mode, connect to host instead of serial port.
-h --help Show this screen.
-v --verbose Increase verbosity
--version Show version.
"""
import asyncio
import logging
import sys
from functools import partial
import pkg_resources
from docopt import docopt
from serial_asyncio import create_serial_connection
from .protocol import RflinkProtocol
def main(argv=sys.argv, loop=None):
"""Parse argument and setup main program loop."""
args = docopt(__doc__, argv=argv, version=pkg_resources.require('rflink')[0].version)
if args['--verbose']:
level = logging.DEBUG
else:
level = logging.INFO
logging.basicConfig(level=level)
if not loop:
loop = asyncio.get_event_loop()
protocol = partial(RflinkProtocol, loop)
if args.get('--host'):
conn = loop.create_connection(protocol,
args['--host'], args['--port'])
else:
conn = create_serial_connection(loop, protocol,
args['--port'], args['--baud'])
try:
loop.run_until_complete(conn)
loop.run_forever()
finally:
loop.close()
| mit | Python |
b1abe85e5c6a1ca78033d79e4729dc004bcfd8fd | reduce disk reqs of example task config | somic/paasta,Yelp/paasta,Yelp/paasta,somic/paasta | task_processing/executors/task_executor.py | task_processing/executors/task_executor.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
import abc
from collections import namedtuple
import six
TaskConfig = namedtuple(
'TaskConfig',
['image', 'cmd', 'cpus', 'mem', 'disk', 'volumes', 'ports', 'cap_add',
'ulimit', 'docker_parameters'],
)
def make_task_config(image="ubuntu:xenial", cmd="/bin/true", cpus=0.1,
mem=32, disk=10, volumes=None, ports=[], cap_add=[],
ulimit=[], docker_parameters=[]):
if volumes is None:
volumes = {}
if ports is None:
ports = []
if cap_add is None:
cap_add = []
if ulimit is None:
ulimit = []
if docker_parameters:
docker_parameters = []
return TaskConfig(image, cmd, cpus, mem, disk, volumes, ports, cap_add,
ulimit, docker_parameters)
@six.add_metaclass(abc.ABCMeta)
class TaskExecutor(object):
@abc.abstractmethod
def run(self, task_config):
pass
@abc.abstractmethod
def kill(self, task_id):
pass
| # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
import abc
from collections import namedtuple
import six
TaskConfig = namedtuple(
'TaskConfig',
['image', 'cmd', 'cpus', 'mem', 'disk', 'volumes', 'ports', 'cap_add',
'ulimit', 'docker_parameters'],
)
def make_task_config(image="ubuntu:xenial", cmd="/bin/true", cpus=0.1,
mem=32, disk=1000, volumes=None, ports=[], cap_add=[],
ulimit=[], docker_parameters=[]):
if volumes is None:
volumes = {}
if ports is None:
ports = []
if cap_add is None:
cap_add = []
if ulimit is None:
ulimit = []
if docker_parameters:
docker_parameters = []
return TaskConfig(image, cmd, cpus, mem, disk, volumes, ports, cap_add,
ulimit, docker_parameters)
@six.add_metaclass(abc.ABCMeta)
class TaskExecutor(object):
@abc.abstractmethod
def run(self, task_config):
pass
@abc.abstractmethod
def kill(self, task_id):
pass
| apache-2.0 | Python |
bbd37f93fe679f9df8b47fe04384ee68d943a89f | Remove unneeded bits from the recommonmark tests' Sphinx config. | erikrose/sphinx-js,erikrose/sphinx-js,erikrose/sphinx-js | tests/test_common_mark/source/docs/conf.py | tests/test_common_mark/source/docs/conf.py | from recommonmark.transform import AutoStructify
from recommonmark.parser import CommonMarkParser
extensions = [
'sphinx.ext.mathjax',
'sphinx_js'
]
source_suffix = ['.rst', '.md']
master_doc = 'index'
author = 'Jam Risser'
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
source_parsers = {
'.md': CommonMarkParser
}
def setup(app):
app.add_config_value('recommonmark_config', {
'auto_toc_tree_section': 'Content',
'enable_auto_doc_ref': True,
'enable_auto_toc_tree': True,
'enable_eval_rst': True,
'enable_inline_math': True,
'enable_math': True
}, True)
app.add_transform(AutoStructify)
| #!/usr/bin/env python3
from recommonmark.transform import AutoStructify
from recommonmark.parser import CommonMarkParser
author = 'Jam Risser'
copyright = '2018, Jam Risser'
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
extensions = [
'sphinx.ext.mathjax',
'sphinx_js'
]
html_static_path = ['_static']
html_theme = 'sphinx_rtd_theme'
htmlhelp_basename = 'sphinx_hello_worlddoc'
language = None
master_doc = 'index'
needs_sphinx = '1.0'
#primary_domain = 'js'
project = 'sphinx-hello-world'
pygments_style = 'sphinx'
release = '0.0.1'
source_parsers = {
'.md': CommonMarkParser
}
source_suffix = ['.rst', '.md']
templates_path = ['_templates']
todo_include_todos = False
version = '0.0.1'
def setup(app):
app.add_config_value('recommonmark_config', {
'auto_toc_tree_section': 'Content',
'enable_auto_doc_ref': True,
'enable_auto_toc_tree': True,
'enable_eval_rst': True,
'enable_inline_math': True,
'enable_math': True
}, True)
app.add_transform(AutoStructify)
| mit | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.