commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13
values | lang stringclasses 23
values |
|---|---|---|---|---|---|---|---|---|
3f087a69e3e907e68fe99030bb191f9c5b089de7 | Add space after UNZIP command. | rasmus-toftdahl-olesen/sequanto-automation,rasmus-toftdahl-olesen/sequanto-automation,micronpn/sequanto-automation,seqzap/sequanto-automation,seqzap/sequanto-automation,micronpn/sequanto-automation,rasmus-toftdahl-olesen/sequanto-automation,micronpn/sequanto-automation,micronpn/sequanto-automation,rasmus-toftdahl-olesen/sequanto-automation,seqzap/sequanto-automation,seqzap/sequanto-automation,micronpn/sequanto-automation,seqzap/sequanto-automation,rasmus-toftdahl-olesen/sequanto-automation | test_qmake_build.py | test_qmake_build.py | #!/usr/bin/python
import sys
import subprocess
import os
from os import path
import shutil
def call ( cmdline ):
retcode = subprocess.call ( cmdline, shell = True )
assert ( retcode == 0 )
CMAKE = 'cmake'
GENERATORARGS = ''
MAKE = 'make -j 2 '
UNZIP = 'unzip'
MV = 'mv'
if sys.platform == 'win32':
CMAKE = path.join ( os.getenv('ProgramFiles'), 'CMake 2.8', 'bin', 'cmake.exe' )
if not path.exists(CMAKE):
print 'Can not find cmake:', CMAKE
sys.exit(-1)
CMAKE = '"%s"' % CMAKE
GENERATORARGS = '-G "NMake Makefiles"'
MAKE = 'nmake'
UNZIP = path.join ( 'C:\\Program Files', '7-zip', '7z.exe' )
if not path.exists(UNZIP):
print 'Can not find unzip:', UNZIP
sys.exit(-1)
UNZIP = '"%s" x ' % UNZIP
MV = 'ren'
ROOT = path.join ( os.getcwd(), 'build_qmake_test' )
if path.exists(ROOT):
shutil.rmtree ( ROOT )
DIR = path.join(ROOT, 'qmake-machine-automation')
os.makedirs ( DIR )
os.chdir ( DIR )
call ( CMAKE + ' -DSQ_GENERATE_QMAKE:BOOL=ON -DSQ_BUILD_SHARED_LIBRARIES:BOOL=ON -DSQ_QT4:BOOL=ON -DSQ_QT_MACHINE_AUTOMATION:BOOL=ON -DCPACK_BINARY_NSIS:BOOL=OFF -DCPACK_BINARY_ZIP:BOOL=ON ' + GENERATORARGS + ' ../..' )
call ( MAKE + ' package' )
os.makedirs ( path.join(DIR, 'qmake_test' ) )
os.chdir ( path.join(DIR, 'qmake_test' ) )
call ( UNZIP + ' ..\*.zip' )
filename = os.listdir('.')[-1]
call ( MV + ' ' + filename + ' machine-automation' )
os.chdir ( path.join(DIR, 'qmake_test', 'machine-automation' ) )
call ( 'qmake' )
call ( MAKE )
DIR = path.join(ROOT, 'qmake')
os.makedirs ( DIR )
os.chdir ( DIR )
call ( CMAKE + ' -DSQ_GENERATE_QMAKE:BOOL=ON -DSQ_BUILD_SHARED_LIBRARIES:BOOL=ON -DSQ_QT4:BOOL=ON -DCPACK_BINARY_NSIS:BOOL=OFF -DCPACK_BINARY_ZIP:BOOL=ON ' + GENERATORARGS + ' ../..' )
call ( MAKE + ' package' )
os.makedirs ( path.join(DIR, 'qmake_test' ) )
os.chdir ( path.join(DIR, 'qmake_test' ) )
call ( UNZIP + ' ..\*.zip' )
filename = os.listdir('.')[-1]
call ( MV + ' ' + filename + ' SequantoAutomation' )
os.chdir ( path.join(DIR, 'qmake_test', 'SequantoAutomation' ) )
call ( 'qmake' )
call ( MAKE )
| #!/usr/bin/python
import sys
import subprocess
import os
from os import path
import shutil
def call ( cmdline ):
retcode = subprocess.call ( cmdline, shell = True )
assert ( retcode == 0 )
CMAKE = 'cmake'
GENERATORARGS = ''
MAKE = 'make -j 2 '
UNZIP = 'unzip'
MV = 'mv'
if sys.platform == 'win32':
CMAKE = path.join ( os.getenv('ProgramFiles'), 'CMake 2.8', 'bin', 'cmake.exe' )
if not path.exists(CMAKE):
print 'Can not find cmake:', CMAKE
sys.exit(-1)
CMAKE = '"%s"' % CMAKE
GENERATORARGS = '-G "NMake Makefiles"'
MAKE = 'nmake'
UNZIP = path.join ( 'C:\\Program Files', '7-zip', '7z.exe' )
if not path.exists(UNZIP):
print 'Can not find unzip:', UNZIP
sys.exit(-1)
UNZIP = '"%s" x ' % UNZIP
MV = 'ren'
ROOT = path.join ( os.getcwd(), 'build_qmake_test' )
if path.exists(ROOT):
shutil.rmtree ( ROOT )
DIR = path.join(ROOT, 'qmake-machine-automation')
os.makedirs ( DIR )
os.chdir ( DIR )
call ( CMAKE + ' -DSQ_GENERATE_QMAKE:BOOL=ON -DSQ_BUILD_SHARED_LIBRARIES:BOOL=ON -DSQ_QT4:BOOL=ON -DSQ_QT_MACHINE_AUTOMATION:BOOL=ON -DCPACK_BINARY_NSIS:BOOL=OFF -DCPACK_BINARY_ZIP:BOOL=ON ' + GENERATORARGS + ' ../..' )
call ( MAKE + ' package' )
os.makedirs ( path.join(DIR, 'qmake_test' ) )
os.chdir ( path.join(DIR, 'qmake_test' ) )
call ( UNZIP + '..\*.zip' )
filename = os.listdir('.')[-1]
call ( MV + ' ' + filename + ' machine-automation' )
os.chdir ( path.join(DIR, 'qmake_test', 'machine-automation' ) )
call ( 'qmake' )
call ( MAKE )
DIR = path.join(ROOT, 'qmake')
os.makedirs ( DIR )
os.chdir ( DIR )
call ( CMAKE + ' -DSQ_GENERATE_QMAKE:BOOL=ON -DSQ_BUILD_SHARED_LIBRARIES:BOOL=ON -DSQ_QT4:BOOL=ON -DCPACK_BINARY_NSIS:BOOL=OFF -DCPACK_BINARY_ZIP:BOOL=ON ' + GENERATORARGS + ' ../..' )
call ( MAKE + ' package' )
os.makedirs ( path.join(DIR, 'qmake_test' ) )
os.chdir ( path.join(DIR, 'qmake_test' ) )
call ( UNZIP + '..\*.zip' )
filename = os.listdir('.')[-1]
call ( MV + ' ' + filename + ' SequantoAutomation' )
os.chdir ( path.join(DIR, 'qmake_test', 'SequantoAutomation' ) )
call ( 'qmake' )
call ( MAKE )
| apache-2.0 | Python |
c67d650d7d72068694d94f1190a0b8c215ee25ca | fix a bug with funcarg setup and remove XXX comment because "scope=module" now would work but leaving it as session for now. | pytest-dev/execnet,alfredodeza/execnet | testing/conftest.py | testing/conftest.py | import py
def pytest_generate_tests(metafunc):
if 'gw' in metafunc.funcargnames:
if hasattr(metafunc.cls, 'gwtype'):
gwtypes = [metafunc.cls.gwtype]
else:
gwtypes = ['popen', 'socket', 'ssh']
for gwtype in gwtypes:
metafunc.addcall(id=gwtype, param=gwtype)
def pytest_funcarg__gw(request):
scope = "session"
if request.param == "popen":
return request.cached_setup(
setup=py.execnet.PopenGateway,
teardown=lambda gw: gw.exit(),
extrakey=request.param,
scope=scope)
elif request.param == "socket":
return request.cached_setup(
setup=setup_socket_gateway,
teardown=teardown_socket_gateway,
extrakey=request.param,
scope=scope)
elif request.param == "ssh":
return request.cached_setup(
setup=lambda: setup_ssh_gateway(request),
teardown=lambda gw: gw.exit(),
extrakey=request.param,
scope=scope)
def setup_socket_gateway():
proxygw = py.execnet.PopenGateway()
gw = py.execnet.SocketGateway.new_remote(proxygw, ("127.0.0.1", 0))
gw.proxygw = proxygw
return gw
def teardown_socket_gateway(gw):
gw.exit()
gw.proxygw.exit()
def setup_ssh_gateway(request):
sshhost = request.getfuncargvalue('specssh').ssh
gw = py.execnet.SshGateway(sshhost)
return gw
| import py
def pytest_generate_tests(metafunc):
if 'gw' in metafunc.funcargnames:
if hasattr(metafunc.cls, 'gwtype'):
gwtypes = [metafunc.cls.gwtype]
else:
gwtypes = ['popen', 'socket', 'ssh']
for gwtype in gwtypes:
metafunc.addcall(id=gwtype, param=gwtype)
def pytest_funcarg__gw(request):
scope = "session" # XXX module causes problems with -n 3!
if request.param == "popen":
return request.cached_setup(
setup=py.execnet.PopenGateway,
teardown=lambda gw: gw.exit(),
extrakey=request.param,
scope=scope)
elif request.param == "socket":
return request.cached_setup(
setup=setup_socket_gateway,
teardown=teardown_socket_gateway,
extrakey=request.param,
scope=scope)
elif request.param == "ssh":
return request.cached_setup(
setup=lambda: setup_ssh_gateway(request),
teardown=lambda gw: gw.exit(),
extrakey=request.param,
scope=scope)
def setup_socket_gateway():
proxygw = py.execnet.PopenGateway()
gw = py.execnet.SocketGateway.new_remote(proxygw, ("127.0.0.1", 0))
gw.proxygw = proxygw
return gw
def teardown_socket_gateway(gw):
gw.exit()
gw.proxygw.exit()
def setup_ssh_gateway(request):
sshhost = request.getfuncargvalue('specssh').ssh
gw = py.execnet.SshGateway(sshhost)
return gw
| mit | Python |
fe35867409af3bdf9898b68ce356ef00b865ff29 | Change version to 1.0.2 (stable) | xcgd/account_credit_transfer | __openerp__.py | __openerp__.py | # -*- coding: utf-8 -*-
{
"name": "Account Credit Transfer",
"version": "1.0.2",
"author": "XCG Consulting",
"website": "http://www.openerp-experts.com",
"category": 'Accounting',
"description": """Account Voucher Credit Transfer Payment.
You need to set up some things before using it.
A credit transfer config link a bank with a parser
A credit transfer parser link a parser with a template that you can upload
""",
"depends": [
'base',
'account_streamline',
],
"data": [
"security/ir.model.access.csv",
"views/config.xml",
"views/parser.xml",
"views/res.bank.xml",
],
'demo_xml': [],
'test': [],
'installable': True,
'active': False,
'external_dependencies': {
'python': ['genshi']
}
}
| # -*- coding: utf-8 -*-
{
"name": "Account Credit Transfer",
"version": "1.0.1",
"author": "XCG Consulting",
"website": "http://www.openerp-experts.com",
"category": 'Accounting',
"description": """Account Voucher Credit Transfer Payment.
You need to set up some things before using it.
A credit transfer config link a bank with a parser
A credit transfer parser link a parser with a template that you can upload
""",
"depends": [
'base',
'account_streamline',
],
"data": [
"security/ir.model.access.csv",
"views/config.xml",
"views/parser.xml",
"views/res.bank.xml",
],
'demo_xml': [],
'test': [],
'installable': True,
'active': False,
'external_dependencies': {
'python': ['genshi']
}
}
| agpl-3.0 | Python |
2ebe62217f646ede745f5e148902d8570dba09dd | add tests for games and players methods | danielwillgeorge/ESPN-fantasy-football-analytics | tests/basic_test.py | tests/basic_test.py | import unittest
import sys
import pandas as pd
import espnfantasyfootball
from espnfantasyfootball.api import teams
#sys.path.append("/Users/daniel.george/Desktop/github/ESPN-fantasy-football-analytics/fantasy-football-analytics")
#from api import teams
"""Tests for ESPN-fantasy-football-analytics."""
class BasicTestSuite(unittest.TestCase):
"""Basic test cases."""
def test_is_teams_success(self):
self.assertIsInstance(teams(), pd.DataFrame)
def test_is_games_success(self):
self.assertIsInstance(games(2015), pd.DataFrame)
def test_is_players_success(self):
self.assertIsInstance(players(400554447), pd.DataFrame)
if __name__ == '__main__':
unittest.main() | import unittest
import sys
import pandas as pd
sys.path.append("/Users/daniel.george/Desktop/github/ESPN-fantasy-football-analytics/fantasy-football-analytics")
import espnfantasyfootball
from espnfantasyfootball.api import teams
#from api import teams
"""Tests for ESPN-fantasy-football-analytics."""
class BasicTestSuite(unittest.TestCase):
"""Basic test cases."""
def test_is_teams_success(self):
self.assertIsInstance(teams(), pd.DataFrame)
if __name__ == '__main__':
unittest.main() | mit | Python |
563f2e153437e7f78e05ed9dade1bd1690bef6a5 | Add session_timeout to ReservationAdmin list_display | Karspexet/Karspexet,Karspexet/Karspexet,Karspexet/Karspexet,Karspexet/Karspexet,Karspexet/Karspexet | karspexet/ticket/admin.py | karspexet/ticket/admin.py | from django.contrib import admin
from karspexet.ticket.models import Account, Reservation, Ticket, Voucher, PricingModel
class ReservationAdmin(admin.ModelAdmin):
list_display = ('show', 'total', 'finalized', 'reservation_code', 'session_timeout', 'tickets')
list_filter = ('finalized', 'show')
class TicketAdmin(admin.ModelAdmin):
list_display = ('price', 'ticket_type', 'show', 'seat', 'account', 'ticket_code')
class VoucherAdmin(admin.ModelAdmin):
list_display = ('amount', 'code', 'expiry_date', 'created_by')
list_filter = ('expiry_date', 'created_by')
class PricingModelAdmin(admin.ModelAdmin):
list_display = ('seating_group', 'prices', 'valid_from')
class AccountAdmin(admin.ModelAdmin):
list_display = ('name', 'email', 'phone')
admin.site.register(Reservation, ReservationAdmin)
admin.site.register(Ticket, TicketAdmin)
admin.site.register(Voucher, VoucherAdmin)
admin.site.register(PricingModel, PricingModelAdmin)
admin.site.register(Account, AccountAdmin)
| from django.contrib import admin
from karspexet.ticket.models import Account, Reservation, Ticket, Voucher, PricingModel
class ReservationAdmin(admin.ModelAdmin):
list_display = ('show', 'total', 'finalized', 'reservation_code', 'tickets')
list_filter = ('finalized', 'show')
class TicketAdmin(admin.ModelAdmin):
list_display = ('price', 'ticket_type', 'show', 'seat', 'account', 'ticket_code')
class VoucherAdmin(admin.ModelAdmin):
list_display = ('amount', 'code', 'expiry_date', 'created_by')
list_filter = ('expiry_date', 'created_by')
class PricingModelAdmin(admin.ModelAdmin):
list_display = ('seating_group', 'prices', 'valid_from')
class AccountAdmin(admin.ModelAdmin):
list_display = ('name', 'email', 'phone')
admin.site.register(Reservation, ReservationAdmin)
admin.site.register(Ticket, TicketAdmin)
admin.site.register(Voucher, VoucherAdmin)
admin.site.register(PricingModel, PricingModelAdmin)
admin.site.register(Account, AccountAdmin)
| mit | Python |
5f1715307587a3ac429bea716919ed52b8550934 | add a import line for fasta | schae234/LocusPocus,LinkageIO/LocusPocus | locuspocus/__init__.py | locuspocus/__init__.py | __version__ = '0.1.0'
__all__ = ['Locus']
#import pyximport
#pyximport.install()
from .Locus import Locus
from .Loci import Loci
from .Fasta import Fasta
| __version__ = '0.1.0'
__all__ = ['Locus']
#import pyximport
#pyximport.install()
from .Locus import Locus
from .Loci import Loci
| mit | Python |
ec3d9798994453c1e15f9bab072318a518de26b7 | Fix py-llvmlite (#14083) | LLNL/spack,LLNL/spack,iulian787/spack,iulian787/spack,iulian787/spack,iulian787/spack,LLNL/spack,LLNL/spack,iulian787/spack,LLNL/spack | var/spack/repos/builtin/packages/py-llvmlite/package.py | var/spack/repos/builtin/packages/py-llvmlite/package.py | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyLlvmlite(PythonPackage):
"""A lightweight LLVM python binding for writing JIT compilers"""
homepage = "http://llvmlite.readthedocs.io/en/latest/index.html"
url = "https://pypi.io/packages/source/l/llvmlite/llvmlite-0.23.0.tar.gz"
version('0.29.0', sha256='3adb0d4c9a17ad3dca82c7e88118babd61eeee0ee985ce31fa43ec27aa98c963')
version('0.27.1', sha256='48a1c3ae69fd8920cba153bfed8a46ac46474bc706a2100226df4abffe0000ab')
version('0.26.0', sha256='13e84fe6ebb0667233074b429fd44955f309dead3161ec89d9169145dbad2ebf')
version('0.25.0', sha256='fd64def9a51dd7dc61913a7a08eeba5b9785522740bec5a7c5995b2a90525025')
version('0.23.0', sha256='bc8b1b46274d05b578fe9e980a6d98fa71c8727f6f9ed31d4d8468dce7aa5762')
version('0.20.0', sha256='b2f174848df16bb9195a07fec102110a06d018da736bd9b3570a54d44c797c29')
depends_on('py-setuptools', type='build')
depends_on('python@2.6:2.8,3.4:', type=('build', 'run'))
depends_on('py-enum34', type=('build', 'run'), when='^python@:3.3.99')
# llvmlite compatibility information taken from https://github.com/numba/llvmlite#compatibility
depends_on('llvm@7.0:8.0.99', when='@0.29.0:')
depends_on('llvm@7.0:7.99', when='@0.27.0:0.28.99')
depends_on('llvm@6.0:6.99', when='@0.23.0:0.26.99')
depends_on('llvm@4.0:4.99', when='@0.17.0:0.20.99')
depends_on('binutils', type='build')
| # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyLlvmlite(PythonPackage):
"""A lightweight LLVM python binding for writing JIT compilers"""
homepage = "http://llvmlite.readthedocs.io/en/latest/index.html"
url = "https://pypi.io/packages/source/l/llvmlite/llvmlite-0.23.0.tar.gz"
version('0.27.1', sha256='48a1c3ae69fd8920cba153bfed8a46ac46474bc706a2100226df4abffe0000ab')
version('0.26.0', sha256='13e84fe6ebb0667233074b429fd44955f309dead3161ec89d9169145dbad2ebf')
version('0.25.0', sha256='fd64def9a51dd7dc61913a7a08eeba5b9785522740bec5a7c5995b2a90525025')
version('0.23.0', sha256='bc8b1b46274d05b578fe9e980a6d98fa71c8727f6f9ed31d4d8468dce7aa5762')
version('0.20.0', sha256='b2f174848df16bb9195a07fec102110a06d018da736bd9b3570a54d44c797c29')
depends_on('py-setuptools', type='build')
depends_on('python@2.6:2.8,3.4:', type=('build', 'run'))
depends_on('py-enum34', type=('build', 'run'), when='^python@:3.3.99')
depends_on('llvm@7.0:7.99', when='@0.27.0:')
depends_on('llvm@6.0:6.99', when='@0.23.0:0.26.99')
depends_on('llvm@4.0:4.99', when='@0.17.0:0.20.99')
depends_on('binutils', type='build')
| lgpl-2.1 | Python |
d8dbbe04b00452274016f6938400010ea9103bcc | change the "rules" in policy to 'object' type | darren-wang/ks3,darren-wang/ks3 | keystone/policy/schema.py | keystone/policy/schema.py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystone.common import validation
from keystone.common.validation import parameter_types
_policy_create_properties = {
'domain_id': parameter_types.id_string,
'name': parameter_types.name,
'description': validation.nullable(parameter_types.description),
'enabled': parameter_types.boolean,
'rules': {
'type': 'object'
}
}
_policy_update_properties = {
'name': parameter_types.name,
'description': validation.nullable(parameter_types.description),
'enabled': parameter_types.boolean,
'rules': {
'type': 'object'
}
}
policy_create = {
'type': 'object',
'properties': _policy_create_properties,
'required': ['domain_id', 'name', 'rules'],
'additionalProperties': True
}
policy_update = {
'type': 'object',
'properties': _policy_update_properties,
'minProperties': 1,
'additionalProperties': True
}
_rule_create_properties = {
'domain_id': parameter_types.id_string,
'policy_id': parameter_types.id_string,
'service': {
'type': 'string'
},
'permission': {
'type': 'string'
},
'condition': {
'type': 'string'
},
}
_rule_update_properties = {
'service': {
'type': 'string'
},
'permission': {
'type': 'string'
},
'condition': {
'type': 'string'
},
}
rule_create = {
'type': 'object',
'properties': _rule_create_properties,
'required': ['domain_id', 'policy_id', 'service',
'permission', 'condition'],
'additionalProperties': False
}
rule_update = {
'type': 'object',
'properties': _rule_update_properties,
'minProperties': 1,
'additionalProperties': False
}
| # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystone.common import validation
from keystone.common.validation import parameter_types
_policy_create_properties = {
'domain_id': parameter_types.id_string,
'name': parameter_types.name,
'description': validation.nullable(parameter_types.description),
'enabled': parameter_types.boolean,
'rules': {
'type': 'string'
}
}
_policy_update_properties = {
'name': parameter_types.name,
'description': validation.nullable(parameter_types.description),
'enabled': parameter_types.boolean,
'rules': {
'type': 'string'
}
}
policy_create = {
'type': 'object',
'properties': _policy_create_properties,
'required': ['domain_id', 'name', 'rules'],
'additionalProperties': True
}
policy_update = {
'type': 'object',
'properties': _policy_update_properties,
'minProperties': 1,
'additionalProperties': True
}
_rule_create_properties = {
'domain_id': parameter_types.id_string,
'policy_id': parameter_types.id_string,
'service': {
'type': 'string'
},
'permission': {
'type': 'string'
},
'condition': {
'type': 'string'
},
}
_rule_update_properties = {
'service': {
'type': 'string'
},
'permission': {
'type': 'string'
},
'condition': {
'type': 'string'
},
}
rule_create = {
'type': 'object',
'properties': _rule_create_properties,
'required': ['domain_id', 'policy_id', 'service',
'permission', 'condition'],
'additionalProperties': False
}
rule_update = {
'type': 'object',
'properties': _rule_update_properties,
'minProperties': 1,
'additionalProperties': False
}
| apache-2.0 | Python |
f932504ad5fb4e4973e54260a8ce97ee2a3e096a | Allow runtable to print compact view | tamasgal/km3pipe,tamasgal/km3pipe | km3pipe/utils/runtable.py | km3pipe/utils/runtable.py | # Filename: runtable.py
"""
Prints the run table for a given detector ID.
Usage:
runtable [options] DET_ID
runtable (-h | --help)
runtable --version
Options:
-h --help Show this screen.
-c Compact view.
-n RUNS Number of runs.
-s REGEX Regular expression to filter the runsetup name/id.
DET_ID Detector ID (eg. D_ARCA001).
"""
import re
import sys
import km3pipe as kp
__author__ = "Tamas Gal"
__copyright__ = "Copyright 2018, Tamas Gal and the KM3NeT collaboration."
__credits__ = []
__license__ = "MIT"
__maintainer__ = "Tamas Gal"
__email__ = "tgal@km3net.de"
__status__ = "Development"
log = kp.logger.get_logger(__name__)
def runtable(det_id, n=5, compact=False, sep='\t', regex=None):
"""Print the run table of the last `n` runs for given detector"""
db = kp.db.DBManager()
df = db.run_table(det_id)
if regex is not None:
try:
re.compile(regex)
except re.error:
log.error("Invalid regex!")
return
df = df[df['RUNSETUPNAME'].str.contains(regex)
| df['RUNSETUPID'].str.contains(regex)]
if n is not None:
df = df.tail(n)
if compact:
df = df[['RUN', 'DATETIME', 'RUNSETUPNAME']]
df.to_csv(sys.stdout, sep=sep)
def main():
from docopt import docopt
args = docopt(__doc__, version=kp.version)
try:
n = int(args['-n'])
except TypeError:
n = None
runtable(args['DET_ID'], n, regex=args['-s'], compact=args['-c'])
| # Filename: runtable.py
"""
Prints the run table for a given detector ID.
Usage:
runtable [-n RUNS] [-s REGEX] DET_ID
runtable (-h | --help)
runtable --version
Options:
-h --help Show this screen.
-n RUNS Number of runs.
-s REGEX Regular expression to filter the runsetup name/id.
DET_ID Detector ID (eg. D_ARCA001).
"""
import re
import sys
import km3pipe as kp
__author__ = "Tamas Gal"
__copyright__ = "Copyright 2018, Tamas Gal and the KM3NeT collaboration."
__credits__ = []
__license__ = "MIT"
__maintainer__ = "Tamas Gal"
__email__ = "tgal@km3net.de"
__status__ = "Development"
log = kp.logger.get_logger(__name__)
def runtable(det_id, n=5, sep='\t', regex=None):
"""Print the run table of the last `n` runs for given detector"""
db = kp.db.DBManager()
df = db.run_table(det_id)
if regex is not None:
try:
re.compile(regex)
except re.error:
log.error("Invalid regex!")
return
df = df[df['RUNSETUPNAME'].str.contains(regex)
| df['RUNSETUPID'].str.contains(regex)]
if n is not None:
df = df.tail(n)
df.to_csv(sys.stdout, sep=sep)
def main():
from docopt import docopt
args = docopt(__doc__, version=kp.version)
try:
n = int(args['-n'])
except TypeError:
n = None
runtable(args['DET_ID'], n, regex=args['-s'])
| mit | Python |
a38efe3fe3a717d9ad91bfc6aacab90989cd04a4 | Bump develop version [ci skip] | nephila/djangocms-page-meta,nephila/djangocms-page-meta | djangocms_page_meta/__init__.py | djangocms_page_meta/__init__.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
__version__ = '0.8.5.post1'
__author__ = 'Iacopo Spalletti <i.spalletti@nephila.it>'
default_app_config = 'djangocms_page_meta.apps.PageMetaConfig'
| # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
__version__ = '0.8.5'
__author__ = 'Iacopo Spalletti <i.spalletti@nephila.it>'
default_app_config = 'djangocms_page_meta.apps.PageMetaConfig'
| bsd-3-clause | Python |
3b1482b3e61f8de2e64c336bb12d24318037323b | adjust migration script path not found | Ouranosinc/Magpie,Ouranosinc/Magpie,Ouranosinc/Magpie | run_migration.py | run_migration.py | import os
curr_dir = os.path.dirname(__file__)
if curr_dir == '':
curr_dir = os.path.abspath('.')
alembic_ini = '{}/alembic.ini'.format(curr_dir)
os.system('alembic -c {} upgrade heads'.format(alembic_ini))
| import os
curr_dir = os.path.dirname(__file__)+'/alembic.ini'
os.system('alembic -c {} upgrade heads'.format(curr_dir)) | apache-2.0 | Python |
919c9a9fe9a4bbced0a460a0cd1cfa05e427eef2 | Fix recursive init | stencila/hub,stencila/hub,stencila/hub,stencila/hub,stencila/hub | director/general/custom_storages.py | director/general/custom_storages.py | '''
Allow for one S3 bucket with different roots for static and "media" (user uploads)
See:
http://stackoverflow.com/questions/10390244/how-to-set-up-a-django-project-with-django-storages-and-amazon-s3-but-with-diff
https://gist.github.com/defrex/82680e858281d3d3e6e4
http://www.laurii.info/2013/05/improve-s3boto-djangostorages-performance-custom-settings/
See https://github.com/jschneier/django-storages/blob/master/storages/backends/s3boto.py#L189
for available settings.
'''
from django.conf import settings
from storages.backends.s3boto import S3BotoStorage
from django.core.files.storage import FileSystemStorage
class StaticS3BotoStorage(S3BotoStorage):
'''
Used for "STATIC_ROOT" which is for static files
'''
host = 's3-us-west-2.amazonaws.com'
bucket_name = 'static.stenci.la'
secure_urls = False
class UploadsS3BotoStorage(S3BotoStorage):
'''
Used for "MEDIA_ROOT" which is for user uploads
'''
host = 's3-us-west-2.amazonaws.com'
bucket_name = 'uploads.stenci.la'
secure_urls = False
class SnapshotsS3BotoStorage(S3BotoStorage):
'''
Used for `components.models.Snapshot`
'''
def __init__(self):
self.host = 's3-us-west-2.amazonaws.com'
self.bucket_name = 'snapshots.stenci.la'
self.secure_urls = False
S3BotoStorage.__init__(self)
class SnapshotsFileSystemStorage(FileSystemStorage):
'''
Used for `components.models.Snapshot` when in local mode
'''
location = 'snapshots'
if settings.MODE=='local':
SnapshotsStorage = SnapshotsFileSystemStorage
else:
SnapshotsStorage = SnapshotsS3BotoStorage
| '''
Allow for one S3 bucket with different roots for static and "media" (user uploads)
See:
http://stackoverflow.com/questions/10390244/how-to-set-up-a-django-project-with-django-storages-and-amazon-s3-but-with-diff
https://gist.github.com/defrex/82680e858281d3d3e6e4
http://www.laurii.info/2013/05/improve-s3boto-djangostorages-performance-custom-settings/
See https://github.com/jschneier/django-storages/blob/master/storages/backends/s3boto.py#L189
for available settings.
'''
from django.conf import settings
from storages.backends.s3boto import S3BotoStorage
from django.core.files.storage import FileSystemStorage
class StaticS3BotoStorage(S3BotoStorage):
'''
Used for "STATIC_ROOT" which is for static files
'''
host = 's3-us-west-2.amazonaws.com'
bucket_name = 'static.stenci.la'
secure_urls = False
class UploadsS3BotoStorage(S3BotoStorage):
'''
Used for "MEDIA_ROOT" which is for user uploads
'''
host = 's3-us-west-2.amazonaws.com'
bucket_name = 'uploads.stenci.la'
secure_urls = False
class SnapshotsS3BotoStorage(S3BotoStorage):
'''
Used for `components.models.Snapshot`
'''
def __init__(self):
self.host = 's3-us-west-2.amazonaws.com'
self.bucket_name = 'snapshots.stenci.la'
self.secure_urls = False
SnapshotsS3BotoStorage.__init__(self)
class SnapshotsFileSystemStorage(FileSystemStorage):
'''
Used for `components.models.Snapshot` when in local mode
'''
location = 'snapshots'
if settings.MODE=='local':
SnapshotsStorage = SnapshotsFileSystemStorage
else:
SnapshotsStorage = SnapshotsS3BotoStorage
| apache-2.0 | Python |
2bb618031134358240132fdcf6f67b746dc38109 | update styling of evaluation metrics | dssg/wikienergy,dssg/wikienergy,dssg/wikienergy,dssg/wikienergy,dssg/wikienergy | disaggregator/evaluation_metrics.py | disaggregator/evaluation_metrics.py | import numpy as np
import math
import scipy
def sum_error(truth,prediction):
'''
Given a numpy array of truth values and prediction values, returns the
absolute value of the difference between their sums.
'''
return math.fabs(truth.sum()-prediction.sum())
def rss(truth,prediction):
'''Sum of squared residuals'''
return np.sum(np.square(np.subtract(truth,prediction)))
def guess_truth_from_power(signals,threshold):
'''
Helper function for ground truth signals without on/off information.
Given a series of power readings returns a numpy array where x[i]=0
if signals[i] < threshold and x[i]=1 if signals[i] >= threshold
'''
return np.array([1 if i>=threshold else 0 for i in signals])
def get_positive_negative_stats(true_states, predicted_states):
'''
Returns a list of series where the first series is the true positives,
the second series is the false negatives, the third series is the true
negatives and the fourth series is the false positives. I would like
to make this a truth table instead of putting the logic directly in the
list comprehension.
'''
true_positives = np.array([a and b for (a,b) in zip(true_states,predicted_states)])
false_negatives = np.array([1 if a==1 and b==0 else 0 for (a,b) in zip(true_states,predicted_states)])
true_negatives = np.array([1 if a==0 and b==0 else 0 for (a,b) in zip(true_states,predicted_states)])
false_positives = np.array([1 if a==0 and b==1 else 0 for (a,b) in zip(true_states,predicted_states)])
return [true_positives,false_negatives,true_negatives,false_positives]
def get_sensitivity(true_positives,false_negatives):
'''
Given a numpy array of true positives, and false negatives returns a
sensitivty measure. Then the sensitivity is equal to TP/(TP+FN), where TP
is a true positive, such that TP=1 when the predicted value was correctly
classified as positive and 0 otherwise and FN is false negative, such that
FN = 1 if a value was falsely predicted to be negative and 0 otherwise.
'''
return true_positives.sum()/(np.sum(true_positives.sum(),false_negatives.sum()))
def get_specificity(true_negatives, false_positives):
'''
Given a numpy array of true negatives, and false positives returns a
specificty measure. The specificity measure is equal to TN/(TN+FP), where
TN is a true negative, such that TN=1 when the predicted value was
correctly classified as negative and 0 otherwise and FP is a false
positive, such that FP = 1 if a value was falsely predicted to be positive
and 0 otherwise.
'''
return true_negatives.sum()/(np.sum(true_negatives.sum(),false_positives.sum()))
def get_precision(true_positives,false_positives):
'''Given a numpy array of true positives, and false positives returns a
precision measure. The precision measure is equal to TP/(TP+FP), where TP
is a true positive, such that TP=1 when the predicted value was correctly
classified as positive and 0 otherwise and FP is a false positive, such
that FP = 1 if a value was falsely predicted to be positive and 0
otherwise.
'''
return true_positives.sum()/(np.sum(true_positives.sum(),false_positives.sum()))
def get_accuracy():
pass
| import numpy as np
import math
import scipy
def sum_error(truth,prediction):
'''For a numpy array of truth values and prediction values returns the absolute value of the difference between their sums'''
return math.fabs(truth.sum()-prediction.sum())
def rss(truth,prediction):
'''Sum of squared residuals'''
return np.sum(np.square(np.subtract(truth,prediction)))
def guess_truth_from_power(signals,threshold):
'''Helper function for ground truth signals without on/off information. '\n' Given a series of power readings returns a numpy array where x[i]=0 if signals[i]< threshold and x[i]=1 if signals[i]>= threshold'''
return np.array([1 if i>=threshold else 0 for i in signals])
def get_positive_negative_stats(true_states, predicted_states):
'''Returns a list of series where the first series is the true positives, the second series is the false negatives, the third series is the true negatives and the fourth series is the false positives. I would like to make this a truth table instead of putting the logic directly in the list comprehension.'''
true_positives = np.array([a and b for (a,b) in zip(true_states,predicted_states)])
false_negatives = np.array([1 if a==1 and b==0 else 0 for (a,b) in zip(true_states,predicted_states)])
true_negatives = np.array([1 if a==0 and b==0 else 0 for (a,b) in zip(true_states,predicted_states)])
false_positives = np.array([1 if a==0 and b==1 else 0 for (a,b) in zip(true_states,predicted_states)])
return [true_positives,false_negatives,true_negatives,false_positives]
def get_sensitivity(true_positives,false_negatives):
'''Given a numpy array of true positives, and false negatives returns a sensitivty measure. Then the sensitivity is equal to TP/(TP+FN), where TP is a true positive, such that TP=1 when the predicted value was correctly classified as positive and 0 otherwise and FN is false negative, such that FN = 1 if a value was falsely predicted to be negative and 0 otherwise.
'''
return true_positives.sum()/(np.sum(true_positives.sum(),false_negatives.sum()))
def get_specificity(true_negatives, false_positives):
'''Given a numpy array of true negatives, and false positives returns a specificty measure. The specificity measure is equal to TN/(TN+FP), where TN is a true negative, such that TN=1 when the predicted value was correctly classified as negative and 0 otherwise and FP is a false positive, such that FP = 1 if a value was falsely predicted to be positive and 0 otherwise.
'''
return true_negatives.sum()/(np.sum(true_negatives.sum(),false_positives.sum()))
def get_precision(true_positives,false_positives):
'''Given a numpy array of true positives, and false positives returns a precision measure. The precision measure is equal to TP/(TP+FP), where TP is a true positive, such that TP=1 when the predicted value was correctly classified as positive and 0 otherwise and FP is a false positive, such that FP = 1 if a value was falsely predicted to be positive and 0 otherwise.
'''
return true_positives.sum()/(np.sum(true_positives.sum(),false_positives.sum()))
def get_accuracy():
pass
| mit | Python |
c7355960600312c624cdfa919894684a9952753a | fix vocab2lex | phsmit/iwclul2016-scripts,phsmit/iwclul2016-scripts,phsmit/iwclul2016-scripts | 02_amtraining/base_scripts/vocab2lex.py | 02_amtraining/base_scripts/vocab2lex.py | #!/usr/bin/env python3
import sys
def main(phone_map, abbreviations):
phone_map = {v[0]: v[1].strip()
for v in (l.split(None, 1)
for l in open(phone_map, encoding='utf-8'))}
abbr_map = {v[0]: v[1].strip().split(',')
for v in (l.split(None, 1)
for l in open(abbreviations, encoding='utf-8'))} if abbreviations is not None else {}
o = sys.stdout.buffer
o.write(b"__(1.0) __\n")
o.write(b"_(1.0) _\n")
o.write(b"<s>(1.0)\n")
o.write(b"</s>(1.0)\n")
for word in sys.stdin.readlines():
word = word.strip()
transcriptions = []
basic = [phone_map[c] for c in word if c in phone_map]
if len(basic) > 0:
transcriptions.append(basic)
if word in abbr_map:
for abbr in abbr_map[word]:
transcriptions.append([phone_map[c] for c in abbr if c in phone_map])
for trans in transcriptions:
o.write("{} ({:.1f}) ".format(word, 1/len(transcriptions)).encode("utf-8"))
rtrans = ["_"]+trans+["_"]
for i in range(1, len(trans)+1):
o.write("{}-{}+{} ".format(rtrans[i-1],rtrans[i],rtrans[i+1]).encode("iso-8859-15"))
o.write(b"\n")
if __name__ == "__main__":
main(sys.argv[1], sys.argv[2] if len(sys.argv) > 2 else None)
| #!/usr/bin/env python3
import sys
def main(phone_map, abbreviations):
phone_map = {v[0]: v[1].strip()
for v in (l.split(None, 1)
for l in open(phone_map, encoding='utf-8'))}
abbr_map = {v[0]: v[1].strip().split(',')
for v in (l.split(None, 1)
for l in open(abbreviations, encoding='utf-8'))} if abbreviations is not None else {}
o = sys.stdout.buffer
o.write(b"__(1.0) __\n")
o.write(b"_(1.0) _\n")
o.write(b"<s>(1.0)\n")
o.write(b"</s>(1.0)\n")
for word in sys.stdin.readlines():
word = word.strip()
transcriptions = []
basic = [phone_map[c] for c in word if c in phone_map]
if len(basic) > 0:
transcriptions.append(basic)
if word in abbr_map:
for abbr in abbr_map[word]:
transcriptions.append([phone_map[c] for c in abbr if c in phone_map])
for trans in transcriptions:
o.write("{} ({:.1f})\n".format(word, 1/len(transcriptions)).encode("utf-8"))
rtrans = ["_"]+trans+["_"]
for i in range(1, len(trans)+1):
o.write("{}-{}+{}".format(rtrans[i-1],rtrans[i],rtrans[i+1]).encode("iso-8859-15"))
o.write(b"\n")
if __name__ == "__main__":
main(sys.argv[1], sys.argv[2] if len(sys.argv) > 2 else None)
| bsd-3-clause | Python |
4fe4a32442e2b54abec73b4e9913ca50b851875c | Add import exception for docker-py | DimensionDataCBUSydney/mist.io,kelonye/mist.io,johnnyWalnut/mist.io,munkiat/mist.io,munkiat/mist.io,DimensionDataCBUSydney/mist.io,DimensionDataCBUSydney/mist.io,zBMNForks/mist.io,Lao-liu/mist.io,afivos/mist.io,DimensionDataCBUSydney/mist.io,Lao-liu/mist.io,munkiat/mist.io,johnnyWalnut/mist.io,kelonye/mist.io,munkiat/mist.io,Lao-liu/mist.io,kelonye/mist.io,afivos/mist.io,johnnyWalnut/mist.io,zBMNForks/mist.io,zBMNForks/mist.io,afivos/mist.io,Lao-liu/mist.io | src/mist/io/tests/helpers/docker_utils.py | src/mist/io/tests/helpers/docker_utils.py | try:
import docker
from mist.io.tests.settings import docker_nodes
except ImportError:
pass
def select_docker():
"""
Choose the container with the minimum load
"""
nodes = []
for node in docker_nodes:
nodes.append(docker.Client(base_url=node, version='1.10'))
if len(nodes) == 1:
chosen_node = nodes[0]
return chosen_node
containers_number = nodes[0].info()['Containers']
chosen_node = nodes[0]
for node in nodes:
if node.info()['Containers'] < containers_number:
containers_number = node.info()['Containers']
chosen_node = node
return chosen_node
def select_image(docker_node, flavor):
"""
Select image
"""
images = docker_node.images()
for image in images:
for tags in image['RepoTags']:
if flavor in tags:
image_id = image['Id']
if not image_id:
raise Exception("Could not find %s in images" % flavor)
return image_id
def create_container(docker_node, image_id, environment={}):
"""
Create docker container for the given image
"""
try:
container = docker_node.create_container(image=image_id, environment=environment)
return container['Id']
except Exception as e:
print e
def start_container(docker_node, container):
"""
Start previously created container
"""
try:
docker_node.start(container, publish_all_ports=True)
return
except Exception as e:
print e
def get_mist_uri(docker_node, container, port):
"""
Return the new uri, where io or core listens to
"""
base_url = docker_node.base_url[:-4]
mist_url = base_url + docker_node.port(container, int(port))[0]['HostPort']
return mist_url
def docker_all_in_one(flavor, environment={}):
if flavor not in ['io', 'core']:
raise Exception("Flavor must be either 'io', or 'core'")
node = select_docker()
image = select_image(node, flavor)
container = create_container(node, image, environment=environment)
start_container(node, container)
all_in_one = {
'URL': get_mist_uri(node, container, port="6543"),
'node': node,
'container': container
}
return all_in_one
| import docker
from mist.io.tests.settings import docker_nodes
def select_docker():
"""
Choose the container with the minimum load
"""
nodes = []
for node in docker_nodes:
nodes.append(docker.Client(base_url=node, version='1.10'))
if len(nodes) == 1:
chosen_node = nodes[0]
return chosen_node
containers_number = nodes[0].info()['Containers']
chosen_node = nodes[0]
for node in nodes:
if node.info()['Containers'] < containers_number:
containers_number = node.info()['Containers']
chosen_node = node
return chosen_node
def select_image(docker_node, flavor):
"""
Select image
"""
images = docker_node.images()
for image in images:
for tags in image['RepoTags']:
if flavor in tags:
image_id = image['Id']
if not image_id:
raise Exception("Could not find %s in images" % flavor)
return image_id
def create_container(docker_node, image_id, environment={}):
"""
Create docker container for the given image
"""
try:
container = docker_node.create_container(image=image_id, environment=environment)
return container['Id']
except Exception as e:
print e
def start_container(docker_node, container):
"""
Start previously created container
"""
try:
docker_node.start(container, publish_all_ports=True)
return
except Exception as e:
print e
def get_mist_uri(docker_node, container, port):
"""
Return the new uri, where io or core listens to
"""
base_url = docker_node.base_url[:-4]
mist_url = base_url + docker_node.port(container, int(port))[0]['HostPort']
return mist_url
def docker_all_in_one(flavor, environment={}):
if flavor not in ['io', 'core']:
raise Exception("Flavor must be either 'io', or 'core'")
node = select_docker()
image = select_image(node, flavor)
container = create_container(node, image, environment=environment)
start_container(node, container)
all_in_one = {
'URL': get_mist_uri(node, container, port="6543"),
'node': node,
'container': container
}
return all_in_one
| agpl-3.0 | Python |
f0adb9022408dbcfdbe7e83dc0633d560be4f533 | Use HTML5 date inputs for due_date | shacker/django-todo,shacker/django-todo,shacker/django-todo | todo/forms.py | todo/forms.py | from django import forms
from django.forms import ModelForm
from django.contrib.auth.models import Group
from todo.models import Item, List
from django.contrib.auth import get_user_model
class AddListForm(ModelForm):
# The picklist showing allowable groups to which a new list can be added
# determines which groups the user belongs to. This queries the form object
# to derive that list.
def __init__(self, user, *args, **kwargs):
super(AddListForm, self).__init__(*args, **kwargs)
self.fields['group'].queryset = Group.objects.filter(user=user)
class Meta:
model = List
exclude = []
class AddItemForm(ModelForm):
# The picklist showing the users to which a new task can be assigned
# must find other members of the groups the current list belongs to.
def __init__(self, task_list, *args, **kwargs):
super(AddItemForm, self).__init__(*args, **kwargs)
# print dir(self.fields['list'])
# print self.fields['list'].initial
self.fields['assigned_to'].queryset = get_user_model().objects.filter(groups__in=[task_list.group])
self.fields['assigned_to'].label_from_instance = \
lambda obj: "%s (%s)" % (obj.get_full_name(), obj.username)
due_date = forms.DateField(widget=forms.DateInput(attrs={'type': 'date'}))
title = forms.CharField(
widget=forms.widgets.TextInput(attrs={'size': 35})
)
note = forms.CharField(widget=forms.Textarea(), required=False)
class Meta:
model = Item
exclude = []
class EditItemForm(ModelForm):
# The picklist showing the users to which a new task can be assigned
# must find other members of the groups the current list belongs to.
def __init__(self, *args, **kwargs):
super(EditItemForm, self).__init__(*args, **kwargs)
self.fields['assigned_to'].queryset = get_user_model().objects.filter(groups__in=[self.instance.list.group])
due_date = forms.DateField(widget=forms.DateInput(attrs={'type': 'date'}))
class Meta:
model = Item
exclude = ('created_date', 'created_by',)
class AddExternalItemForm(ModelForm):
"""Form to allow users who are not part of the GTD system to file a ticket."""
title = forms.CharField(
widget=forms.widgets.TextInput(attrs={'size': 35})
)
note = forms.CharField(
widget=forms.widgets.Textarea(),
help_text='Foo',
)
class Meta:
model = Item
exclude = ('list', 'created_date', 'due_date', 'created_by', 'assigned_to',)
class SearchForm(forms.Form):
"""Search."""
q = forms.CharField(
widget=forms.widgets.TextInput(attrs={'size': 35})
)
| from django import forms
from django.forms import ModelForm
from django.contrib.auth.models import Group
from todo.models import Item, List
from django.contrib.auth import get_user_model
class AddListForm(ModelForm):
# The picklist showing allowable groups to which a new list can be added
# determines which groups the user belongs to. This queries the form object
# to derive that list.
def __init__(self, user, *args, **kwargs):
super(AddListForm, self).__init__(*args, **kwargs)
self.fields['group'].queryset = Group.objects.filter(user=user)
class Meta:
model = List
exclude = []
class AddItemForm(ModelForm):
# The picklist showing the users to which a new task can be assigned
# must find other members of the groups the current list belongs to.
def __init__(self, task_list, *args, **kwargs):
super(AddItemForm, self).__init__(*args, **kwargs)
# print dir(self.fields['list'])
# print self.fields['list'].initial
self.fields['assigned_to'].queryset = get_user_model().objects.filter(groups__in=[task_list.group])
self.fields['assigned_to'].label_from_instance = \
lambda obj: "%s (%s)" % (obj.get_full_name(), obj.username)
due_date = forms.DateField(
required=False,
widget=forms.DateTimeInput(attrs={'class': 'due_date_picker'})
)
title = forms.CharField(
widget=forms.widgets.TextInput(attrs={'size': 35})
)
note = forms.CharField(widget=forms.Textarea(), required=False)
class Meta:
model = Item
exclude = []
class EditItemForm(ModelForm):
# The picklist showing the users to which a new task can be assigned
# must find other members of the groups the current list belongs to.
def __init__(self, *args, **kwargs):
super(EditItemForm, self).__init__(*args, **kwargs)
self.fields['assigned_to'].queryset = get_user_model().objects.filter(groups__in=[self.instance.list.group])
class Meta:
model = Item
exclude = ('created_date', 'created_by',)
class AddExternalItemForm(ModelForm):
"""Form to allow users who are not part of the GTD system to file a ticket."""
title = forms.CharField(
widget=forms.widgets.TextInput(attrs={'size': 35})
)
note = forms.CharField(
widget=forms.widgets.Textarea(),
help_text='Foo',
)
class Meta:
model = Item
exclude = ('list', 'created_date', 'due_date', 'created_by', 'assigned_to',)
class SearchForm(forms.Form):
"""Search."""
q = forms.CharField(
widget=forms.widgets.TextInput(attrs={'size': 35})
)
| bsd-3-clause | Python |
4a0b5197b24e91413f3770c91dd460db25848df2 | Add a unit test for the default command | Sakshisaraswat/todoman,rimshaakhan/todoman,AnubhaAgrawal/todoman,pimutils/todoman,asalminen/todoman,hobarrera/todoman | tests/test_basic.py | tests/test_basic.py | import pytest
from todoman.cli import cli
def test_basic(tmpdir, runner):
result = runner.invoke(cli, ['list'], catch_exceptions=False)
assert not result.exception
assert result.output == ''
tmpdir.join('default/test.ics').write(
'BEGIN:VCALENDAR\n'
'BEGIN:VTODO\n'
'SUMMARY:harhar\n'
'END:VTODO\n'
'END:VCALENDAR'
)
result = runner.invoke(cli, ['list'])
assert not result.exception
assert 'harhar' in result.output
def test_human(runner):
result = runner.invoke(cli, [
'new', '-l', 'default', '-d', 'tomorrow', 'hail belzebub'
])
assert not result.exception
assert 'belzebub' in result.output
result = runner.invoke(cli, ['list'])
assert not result.exception
assert 'belzebub' in result.output
@pytest.mark.xfail(reason='issue#9')
def test_two_events(tmpdir, runner):
result = runner.invoke(cli, [
'list'
])
assert not result.exception
assert result.output == ''
tmpdir.join('default/test.ics').write(
'BEGIN:VCALENDAR\n'
'BEGIN:VTODO\n'
'SUMMARY:task one\n'
'END:VTODO\n'
'BEGIN:VTODO\n'
'SUMMARY:task two\n'
'END:VTODO\n'
'END:VCALENDAR'
)
result = runner.invoke(cli, ['list'])
assert not result.exception
assert len(result.output.splitlines()) == 2
assert 'task one' in result.output
assert 'task two' in result.output
def test_default_command(tmpdir, runner):
result = runner.invoke(cli, catch_exceptions=False)
assert not result.exception
assert result.output == ''
tmpdir.join('default/test.ics').write(
'BEGIN:VCALENDAR\n'
'BEGIN:VTODO\n'
'SUMMARY:harhar\n'
'END:VTODO\n'
'END:VCALENDAR'
)
result = runner.invoke(cli)
assert not result.exception
assert 'harhar' in result.output
# TODO: test aware/naive datetime sorting
# TODO: test --grep
| import pytest
from todoman.cli import cli
def test_basic(tmpdir, runner):
result = runner.invoke(cli, ['list'], catch_exceptions=False)
assert not result.exception
assert result.output == ''
tmpdir.join('default/test.ics').write(
'BEGIN:VCALENDAR\n'
'BEGIN:VTODO\n'
'SUMMARY:harhar\n'
'END:VTODO\n'
'END:VCALENDAR'
)
result = runner.invoke(cli, ['list'])
assert not result.exception
assert 'harhar' in result.output
def test_human(runner):
result = runner.invoke(cli, [
'new', '-l', 'default', '-d', 'tomorrow', 'hail belzebub'
])
assert not result.exception
assert 'belzebub' in result.output
result = runner.invoke(cli, ['list'])
assert not result.exception
assert 'belzebub' in result.output
@pytest.mark.xfail(reason='issue#9')
def test_two_events(tmpdir, runner):
result = runner.invoke(cli, [
'list'
])
assert not result.exception
assert result.output == ''
tmpdir.join('default/test.ics').write(
'BEGIN:VCALENDAR\n'
'BEGIN:VTODO\n'
'SUMMARY:task one\n'
'END:VTODO\n'
'BEGIN:VTODO\n'
'SUMMARY:task two\n'
'END:VTODO\n'
'END:VCALENDAR'
)
result = runner.invoke(cli, ['list'])
assert not result.exception
assert len(result.output.splitlines()) == 2
assert 'task one' in result.output
assert 'task two' in result.output
| isc | Python |
250ca36a6f2db3d38f34015b6094fc21e358db5e | Increase max_retries for flaky test | guoguo12/billboard-charts,guoguo12/billboard-charts | tests/test_dates.py | tests/test_dates.py | import datetime
import unittest
import billboard
from nose.tools import raises
class DateTest(unittest.TestCase):
def testDateRounding(self):
"""Checks that the Billboard website is rounding dates correctly: it should
round up to the nearest date on which a chart was published.
"""
chart = billboard.ChartData("hot-100", date="1958-01-01", max_retries=10)
self.assertEqual(chart.date, "1958-08-04") # The first Hot 100 chart
chart = billboard.ChartData("hot-100", date="2019-12-31")
self.assertEqual(chart.date, "2020-01-04")
def testPreviousNext(self):
"""Checks that the date, previousDate, and nextDate attributes are parsed
from the HTML, not computed. Specifically, we shouldn't assume charts are
always published seven days apart, since (as this example demonstrates)
this is not true.
"""
chart = billboard.ChartData("hot-100", date="1962-01-06")
self.assertEqual(chart.date, "1962-01-06")
self.assertEqual(chart.previousDate, "1961-12-25")
chart = billboard.ChartData("hot-100", date="1961-12-25")
self.assertEqual(chart.date, "1961-12-25")
self.assertEqual(chart.nextDate, "1962-01-06")
def testDatetimeDate(self):
"""Checks that ChartData correctly handles datetime objects as the
date parameter.
"""
chart = billboard.ChartData("hot-100", datetime.date(2016, 7, 9))
self.assertEqual(len(chart), 100)
self.assertEqual(chart.date, "2016-07-09")
@raises(ValueError)
def testUnsupportedDateFormat(self):
"""Checks that using an unsupported date format raises an exception."""
billboard.ChartData("hot-100", date="07-30-1996")
@raises(ValueError)
def testEmptyStringDate(self):
"""
Checks that passing an empty string as the date raises an exception.
"""
billboard.ChartData("hot-100", date="")
@raises(ValueError)
def testInvalidDate(self):
"""Checks that passing a correctly formatted but invalid date raises an exception."""
billboard.ChartData("hot-100", date="2018-99-99")
| import datetime
import unittest
import billboard
from nose.tools import raises
class DateTest(unittest.TestCase):
def testDateRounding(self):
"""Checks that the Billboard website is rounding dates correctly: it should
round up to the nearest date on which a chart was published.
"""
chart = billboard.ChartData("hot-100", date="1958-01-01")
self.assertEqual(chart.date, "1958-08-04") # The first Hot 100 chart
chart = billboard.ChartData("hot-100", date="2019-12-31")
self.assertEqual(chart.date, "2020-01-04")
def testPreviousNext(self):
"""Checks that the date, previousDate, and nextDate attributes are parsed
from the HTML, not computed. Specifically, we shouldn't assume charts are
always published seven days apart, since (as this example demonstrates)
this is not true.
"""
chart = billboard.ChartData("hot-100", date="1962-01-06")
self.assertEqual(chart.date, "1962-01-06")
self.assertEqual(chart.previousDate, "1961-12-25")
chart = billboard.ChartData("hot-100", date="1961-12-25")
self.assertEqual(chart.date, "1961-12-25")
self.assertEqual(chart.nextDate, "1962-01-06")
def testDatetimeDate(self):
"""Checks that ChartData correctly handles datetime objects as the
date parameter.
"""
chart = billboard.ChartData("hot-100", datetime.date(2016, 7, 9))
self.assertEqual(len(chart), 100)
self.assertEqual(chart.date, "2016-07-09")
@raises(ValueError)
def testUnsupportedDateFormat(self):
"""Checks that using an unsupported date format raises an exception."""
billboard.ChartData("hot-100", date="07-30-1996")
@raises(ValueError)
def testEmptyStringDate(self):
"""
Checks that passing an empty string as the date raises an exception.
"""
billboard.ChartData("hot-100", date="")
@raises(ValueError)
def testInvalidDate(self):
"""Checks that passing a correctly formatted but invalid date raises an exception."""
billboard.ChartData("hot-100", date="2018-99-99")
| mit | Python |
54e44f45e1ae7bed79fff482cfabbbf242bd9056 | Add a test for trailing commas | alexwlchan/alexwlchan.net,alexwlchan/alexwlchan.net,alexwlchan/alexwlchan.net,alexwlchan/alexwlchan.net,alexwlchan/alexwlchan.net | tests/test_links.py | tests/test_links.py | #!/usr/bin/env python
# -*- encoding: utf-8
import pytest
import requests
@pytest.mark.parametrize('path', [
# Check pagination is working correctly
'/page/2/', '/page/3/',
])
def test_pages_appear_correctly(path):
resp = requests.get(f'http://localhost:5757/{path}')
assert resp.status_code == 200
@pytest.mark.parametrize('path, text_in_page', [
('2017/', 'Posts from 2017'),
('2017/07/', 'Posts from July 2017'),
('', 'Older posts'),
('', '<title>alexwlchan</title>'),
('archive/', '<h3>2017</h3>'),
# Smartypants is working
('2017/09/lazy-reading-in-python', 'kept in Amazon S3 – XML exports'),
('2017/09/ode-to-docopt', 'I’ve used it in multiple languages'),
# Syntax highlighting is being applied correctly
('2017/09/useful-git-commands/', '''<code class="language-console" data-lang="console"><span></span><span class="gp">$</span> git rev-parse --show-toplevel
<span class="go">/Users/alexwlchan/repos/alexwlchan.net</span>
</code>'''),
# We're not adding trailing commas to tags
('2017/09/ode-to-docopt', 'python</a>, <a'),
])
def test_text_appears_in_pages(path, text_in_page):
resp = requests.get(f'http://localhost:5757/{path}')
assert resp.status_code == 200
assert text_in_page in resp.text
@pytest.mark.parametrize('path, text', [
# Year markers only appear in the global archives, not year or month pages
('2017/', '<h3>2017</h3>'),
('2017/07/', '<h3>2017</h3>'),
])
def test_text_does_not_appear_in_pages(path, text):
resp = requests.get(f'http://localhost:5757/{path}')
assert resp.status_code == 200
assert text not in resp.text
| #!/usr/bin/env python
# -*- encoding: utf-8
import pytest
import requests
@pytest.mark.parametrize('path', [
# Check pagination is working correctly
'/page/2/', '/page/3/',
])
def test_pages_appear_correctly(path):
resp = requests.get(f'http://localhost:5757/{path}')
assert resp.status_code == 200
@pytest.mark.parametrize('path, text_in_page', [
('2017/', 'Posts from 2017'),
('2017/07/', 'Posts from July 2017'),
('', 'Older posts'),
('', '<title>alexwlchan</title>'),
('archive/', '<h3>2017</h3>'),
# Smartypants is working
('2017/09/lazy-reading-in-python', 'kept in Amazon S3 – XML exports'),
('2017/09/ode-to-docopt', 'I’ve used it in multiple languages'),
# Syntax highlighting is being applied correctly
('2017/09/useful-git-commands/', '''<code class="language-console" data-lang="console"><span></span><span class="gp">$</span> git rev-parse --show-toplevel
<span class="go">/Users/alexwlchan/repos/alexwlchan.net</span>
</code>'''),
])
def test_text_appears_in_pages(path, text_in_page):
resp = requests.get(f'http://localhost:5757/{path}')
assert resp.status_code == 200
assert text_in_page in resp.text
@pytest.mark.parametrize('path, text', [
# Year markers only appear in the global archives, not year or month pages
('2017/', '<h3>2017</h3>'),
('2017/07/', '<h3>2017</h3>'),
])
def test_text_does_not_appear_in_pages(path, text):
resp = requests.get(f'http://localhost:5757/{path}')
assert resp.status_code == 200
assert text not in resp.text
| mit | Python |
8889b5e85308d2c8b276e5b678de32cabe26516f | expand multisite tests | SexualHealthInnovations/callisto-core,project-callisto/callisto-core,scattermagic/django-wizard-builder,SexualHealthInnovations/django-wizard-builder,SexualHealthInnovations/django-wizard-builder,project-callisto/callisto-core,SexualHealthInnovations/callisto-core,scattermagic/django-wizard-builder | tests/test_sites.py | tests/test_sites.py | from wizard_builder.models import QuestionPage
from django.contrib.sites.models import Site
from django.test import TestCase
class SitePageTest(TestCase):
def test_basic_created_question_page_comes_with_a_site(self):
page = QuestionPage.objects.create()
self.assertEqual(page.site.domain, 'example.com')
def test_on_site_increments_for_default_site(self):
count_before = QuestionPage.objects.on_site().count()
page = QuestionPage.objects.create()
count_after = QuestionPage.objects.on_site().count()
self.assertEqual(count_before + 1, count_after)
def test_on_site_does_not_increment_for_alternate_site(self):
count_before = QuestionPage.objects.on_site().count()
page = QuestionPage.objects.create()
page.site = Site.objects.create()
page.save()
count_after = QuestionPage.objects.on_site().count()
self.assertEqual(count_before, count_after)
| from wizard_builder.models import QuestionPage
from django.test import TestCase
class SitePageTest(TestCase):
def test_basic_created_question_page_comes_with_a_site(self):
page = QuestionPage.objects.create()
self.assertEqual(page.site.domain, 'example.com')
| agpl-3.0 | Python |
dd78cfb80f17adeb431119a7693be5d1a727e384 | expand site_id tests further! | scattermagic/django-wizard-builder,SexualHealthInnovations/callisto-core,SexualHealthInnovations/django-wizard-builder,project-callisto/callisto-core,project-callisto/callisto-core,SexualHealthInnovations/callisto-core,scattermagic/django-wizard-builder,SexualHealthInnovations/django-wizard-builder | tests/test_sites.py | tests/test_sites.py | from wizard_builder.models import QuestionPage
from django.contrib.sites.models import Site
from django.test import TestCase
from django.conf import settings
class TempSiteID():
'''
with TempSiteID(1):
...
'''
def __init__(self, site_id):
self.site_id_temp = site_id
def __enter__(self):
self.site_id_stable = getattr(settings, 'SITE_ID', 1)
settings.SITE_ID = self.site_id_temp
def __exit__(self, *args):
settings.SITE_ID = self.site_id_stable
class SitePageTest(TestCase):
def test_created_question_page_comes_with_a_site(self):
page = QuestionPage.objects.create()
self.assertEqual(page.site.domain, 'example.com')
def test_on_site_increments_for_default_site(self):
count_before = QuestionPage.objects.on_site().count()
QuestionPage.objects.create()
count_after = QuestionPage.objects.on_site().count()
self.assertEqual(count_before + 1, count_after)
def test_on_site_does_not_increment_for_alternate_site(self):
count_before = QuestionPage.objects.on_site().count()
page = QuestionPage.objects.create()
page.site = Site.objects.create()
page.save()
count_after = QuestionPage.objects.on_site().count()
self.assertEqual(count_before, count_after)
def test_question_page_responds_to_site_id_changes(self):
site_1_pages = 3
site_2_pages = site_1_pages + 1
site_2 = Site.objects.create()
for i in range(site_1_pages):
QuestionPage.objects.create()
for i in range(site_2_pages):
QuestionPage.objects.create(site=site_2)
self.assertEqual(QuestionPage.objects.on_site().count(), site_1_pages)
with TempSiteID(site_2.id):
self.assertEqual(QuestionPage.objects.on_site().count(), site_2_pages)
| from wizard_builder.models import QuestionPage
from django.contrib.sites.models import Site
from django.test import TestCase
class SitePageTest(TestCase):
def test_basic_created_question_page_comes_with_a_site(self):
page = QuestionPage.objects.create()
self.assertEqual(page.site.domain, 'example.com')
def test_on_site_increments_for_default_site(self):
count_before = QuestionPage.objects.on_site().count()
QuestionPage.objects.create()
count_after = QuestionPage.objects.on_site().count()
self.assertEqual(count_before + 1, count_after)
def test_on_site_does_not_increment_for_alternate_site(self):
count_before = QuestionPage.objects.on_site().count()
page = QuestionPage.objects.create()
page.site = Site.objects.create()
page.save()
count_after = QuestionPage.objects.on_site().count()
self.assertEqual(count_before, count_after)
| agpl-3.0 | Python |
24f1f686c5cdc9a2272adbea7d1c2e1eb481dc8d | Make vertical white space after license header consistent | varunarya10/oslo.i18n,openstack/oslo.i18n | tests/unit/fakes.py | tests/unit/fakes.py | # Copyright 2012 Intel Inc, OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Fakes For filter and weight tests.
"""
from openstack.common.scheduler import weights
class FakeWeigher1(weights.BaseHostWeigher):
def __init__(self):
pass
class FakeWeigher2(weights.BaseHostWeigher):
def __init__(self):
pass
class FakeClass(object):
def __init__(self):
pass
| # Copyright 2012 Intel Inc, OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Fakes For filter and weight tests.
"""
from openstack.common.scheduler import weights
class FakeWeigher1(weights.BaseHostWeigher):
def __init__(self):
pass
class FakeWeigher2(weights.BaseHostWeigher):
def __init__(self):
pass
class FakeClass(object):
def __init__(self):
pass
| apache-2.0 | Python |
c37c53b45580179440dcda701b7d94b0f8b9eb47 | add source command | tipsi/tipsi_tools,tipsi/tipsi_tools | tipsi_tools/unix.py | tipsi_tools/unix.py | import os
import socket
import subprocess
import time
import re
from contextlib import closing
from collections import ChainMap
def _prepare(out):
out = out.decode('utf8').strip('\n').split('\n')
if out == ['']:
return []
return out
def run(command):
'''
Run command in shell, accepts command construction from list
Return (return_code, stdout, stderr)
stdout and stderr - as list of strings
'''
if isinstance(command, list):
command = ' '.join(command)
out = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
return (out.returncode, _prepare(out.stdout), _prepare(out.stderr))
def succ(cmd, check_stderr=True):
'''
Alias to run with check return code and stderr
'''
code, out, err = run(cmd)
if code != 0:
for l in out:
print(l)
assert code == 0, 'Return: {} {}\nStderr: {}'.format(code, cmd, err)
if check_stderr:
assert err == [], 'Error: {} {}'.format(err, code)
return code, out, err
def check_socket(host, port):
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
try:
if sock.connect_ex((host, port)) == 0:
return True
except:
return False
def wait_result(func, result, timeout):
count = 0
while True:
res = func()
if result is None and res is None:
return True
elif res == result:
return True
time.sleep(1)
count += 1
if count > timeout:
return False
def wait_socket(host, port, timeout=120):
'''
Wait for socket opened on remote side. Return False after timeout
'''
return wait_result(lambda: check_socket(host, port), True, timeout)
def wait_no_socket(host, port, timeout=120):
return wait_result(lambda: check_socket(host, port), False, timeout)
def interpolate_sysenv(line, defaults={}):
'''
Format line system environment variables + defaults
'''
map = ChainMap(os.environ, defaults)
return line.format(**map)
def source(fname):
'''
Act's similar to bash 'source' or '.' commands.
'''
rex = re.compile('(?:export |declare -x )?(.*?)="(.*?)"')
out = call_out('source {} && export'.format(fname))
out = [x for x in out if 'export' in x or 'declare' in x]
out = {k:v for k, v in [rex.match(x).groups() for x in out if rex.match(x)]}
for k, v in out.items():
os.environ[k] = v
| import os
import socket
import subprocess
import time
from contextlib import closing
from collections import ChainMap
def _prepare(out):
out = out.decode('utf8').strip('\n').split('\n')
if out == ['']:
return []
return out
def run(command):
'''
Run command in shell, accepts command construction from list
Return (return_code, stdout, stderr)
stdout and stderr - as list of strings
'''
if isinstance(command, list):
command = ' '.join(command)
out = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
return (out.returncode, _prepare(out.stdout), _prepare(out.stderr))
def succ(cmd, check_stderr=True):
'''
Alias to run with check return code and stderr
'''
code, out, err = run(cmd)
if code != 0:
for l in out:
print(l)
assert code == 0, 'Return: {} {}\nStderr: {}'.format(code, cmd, err)
if check_stderr:
assert err == [], 'Error: {} {}'.format(err, code)
return code, out, err
def check_socket(host, port):
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
try:
if sock.connect_ex((host, port)) == 0:
return True
except:
return False
def wait_result(func, result, timeout):
count = 0
while True:
res = func()
if result is None and res is None:
return True
elif res == result:
return True
time.sleep(1)
count += 1
if count > timeout:
return False
def wait_socket(host, port, timeout=120):
'''
Wait for socket opened on remote side. Return False after timeout
'''
return wait_result(lambda: check_socket(host, port), True, timeout)
def wait_no_socket(host, port, timeout=120):
return wait_result(lambda: check_socket(host, port), False, timeout)
def interpolate_sysenv(line, defaults={}):
'''
Format line system environment variables + defaults
'''
map = ChainMap(os.environ, defaults)
return line.format(**map)
| mit | Python |
aad92644d01994685d20121def511da2765adfad | Add neighbrhood property to row | datascopeanalytics/chicago-new-business,datascopeanalytics/chicago-new-business | src/data.py | src/data.py | import csv
import datetime
class Row(dict):
def __init__(self, *args, **kwargs):
super(Row, self).__init__(*args, **kwargs)
self._start_date = None
self._end_date = None
def _cast_date(self, s):
if not s:
return None
return datetime.datetime.strptime(s, '%m/%d/%Y').date()
def _get_date_or_cast(self, s, attr):
if getattr(self, attr) is None:
setattr(self, attr, self._cast_date(s))
return getattr(self, attr)
@property
def start_date(self):
return self._get_date_or_cast(
self['DATE ISSUED'],
'_start_date',
)
@property
def end_date(self):
return self._get_date_or_cast(
self['LICENSE TERM EXPIRATION DATE'],
'_end_date',
)
@property
def account_number(self):
return self['ACCOUNT NUMBER']
@property
def neighborhood(self):
return self['NEIGHBORHOOD']
class RawReader(csv.DictReader):
def __iter__(self, *args, **kwargs):
row = self.next()
while row:
yield Row(row)
row = self.next()
class RawWriter(csv.DictWriter):
pass
| import csv
import datetime
class Row(dict):
def __init__(self, *args, **kwargs):
super(Row, self).__init__(*args, **kwargs)
self._start_date = None
self._end_date = None
def _cast_date(self, s):
if not s:
return None
return datetime.datetime.strptime(s, '%m/%d/%Y').date()
def _get_date_or_cast(self, s, attr):
if getattr(self, attr) is None:
setattr(self, attr, self._cast_date(s))
return getattr(self, attr)
@property
def start_date(self):
return self._get_date_or_cast(
self['DATE ISSUED'],
'_start_date',
)
@property
def end_date(self):
return self._get_date_or_cast(
self['LICENSE TERM EXPIRATION DATE'],
'_end_date',
)
@property
def account_number(self):
return self['ACCOUNT NUMBER']
class RawReader(csv.DictReader):
def __iter__(self, *args, **kwargs):
row = self.next()
while row:
yield Row(row)
row = self.next()
class RawWriter(csv.DictWriter):
pass
| unlicense | Python |
31a7e838b971fc9063c9dd7c35129f857fda6e0a | Include author name | CTPUG/wafer,CTPUG/wafer,CTPUG/wafer,CTPUG/wafer | wafer/management/commands/wafer_talk_video_reviewers.py | wafer/management/commands/wafer_talk_video_reviewers.py | import sys
import csv
from django.core.management.base import BaseCommand
from django.contrib.auth import get_user_model
from wafer.talks.models import Talk, ACCEPTED, PROVISIONAL
class Command(BaseCommand):
help = ("List talks and the associated video_reviewer emails."
" Only reviewers for accepted talks are listed")
def _video_reviewers(self, options):
talks = Talk.objects.filter(status=ACCEPTED)
csv_file = csv.writer(sys.stdout)
for talk in talks:
reviewer = talk.video_reviewer
if not reviewer:
reviewer = 'NO REVIEWER'
row = [x.encode("utf-8") for x in (
talk.title,
talk.get_authors_display_name(),
reviewer,
)]
csv_file.writerow(row)
def handle(self, *args, **options):
self._video_reviewers(options)
| import sys
import csv
from django.core.management.base import BaseCommand
from django.contrib.auth import get_user_model
from wafer.talks.models import Talk, ACCEPTED, PROVISIONAL
class Command(BaseCommand):
help = ("List talks and the associated video_reviewer emails."
" Only reviewers for accepted talks are listed")
def _video_reviewers(self, options):
talks = Talk.objects.filter(status=ACCEPTED)
csv_file = csv.writer(sys.stdout)
for talk in talks:
reviewer = talk.video_reviewer
if not reviewer:
reviewer = 'NO REVIEWER'
row = [x.encode("utf-8") for x in (
talk.title,
reviewer,
)]
csv_file.writerow(row)
def handle(self, *args, **options):
self._video_reviewers(options)
| isc | Python |
6ec7fac46c0970a91398b5f43168821fd4f3eb5c | Update main.py | bwolatz/CSCI4900,bwolatz/CSCI4900 | src/main.py | src/main.py | #!/usr/bin/env python
"""Imports"""
import subprocess
import glob
import os
import re
pomdir = os.getcwd()
jardir = pomdir + "/jars"
treedir = pomdir + "/tree"
mvncommand = "mvn -q dependency:copy-dependencies -DcopyPom=true -DoutputDirectory="+jardir
mvncommand2 = "mvn -q dependency:tree -Doutput=" + treedir + "/tree.txt -DoutputType=tgf"
subprocess.call(mvncommand, shell=True)
os.chdir(pomdir)
subprocess.call(mvncommand2, shell=True)
os.chdir(treedir)
p = re.compile('^[A-z]+$')
count = 0
dep1Jars = []
dep2Jars = []
dep3Jars = []
depfile = open('tree.txt', 'r')
for line in depfile:
""" skip first line """
if count == 0:
count += 1
elif len(line) > 9:
if p.match(line[3]) and line[3] != '\\':
dep1Jars.append(line[3:])
elif p.match(line[6]) and line[6] != '\\':
dep2Jars.append(line[6:])
elif p.match(line[9]) and line[9] != '\\':
dep3Jars.append(line[9:])
os.chdir(jardir)
str = ""
for jar1 in dep1Jars:
jar1.rstrip()
str = jar1.split(':')
jar1 = str[1] + "-" + str[3] + ".jar"
subprocess.call('dosocs2 scan '+jar1, shell=True)
for jar2 in dep2Jars:
jar2.rstrip()
str = jar2.split(':')
jar2 = str[1] + "-" + str[3] + ".jar"
subprocess.call('dosocs2 scan '+jar2, shell=True)
for jar3 in dep3Jars:
jar3.rstrip()
str = jar3.split(':')
jar3 = str[1] + "-" + str[3] + ".jar"
subprocess.call('dosocs2 scan '+jar3, shell=True)
| #!/usr/bin/env python
"""Imports"""
import subprocess
import glob
import os
currentdir = "/usr/share/fossology/nomos/agent/jars"
pomdir = "/usr/share/fossology/nomos/agent/"
mvncommand = "mvn -q dependency:copy-dependencies -DcopyPom=true -DoutputDirectory="+currentdir
mvncommand2 = "mvn dependency:tree -Doutput=" + pomdir + "/tree.txt -DoutputType=text"
subprocess.call(mvncommand, shell=True)
os.chdir(pomdir)
subprocess.call(mvncommand2, shell=True)
os.chdir(currentdir)
for jar in glob.glob('*.jar'):
subprocess.call('dosocs2 oneshot '+jar, shell=True)
| mit | Python |
14b5ac625bd676518997ec4d7073ac01efdcfbb8 | Update 3n+1.py | TheAlgorithms/Python | Maths/3n+1.py | Maths/3n+1.py | def main():
def n31(a):# a = initial number
c = 0
l = [a]
while a != 1:
if a % 2 == 0:#if even divide it by 2
a = a // 2
elif a % 2 == 1:#if odd 3n+1
a = 3*a +1
c += 1#counter
l += [a]
return l , c
print(n31(43))
print(n31(98)[0][-1])# = a
print("It took {0} steps.".format(n31(13)[1]))#optional finish
if __name__ == '__main__':
main()
| def main():
def n31(a):# a = initial number
c = 0
l = [a]
while a != 1:
if a % 2 == 0:#if even divide it by 2
a = a // 2
elif a % 2 == 1:#if odd 3n+1
a = 3*a +1
c += 1#counter
l += [a]
return l , c
print(n31(43))
print(n31(98)[0][-1])# = a
print("It took {0} steps.".format(n31(13))[1]))#optional finish
if __name__ == '__main__':
main()
| mit | Python |
5fe4575a6618b30456ff283a75e01f0ea68d3e7f | Optimize RGen. | vlinhd11/apk-signer,akhirasip/apk-signer,akhirasip/apk-signer,vlinhd11/apk-signer | tools/RGen.py | tools/RGen.py | #!/usr/bin/python3
# Copyright (C) 2012 Hai Bison
#
# See the file LICENSE at the root directory of this project for copying
# permission.
'''
This tool parses `messages.properties` and generates all strings to their
respective IDs, which can be put into class R.string.
'''
import os
import os.path
import re
import sys
SOURCE_FILE = os.sep.join([os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0]))),
'code','src','group','pals','desktop','app',
'apksigner','i18n','messages_en.properties'])
# PARSE SOURCE FILE
# Should start from 1...
count = 1
with open(SOURCE_FILE, 'r') as f:
for line in f:
line = line.strip()
if line and not line.startswith('#'):
print('public static final int {} = 0x{:08x};'\
.format(re.sub(r'(?s)=.*', '', line).strip(), count))
count += 1
| #!/usr/bin/python3
# Copyright (C) 2012 Hai Bison
#
# See the file LICENSE at the root directory of this project for copying
# permission.
'''
This tool parses `messages.properties` and generates all strings to their
respective IDs, which can be put into class R.string.
'''
import os
import os.path
import re
import sys
SOURCE_FILE = os.sep.join([os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0]))),
'code','src','group','pals','desktop','app',
'apksigner','i18n','messages_en.properties'])
# PARSE SOURCE FILE
# Should start from 1...
count = 1
with open(SOURCE_FILE, 'r') as f:
for line in f:
line = line.strip()
if not line.startswith('#') and len(line) > 0:
print('public static final int {} = 0x{:08x};'\
.format(re.sub(r'(?s)=.*', '', line).strip(), count))
count += 1
| mit | Python |
86305247d60feeafa529877f9141949fce20d523 | Make plot target axis configureable | dseuss/pythonlibs | tools/plot.py | tools/plot.py | #!/usr/bin/env python
# encoding: utf-8
"""Tools making everyday plotting tasks easier."""
from __future__ import division, print_function
import numpy as np
from matplotlib import pyplot as pl
def plot(function, intervall, num=500, axis=None, **kwargs):
"""Plots the function f on the axisis axis on the intervall (xmin, xmaxis)
:param function: Functions or list of function to plot
:param intervall: Intervall to plot function on (xmin, xmaxis)
:param num: Number of points used for the plot (default 500)
:param axis: Axis to plot on (default current axisis)
:returns: Plot (or list of plots)
"""
if hasattr(function, '__iter__'):
return [plot(f, intervall, num, axis, **kwargs) for f in function]
else:
x = np.linspace(*intervall, num=num)
axis = pl.gca() if axis is None else axis
return axis.plot(x, function(x), **kwargs)
def imshow(img, ax=None, **kwargs):
"""Shows the image `img` passed as numpy array in a much prettier way
:param np.ndarray img: Image to show passed as RGB or grayscale image
"""
ax = ax if ax is not None else pl.gca()
ax.grid(False)
ax.set_xticklabels([])
ax.set_yticklabels([])
pl.imshow(img, **kwargs)
pl.axis((0, img.shape[1], img.shape[0], 0))
pl.show()
| #!/usr/bin/env python
# encoding: utf-8
"""Tools making everyday plotting tasks easier."""
from __future__ import division, print_function
import numpy as np
from matplotlib import pyplot as pl
def plot(function, intervall, num=500, axis=None, **kwargs):
"""Plots the function f on the axisis axis on the intervall (xmin, xmaxis)
:param function: Functions or list of function to plot
:param intervall: Intervall to plot function on (xmin, xmaxis)
:param num: Number of points used for the plot (default 500)
:param axis: Axis to plot on (default current axisis)
:returns: Plot (or list of plots)
"""
if hasattr(function, '__iter__'):
return [plot(f, intervall, num, axis, **kwargs) for f in function]
else:
x = np.linspace(*intervall, num=num)
axis = pl.gca() if axis is None else axis
return axis.plot(x, function(x), **kwargs)
def imshow(img, **kwargs):
"""Shows the image `img` passed as numpy array in a much prettier way
:param np.ndarray img: Image to show passed as RGB or grayscale image
"""
fig = pl.figure(0)
ax = fig.add_subplot(111, autoscale_on=False)
ax.grid(False)
ax.set_xticklabels([])
ax.set_yticklabels([])
pl.imshow(img, **kwargs)
pl.axis((0, img.shape[1], img.shape[0], 0))
pl.show()
| unlicense | Python |
188de0dae55449dc1d6a15917423b2ec9d9a3fc1 | change owner to eventkit | terranodo/eventkit-cloud,venicegeo/eventkit-cloud,terranodo/eventkit-cloud,venicegeo/eventkit-cloud,terranodo/eventkit-cloud,venicegeo/eventkit-cloud,venicegeo/eventkit-cloud,venicegeo/eventkit-cloud,venicegeo/eventkit-cloud,terranodo/eventkit-cloud | manage.py | manage.py | #!/usr/bin/env python
import os
import sys
import pwd
import grp
if __name__ == "__main__":
if os.getenv("PRODUCTION"):
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "eventkit_cloud.settings.prod")
else:
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "eventkit_cloud.settings.dev")
from django.core.management import execute_from_command_line
if os.getenv("COVERAGE"):
is_testing = 'test' in sys.argv
if is_testing:
import coverage
if not os.path.exists('./coverage'):
os.mkdir('./coverage')
os.chmod('./coverage', 0775)
uid = pwd.getpwnam('eventkit').pw_uid
gid = grp.getgrnam('eventkit').gr_uid
os.chown('./coverage', uid, gid)
cov = coverage.coverage(config_file=".coveragerc",
source=["eventkit_cloud"])
cov.erase()
cov.start()
execute_from_command_line(sys.argv)
if is_testing:
cov.stop()
cov.save()
cov.report()
cov.html_report(directory='./coverage')
else:
execute_from_command_line(sys.argv) | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
if os.getenv("PRODUCTION"):
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "eventkit_cloud.settings.prod")
else:
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "eventkit_cloud.settings.dev")
from django.core.management import execute_from_command_line
if os.getenv("COVERAGE"):
is_testing = 'test' in sys.argv
if is_testing:
import coverage
if not os.path.exists('./coverage'):
os.mkdir('./coverage')
os.chmod('./coverage', 0775)
cov = coverage.coverage(config_file=".coveragerc",
source=["eventkit_cloud"])
cov.erase()
cov.start()
execute_from_command_line(sys.argv)
if is_testing:
cov.stop()
cov.save()
cov.report()
cov.html_report(directory='./coverage')
else:
execute_from_command_line(sys.argv) | bsd-3-clause | Python |
0976ce5424e9b4699152513821ea15473f715dab | Fix manage.py (remove CR from newlines) | dragonfly-science/django-pigeonpost,dragonfly-science/django-pigeonpost | manage.py | manage.py | #!/usr/bin/env python
from django.core.management import execute_manager
try:
import settings # Assumed to be in the same directory.
except ImportError:
import sys
sys.stderr.write("""Error: Can't find the file 'settings.py' in the directory containing %r.
It appears you've customized things.
You'll have to run django-admin.py, passing it your settings module.
(If the file settings.py does indeed exist, it's causing an ImportError somehow.)
""" % __file__)
sys.exit(1)
if __name__ == "__main__":
execute_manager(settings)
| #!/usr/bin/env python
from django.core.management import execute_manager
try:
import settings # Assumed to be in the same directory.
except ImportError:
import sys
sys.stderr.write("""Error: Can't find the file 'settings.py' in the directory containing %r.
It appears you've customized things.
You'll have to run django-admin.py, passing it your settings module.
(If the file settings.py does indeed exist, it's causing an ImportError somehow.)
""" % __file__)
sys.exit(1)
if __name__ == "__main__":
execute_manager(settings)
| mit | Python |
83afe8cf85be49af16c8e6da7110fb76a38b5843 | Drop unicode compatibility handling | pinax/pinax-blog,pinax/pinax-blog,pinax/pinax-blog | pinax/blog/conf.py | pinax/blog/conf.py | from django.conf import settings # noqa
from appconf import AppConf
from .utils import load_path_attr
def is_installed(package):
try:
__import__(package)
return True
except ImportError:
return False
DEFAULT_MARKUP_CHOICE_MAP = {
"markdown": {"label": "Markdown", "parser": "pinax.blog.parsers.markdown_parser.parse"}
}
if is_installed("creole"):
DEFAULT_MARKUP_CHOICE_MAP.update({
"creole": {"label": "Creole", "parser": "pinax.blog.parsers.creole_parser.parse"},
})
class PinaxBlogAppConf(AppConf):
ALL_SECTION_NAME = "all"
SECTIONS = []
UNPUBLISHED_STATES = [
"Draft"
]
FEED_TITLE = "Blog"
SECTION_FEED_TITLE = "Blog (%s)"
MARKUP_CHOICE_MAP = DEFAULT_MARKUP_CHOICE_MAP
MARKUP_CHOICES = DEFAULT_MARKUP_CHOICE_MAP
SCOPING_MODEL = None
SCOPING_URL_VAR = None
SLUG_UNIQUE = False
PAGINATE_BY = 10
HOOKSET = "pinax.blog.hooks.PinaxBlogDefaultHookSet"
ADMIN_JS = ("js/admin_post_form.js",)
def configure_markup_choices(self, value):
return [
(key, value[key]["label"])
for key in value.keys()
]
def configure_hookset(self, value):
return load_path_attr(value)()
class Meta:
prefix = "pinax_blog"
| from __future__ import unicode_literals
from django.conf import settings # noqa
from appconf import AppConf
from .utils import load_path_attr
def is_installed(package):
try:
__import__(package)
return True
except ImportError:
return False
DEFAULT_MARKUP_CHOICE_MAP = {
"markdown": {"label": "Markdown", "parser": "pinax.blog.parsers.markdown_parser.parse"}
}
if is_installed("creole"):
DEFAULT_MARKUP_CHOICE_MAP.update({
"creole": {"label": "Creole", "parser": "pinax.blog.parsers.creole_parser.parse"},
})
class PinaxBlogAppConf(AppConf):
ALL_SECTION_NAME = "all"
SECTIONS = []
UNPUBLISHED_STATES = [
"Draft"
]
FEED_TITLE = "Blog"
SECTION_FEED_TITLE = "Blog (%s)"
MARKUP_CHOICE_MAP = DEFAULT_MARKUP_CHOICE_MAP
MARKUP_CHOICES = DEFAULT_MARKUP_CHOICE_MAP
SCOPING_MODEL = None
SCOPING_URL_VAR = None
SLUG_UNIQUE = False
PAGINATE_BY = 10
HOOKSET = "pinax.blog.hooks.PinaxBlogDefaultHookSet"
ADMIN_JS = ("js/admin_post_form.js",)
def configure_markup_choices(self, value):
return [
(key, value[key]["label"])
for key in value.keys()
]
def configure_hookset(self, value):
return load_path_attr(value)()
class Meta:
prefix = "pinax_blog"
| mit | Python |
ade3ae6e3889e9e8c73313130896922f0d2d9af2 | improve and add feature | Rostlab/LocText,juanmirocks/LocText,juanmirocks/LocText,Rostlab/LocText,Rostlab/LocText,juanmirocks/LocText | loctext/learning/annotators.py | loctext/learning/annotators.py | from nalaf.learning.taggers import RelationExtractor
from nalaf.learning.taggers import StubSameSentenceRelationExtractor
from nalaf.learning.svmlight import SVMLightTreeKernels
from nalaf.structures.relation_pipelines import RelationExtractionPipeline
from nalaf.features.relations import NamedEntityCountFeatureGenerator
from loctext.relations.specific import LocationWordFeatureGenerator
class LocTextBaselineRelationExtractor(RelationExtractor):
def __init__(
self,
entity1_class,
entity2_class,
rel_type):
super().__init__(entity1_class, entity2_class, rel_type)
self.__annotator = StubSameSentenceRelationExtractor(entity1_class, entity2_class, rel_type)
def annotate(self, corpus):
return self.__annotator.annotate(corpus)
class LocTextRelationExtractor(RelationExtractor):
@staticmethod
def default_feature_generators(prot_e_id, loc_e_id, graphs=None):
#GRAPHS_CLOSURE_VARIABLE = {} if graphs is None else graphs
return [
LocationWordFeatureGenerator(loc_e_id, prefix1=2),
NamedEntityCountFeatureGenerator(prot_e_id, 107),
NamedEntityCountFeatureGenerator(loc_e_id, 108),
]
def __init__(
self,
entity1_class,
entity2_class,
rel_type,
bin_model,
pipeline=None,
svmlight=None,
execute_pipeline=True):
super().__init__(entity1_class, entity2_class, rel_type)
self.bin_model = bin_model
self.svmlight = svmlight if svmlight else SVMLightTreeKernels(model_path=self.bin_model, use_tree_kernel=False)
feature_generators = LocTextRelationExtractor.default_feature_generators(self.entity1_class, self.entity2_class)
self.pipeline = pipeline if pipeline else RelationExtractionPipeline(entity1_class, entity2_class, rel_type, feature_generators=feature_generators)
self.execute_pipeline = execute_pipeline
def annotate(self, corpus):
if self.execute_pipeline:
self.pipeline.execute(corpus, train=False)
instancesfile = self.svmlight.create_input_file(corpus, 'predict', self.pipeline.feature_set)
predictionsfile = self.svmlight.tag(instancesfile)
self.svmlight.read_predictions(corpus, predictionsfile)
return corpus
| from nalaf.learning.taggers import RelationExtractor
from nalaf.structures.dataset_pipelines import PrepareDatasetPipeline
from nalaf.learning.taggers import StubSameSentenceRelationExtractor
from nalaf.learning.svmlight import SVMLightTreeKernels
from nalaf.structures.relation_pipelines import RelationExtractionPipeline
from nalaf.features.relations import NamedEntityCountFeatureGenerator
class LocTextBaselineRelationExtractor(RelationExtractor):
def __init__(
self,
entity1_class,
entity2_class,
rel_type):
super().__init__(entity1_class, entity2_class, rel_type)
self.__annotator = StubSameSentenceRelationExtractor(entity1_class, entity2_class, rel_type)
def annotate(self, corpus):
return self.__annotator.annotate(corpus)
class LocTextRelationExtractor(RelationExtractor):
@staticmethod
def default_feature_generators(class1, class2, graphs=None):
#GRAPHS_CLOSURE_VARIABLE = {} if graphs is None else graphs
return [
NamedEntityCountFeatureGenerator(class1, 107), # Protein
NamedEntityCountFeatureGenerator(class2, 108), # Location
]
def __init__(
self,
entity1_class,
entity2_class,
rel_type,
bin_model,
pipeline=None,
svmlight=None,
execute_pipeline=True):
super().__init__(entity1_class, entity2_class, rel_type)
self.bin_model = bin_model
self.svmlight = svmlight if svmlight else SVMLightTreeKernels(model_path=self.bin_model, use_tree_kernel=False)
feature_generators = LocTextRelationExtractor.default_feature_generators(self.entity1_class, self.entity2_class)
self.pipeline = pipeline if pipeline else RelationExtractionPipeline(entity1_class, entity2_class, rel_type, feature_generators=feature_generators)
self.execute_pipeline = execute_pipeline
def annotate(self, corpus):
if self.execute_pipeline:
self.pipeline.execute(corpus, train=False)
instancesfile = self.svmlight.create_input_file(corpus, 'predict', self.pipeline.feature_set)
predictionsfile = self.svmlight.tag(instancesfile)
self.svmlight.read_predictions(corpus, predictionsfile)
return corpus
| apache-2.0 | Python |
bec3d37c6fc1ed61d3a528a8b54e1f9c4c98a4c9 | update doc string | AsylumConnect/asylum-connect-catalog,hack4impact/asylum-connect-catalog,hack4impact/flask-base,ColinHaley/Konsole,hack4impact/asylum-connect-catalog,AsylumConnect/asylum-connect-catalog,tobymccann/flask-base,ColinHaley/Konsole,AsylumConnect/asylum-connect-catalog,hack4impact/asylum-connect-catalog,tobymccann/flask-base,AsylumConnect/asylum-connect-catalog,hack4impact/asylum-connect-catalog,hack4impact/flask-base,tobymccann/flask-base,ColinHaley/Konsole,hack4impact/flask-base | manage.py | manage.py | #!/usr/bin/env python
import os
import subprocess
from flask.ext.migrate import Migrate, MigrateCommand
from flask.ext.script import Manager, Shell
from redis import Redis
from rq import Connection, Queue, Worker
from app import create_app, db
from app.models import Role, User
if os.path.exists('.env'):
print('Importing environment from .env file')
for line in open('.env'):
var = line.strip().split('=')
if len(var) == 2:
os.environ[var[0]] = var[1]
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
manager = Manager(app)
migrate = Migrate(app, db)
def make_shell_context():
return dict(app=app, db=db, User=User, Role=Role)
manager.add_command('shell', Shell(make_context=make_shell_context))
manager.add_command('db', MigrateCommand)
@manager.command
def test():
"""Run the unit tests."""
import unittest
tests = unittest.TestLoader().discover('tests')
unittest.TextTestRunner(verbosity=2).run(tests)
@manager.command
def recreate_db():
"""
Recreates a local database. You probably should not use this on
production.
"""
db.drop_all()
db.create_all()
db.session.commit()
@manager.option(
'-n',
'--number-users',
default=10,
type=int,
help='Number of each model type to create',
dest='number_users')
def add_fake_data(number_users):
"""
Adds fake data to the database.
"""
User.generate_fake(count=number_users)
@manager.command
def setup_dev():
"""Runs the set-up needed for local development."""
setup_general()
@manager.command
def setup_prod():
"""Runs the set-up needed for production."""
setup_general()
def setup_general():
"""Runs the set-up needed for both local development and production."""
Role.insert_roles()
@manager.command
def run_worker():
"""Initializes a slim rq task queue."""
listen = ['default']
conn = Redis(
host=app.config['RQ_DEFAULT_HOST'],
port=app.config['RQ_DEFAULT_PORT'],
db=0,
password=app.config['RQ_DEFAULT_PASSWORD'])
with Connection(conn):
worker = Worker(map(Queue, listen))
worker.work()
@manager.command
def format():
"""Runs the yapf and isort formatters over the project."""
isort = 'isort -rc --skip env .'
yapf = 'yapf -e "./env/*" -r -i .'
print 'Running {}'.format(isort)
subprocess.call(isort, shell=True)
print 'Running {}'.format(yapf)
subprocess.call(yapf, shell=True)
if __name__ == '__main__':
manager.run()
| #!/usr/bin/env python
import os
import subprocess
from flask.ext.migrate import Migrate, MigrateCommand
from flask.ext.script import Manager, Shell
from redis import Redis
from rq import Connection, Queue, Worker
from app import create_app, db
from app.models import Role, User
if os.path.exists('.env'):
print('Importing environment from .env file')
for line in open('.env'):
var = line.strip().split('=')
if len(var) == 2:
os.environ[var[0]] = var[1]
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
manager = Manager(app)
migrate = Migrate(app, db)
def make_shell_context():
return dict(app=app, db=db, User=User, Role=Role)
manager.add_command('shell', Shell(make_context=make_shell_context))
manager.add_command('db', MigrateCommand)
@manager.command
def test():
"""Run the unit tests."""
import unittest
tests = unittest.TestLoader().discover('tests')
unittest.TextTestRunner(verbosity=2).run(tests)
@manager.command
def recreate_db():
"""
Recreates a local database. You probably should not use this on
production.
"""
db.drop_all()
db.create_all()
db.session.commit()
@manager.option(
'-n',
'--number-users',
default=10,
type=int,
help='Number of each model type to create',
dest='number_users')
def add_fake_data(number_users):
"""
Adds fake data to the database.
"""
User.generate_fake(count=number_users)
@manager.command
def setup_dev():
"""Runs the set-up needed for local development."""
setup_general()
@manager.command
def setup_prod():
"""Runs the set-up needed for production."""
setup_general()
def setup_general():
"""Runs the set-up needed for both local development and production."""
Role.insert_roles()
@manager.command
def run_worker():
"""Initializes a slim rq task queue."""
listen = ['default']
conn = Redis(
host=app.config['RQ_DEFAULT_HOST'],
port=app.config['RQ_DEFAULT_PORT'],
db=0,
password=app.config['RQ_DEFAULT_PASSWORD'])
with Connection(conn):
worker = Worker(map(Queue, listen))
worker.work()
@manager.command
def format():
"""Runs the yapf formatter over the project."""
isort = 'isort -rc --skip env .'
yapf = 'yapf -e "./env/*" -r -i .'
print 'Running {}'.format(isort)
subprocess.call(isort, shell=True)
print 'Running {}'.format(yapf)
subprocess.call(yapf, shell=True)
if __name__ == '__main__':
manager.run()
| mit | Python |
6747fc4a4e648aff5af8b4b865b52e9a5c9e8ca3 | Add manager command | KIGOTHO/hdx-age-api,luiscape/hdx-monitor-ageing-service,luiscape/hdx-monitor-ageing-service,reubano/HDX-Age-API,reubano/HDX-Age-API,KIGOTHO/hdx-age-api | manage.py | manage.py | #!/usr/bin/env python
from __future__ import (
absolute_import, division, print_function, with_statement,
unicode_literals)
import os.path as p
from subprocess import call, check_call
from flask import current_app as app
from flask.ext.script import Server, Manager
from config import Config as c
from app import create_app, db
manager = Manager(create_app)
manager.add_option(
'-m', '--cfgmode', dest='config_mode', default='Development')
manager.add_option('-f', '--cfgfile', dest='config_file', type=p.abspath)
manager.add_command('runserver', Server(port=c.PORT))
manager.add_command('serve', Server(port=c.PORT))
manager.main = manager.run
@manager.command
def checkstage():
"""Checks staged with git pre-commit hook"""
path = p.join(p.dirname(__file__), 'app', 'tests', 'test.sh')
cmd = "sh %s" % path
return call(cmd, shell=True)
@manager.option('-F', '--file', help='Lint file', default='')
def lint(file):
"""Check style with flake8"""
return call("flake8 %s" % file, shell=True)
@manager.option('-w', '--where', help='Requirement file', default=None)
def test(where):
"""Run nose tests"""
cmd = "nosetests -xvw %s" % where if where else "nosetests -xv"
return call(cmd, shell=True)
@manager.command
def deploy():
"""Deploy staging app"""
check_call('heroku keys:add ~/.ssh/id_rsa.pub --remote staging', shell=True)
check_call('git push origin features', shell=True)
@manager.command
def deployprod():
"""Deploy production app"""
check_call(
'heroku keys:add ~/.ssh/id_rsa.pub --remote production', shell=True)
check_call('git push origin master', shell=True)
@manager.option('-r', '--requirement', help='Requirement file', default=None)
def pipme(requirement):
"""Install requirements.txt"""
prefix = '%s-' % requirement if requirement else ''
call('pippy -r %srequirements.txt' % prefix, shell=True)
@manager.command
def require():
"""Create requirements.txt"""
cmd = 'pip freeze -l | grep -vxFf dev-requirements.txt '
cmd += '| grep -vxFf prod-requirements.txt '
cmd += '> requirements.txt'
call(cmd, shell=True)
@manager.command
def createdb():
"""Creates database if it doesn't already exist"""
with app.app_context():
db.create_all()
print('Database created')
@manager.command
def cleardb():
"""Removes all content from database"""
with app.app_context():
db.drop_all()
print('Database cleared')
@manager.command
def resetdb():
"""Removes all content from database and creates new tables"""
with app.app_context():
cleardb()
createdb()
if __name__ == '__main__':
manager.run()
| #!/usr/bin/env python
from __future__ import (
absolute_import, division, print_function, with_statement,
unicode_literals)
import os.path as p
from subprocess import call, check_call
from flask import current_app as app
from flask.ext.script import Server, Manager
from config import Config as c
from app import create_app, db
manager = Manager(create_app)
manager.add_option(
'-m', '--cfgmode', dest='config_mode', default='Development')
manager.add_option('-f', '--cfgfile', dest='config_file', type=p.abspath)
manager.add_command('runserver', Server(port=c.PORT))
manager.main = manager.run
@manager.command
def checkstage():
"""Checks staged with git pre-commit hook"""
path = p.join(p.dirname(__file__), 'app', 'tests', 'test.sh')
cmd = "sh %s" % path
return call(cmd, shell=True)
@manager.option('-F', '--file', help='Lint file', default='')
def lint(file):
"""Check style with flake8"""
return call("flake8 %s" % file, shell=True)
@manager.option('-w', '--where', help='Requirement file', default=None)
def test(where):
"""Run nose tests"""
cmd = "nosetests -xvw %s" % where if where else "nosetests -xv"
return call(cmd, shell=True)
@manager.command
def deploy():
"""Deploy staging app"""
check_call('heroku keys:add ~/.ssh/id_rsa.pub --remote staging', shell=True)
check_call('git push origin features', shell=True)
@manager.command
def deployprod():
"""Deploy production app"""
check_call(
'heroku keys:add ~/.ssh/id_rsa.pub --remote production', shell=True)
check_call('git push origin master', shell=True)
@manager.option('-r', '--requirement', help='Requirement file', default=None)
def pipme(requirement):
"""Install requirements.txt"""
prefix = '%s-' % requirement if requirement else ''
call('pippy -r %srequirements.txt' % prefix, shell=True)
@manager.command
def require():
"""Create requirements.txt"""
cmd = 'pip freeze -l | grep -vxFf dev-requirements.txt '
cmd += '| grep -vxFf prod-requirements.txt '
cmd += '> requirements.txt'
call(cmd, shell=True)
@manager.command
def createdb():
"""Creates database if it doesn't already exist"""
with app.app_context():
db.create_all()
print('Database created')
@manager.command
def cleardb():
"""Removes all content from database"""
with app.app_context():
db.drop_all()
print('Database cleared')
@manager.command
def resetdb():
"""Removes all content from database and creates new tables"""
with app.app_context():
cleardb()
createdb()
if __name__ == '__main__':
manager.run()
| mit | Python |
f43b3bc50e25f77a9bfbd18a3fb551a7cf644b18 | fix map generator to exclude .exe and .out files. | arunrajora/algorithms,arunrajora/algorithms,arunrajora/algorithms | mapgen.py | mapgen.py | import sys,os
root = "./codes/"
for path, subdirs, files in os.walk(root):
print "processing : "+path
print "\tfound "+str(len(files)+len(subdirs))+" files."
mapperFile=open(path+"/mapper.txt","w+")
for dir in subdirs:
print "\t\tadding directory : "+dir
mapperFile.write(dir+"\n");
for file in files:
if(file=="mapper.txt" or file.endswith(".exe") or file.endswith(".out")):
print "\t\tskipping mapper file."
else:
print "\t\tadding file : "+file
mapperFile.write(file+"\n");
mapperFile.close(); | import sys,os
root = "./codes/"
for path, subdirs, files in os.walk(root):
print "processing : "+path
print "\tfound "+str(len(files)+len(subdirs))+" files."
mapperFile=open(path+"/mapper.txt","w+")
for dir in subdirs:
print "\t\tadding directory : "+dir
mapperFile.write(dir+"\n");
for file in files:
if(file!="mapper.txt"):
print "\t\tadding file : "+file
mapperFile.write(file+"\n");
else:
print "\t\tskipping mapper file."
mapperFile.close(); | mit | Python |
dff1c464b82cb870f589f5edf2e81b38fd78405f | add meta options | mathewmarcus/marshmallow-pynamodb | marshmallow_pynamodb/schema.py | marshmallow_pynamodb/schema.py | from marshmallow import Schema, SchemaOpts, post_load
from marshmallow.schema import SchemaMeta
from pynamodb.attributes import Attribute
from marshmallow_pynamodb.convert import converter
from six import with_metaclass
class ModelOpts(SchemaOpts):
def __init__(self, meta):
SchemaOpts.__init__(self, meta)
self.model = getattr(meta, 'model', None)
self.validate = getattr(meta, 'validate', False)
self.sync = getattr(meta, 'sync', False)
class ModelMeta(SchemaMeta):
@classmethod
def get_declared_fields(mcs, klass, cls_fields, inherited_fields, dict_cls):
declared_fields = super(ModelMeta, mcs).get_declared_fields(klass, cls_fields, inherited_fields, dict_cls)
if klass.opts.model:
attributes = {name: attr for name, attr in vars(klass.opts.model).iteritems() if
isinstance(attr, Attribute)}
for attr_name, attribute in attributes.iteritems():
field = converter.attribute2field(attribute)()
if attribute.is_hash_key or attribute.is_range_key or not attribute.null:
field.required = True
declared_fields[attr_name] = field
return declared_fields
class ModelSchema(with_metaclass(ModelMeta, Schema)):
OPTIONS_CLASS = ModelOpts
@post_load
def hydrate_pynamo_model(self, data, sync=False):
return self.opts.model(**data)
| from marshmallow import Schema, SchemaOpts, post_load
from marshmallow.schema import SchemaMeta
from pynamodb.attributes import Attribute
from marshmallow_pynamodb.convert import converter
from six import with_metaclass
class ModelOpts(SchemaOpts):
def __init__(self, meta):
SchemaOpts.__init__(self, meta)
self.model = getattr(meta, 'model', None)
class ModelMeta(SchemaMeta):
@classmethod
def get_declared_fields(mcs, klass, cls_fields, inherited_fields, dict_cls):
declared_fields = super(ModelMeta, mcs).get_declared_fields(klass, cls_fields, inherited_fields, dict_cls)
if klass.opts.model:
attributes = {name: attr for name, attr in vars(klass.opts.model).iteritems() if
isinstance(attr, Attribute)}
for attr_name, attribute in attributes.iteritems():
field = converter.attribute2field(attribute)()
if attribute.is_hash_key or attribute.is_range_key or not attribute.null:
field.required = True
declared_fields[attr_name] = field
return declared_fields
class ModelSchema(with_metaclass(ModelMeta, Schema)):
OPTIONS_CLASS = ModelOpts
@post_load
def hydrate_pynamo_model(self, data, sync=False):
return self.opts.model(**data)
| mit | Python |
2e3c85383fb3b8c8d9fc42abca1824f052d12886 | update test | hvy/chainer,okuta/chainer,chainer/chainer,hvy/chainer,wkentaro/chainer,okuta/chainer,niboshi/chainer,chainer/chainer,wkentaro/chainer,chainer/chainer,niboshi/chainer,niboshi/chainer,okuta/chainer,hvy/chainer,chainer/chainer,okuta/chainer,wkentaro/chainer,hvy/chainer,wkentaro/chainer,pfnet/chainer,niboshi/chainer | tests/chainer_tests/optimizer_hooks_tests/test_gradient_hard_clipping.py | tests/chainer_tests/optimizer_hooks_tests/test_gradient_hard_clipping.py | import unittest
import numpy as np
import chainer
from chainer import optimizer_hooks
from chainer import optimizers
from chainer import testing
_backend_params = [
# NumPy
{},
# {'use_ideep': 'always'},
# CuPy
{'use_cuda': True, 'cuda_device': 0},
# {'use_cuda': True, 'cuda_device': 1},
# ChainerX
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
# {'use_chainerx': True, 'chainerx_device': 'cuda:1'},
]
class SimpleLink(chainer.Link):
def __init__(self, params):
super(SimpleLink, self).__init__()
with self.init_scope():
for i, p in enumerate(params):
setattr(self, 'p{}'.format(i), p)
@testing.backend.inject_backend_tests(None, _backend_params)
@testing.backend.inject_backend_tests(None, _backend_params)
@testing.backend.inject_backend_tests(None, _backend_params)
class TestGradientHardClipping(unittest.TestCase):
def setUp(self):
num_params = 3
arrs = [
np.random.uniform(-3, 3, (2, 3)).astype(np.float32)
for _ in range(num_params)]
grads = [
np.random.uniform(-3, 3, (2, 3)).astype(np.float32)
for _ in range(num_params)]
params = []
for arr, grad in zip(arrs, grads):
param = chainer.Parameter(arr)
param.grad = grad
params.append(param)
self.target = SimpleLink(params)
def check_hardclipping(self, backend_configs):
target = self.target
assert len(backend_configs) == len(list(target.params()))
devices = [bc.device for bc in backend_configs]
lower_bound = -0.9
upper_bound = 1.1
expects = []
# Compute expected
for param, device in zip(target.params(), devices):
expects.append(param.array - np.clip(param.grad,
lower_bound, upper_bound))
param.to_device(device)
# Apply optimizer_hook
opt = optimizers.SGD(lr=1)
opt.setup(self.target)
opt.add_hook(
optimizer_hooks.GradientHardClipping(lower_bound, upper_bound))
opt.update()
# Validate
for expect, param in zip(expects, target.params()):
testing.assert_allclose(expect, param.array)
def test_hardclipping_cpu(self, backend_config0,
backend_config1, backend_config2):
self.check_hardclipping(
[backend_config0, backend_config1, backend_config2])
@testing.attr.gpu
def test_hardclipping_gpu(self, backend_config0,
backend_config1, backend_config2):
self.target.to_gpu()
self.check_hardclipping(
[backend_config0, backend_config1, backend_config2])
testing.run_module(__name__, __file__)
| import unittest
import numpy as np
import chainer
from chainer import backend
import chainer.initializers as I
from chainer import optimizer_hooks
from chainer import optimizers
from chainer import testing
from chainer.testing import attr
class SimpleLink(chainer.Link):
def __init__(self, w, g):
super(SimpleLink, self).__init__()
with self.init_scope():
self.param = chainer.Parameter(I.Zero(), w.shape)
self.param.data = w
self.param.grad = g
class TestGradientHardClipping(unittest.TestCase):
def setUp(self):
self.target = SimpleLink(
np.arange(6, dtype=np.float32).reshape(2, 3),
np.arange(3, -3, -1, dtype=np.float32).reshape(2, 3))
def check_hardclipping(self):
w = self.target.param.data
g = self.target.param.grad
xp = backend.get_array_module(w)
lower_bound = -0.9
upper_bound = 1.1
expect = w - xp.clip(g, lower_bound, upper_bound)
opt = optimizers.SGD(lr=1)
opt.setup(self.target)
opt.add_hook(
optimizer_hooks.GradientHardClipping(lower_bound, upper_bound))
opt.update()
testing.assert_allclose(expect, w)
def test_hardclipping_cpu(self):
self.check_hardclipping()
@attr.gpu
def test_hardclipping_gpu(self):
self.target.to_gpu()
self.check_hardclipping()
testing.run_module(__name__, __file__)
| mit | Python |
eddf5b2280300e6ddc98eb8be65a87fddb5df98e | Add comment | datalogai/recurrentshop | examples/speed_test.py | examples/speed_test.py | from recurrentshop import*
from keras.layers import*
from keras.models import*
import numpy as np
import time
import sys
# Script for comparing performance of native keras and recurrentshop stacked RNN implementations
# We observe 20-30% speed ups on GPU
sys.setrecursionlimit(10000000)
# Params
rnn, rnn_cell = LSTM, LSTMCell
depth = 3
input_length = 1000
dim = 10
nb_epoch = 5
unroll = False
# Random data
x = np.random.random((1000, input_length, dim))
y = np.random.random((1000, dim))
# Native keras model
model = Sequential()
for i in range(depth):
model.add(rnn(dim, return_sequences=i != depth-1, input_shape=(input_length, dim), unroll=unroll, consume_less='gpu')) # We set consume_less = 'gpu' so that both models use the same LSTM implementation.
model.compile(loss='mse', optimizer='sgd')
print('Compiling...')
model.train_on_batch(x[:1], y[:1]) # force compile
start_time = time.time()
model.fit(x, y, nb_epoch=nb_epoch)
end_time = time.time()
# recurrentshop model
rc = RecurrentContainer(input_length=input_length, unroll=unroll)
for _ in range(depth):
rc.add(rnn_cell(dim, input_dim=dim))
model = Sequential()
model.add(rc)
model.compile(loss='mse', optimizer='sgd')
print('Compiling...')
model.train_on_batch(x[:1], y[:1]) # force compile
start_time = time.time()
model.fit(x, y, nb_epoch=nb_epoch)
end_time = time.time()
recurrentshop_time_taken = end_time - start_time
keras_time_taken = 42 * 5
speed_up = keras_time_taken / recurrentshop_time_taken
print('Time taken by native keras model: ' + str(int(keras_time_taken)) + ' seconds.')
print('Time taken by recurrentshop model: ' + str(int(recurrentshop_time_taken)) + ' seconds.')
print('Speed up:' + str(speed_up) + 'X')
| from recurrentshop import*
from keras.layers import*
from keras.models import*
import numpy as np
import time
import sys
# Script for comparing performance of native keras and recurrentshop stacked RNN implementations
sys.setrecursionlimit(10000000)
# Params
rnn, rnn_cell = LSTM, LSTMCell
depth = 3
input_length = 1000
dim = 10
nb_epoch = 5
unroll = False
# Random data
x = np.random.random((1000, input_length, dim))
y = np.random.random((1000, dim))
# Native keras model
model = Sequential()
for i in range(depth):
model.add(rnn(dim, return_sequences=i != depth-1, input_shape=(input_length, dim), unroll=unroll, consume_less='gpu')) # We set consume_less = 'gpu' so that both models use the same LSTM implementation.
model.compile(loss='mse', optimizer='sgd')
print('Compiling...')
model.train_on_batch(x[:1], y[:1]) # force compile
start_time = time.time()
model.fit(x, y, nb_epoch=nb_epoch)
end_time = time.time()
# recurrentshop model
rc = RecurrentContainer(input_length=input_length, unroll=unroll)
for _ in range(depth):
rc.add(rnn_cell(dim, input_dim=dim))
model = Sequential()
model.add(rc)
model.compile(loss='mse', optimizer='sgd')
print('Compiling...')
model.train_on_batch(x[:1], y[:1]) # force compile
start_time = time.time()
model.fit(x, y, nb_epoch=nb_epoch)
end_time = time.time()
recurrentshop_time_taken = end_time - start_time
keras_time_taken = 42 * 5
speed_up = keras_time_taken / recurrentshop_time_taken
print('Time taken by native keras model: ' + str(int(keras_time_taken)) + ' seconds.')
print('Time taken by recurrentshop model: ' + str(int(recurrentshop_time_taken)) + ' seconds.')
print('Speed up:' + str(speed_up) + 'X')
| mit | Python |
19b9b6472dc08a797fb659032f9ec11f27845db1 | clean up some comments | tigfox/legendary-sniffle | Halpy/Halpy.py | Halpy/Halpy.py | #!/usr/bin/env python
#this is the most simplistic irc bot, meant as a first example for simple alerting
import socket
import sys
import time
import requests
from xml.etree import ElementTree
#constants
server = "irc.freenode.org"
channel = "#Synculus"
nick = "Halpy"
alert = ":!halp"
ack = ":!ack"
catsource = "http://thecatapi.com/api/images/get?format=xml&api_key=NjgxMjU&size=full&results_per_page=1&type=gif"
def getCat():
response = requests.get(catsource)
tree = ElementTree.fromstring(response.content)
cat = tree.findtext("./data/images/image/url")
link = tree.findtext('.data/images/image/source_url')
return cat
#def connectToServer():
#make a socket
irc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print ("connecting to: %s" % server)
#connect
irc.connect((server, 6667))
irc.send("USER %s %s %s :I'm helping!\n" % (nick,nick,nick))
irc.send("NICK %s\n" % nick)
#should be on network now
print ("joined %s" % server)
#connectToServer()
while 1:
text=irc.recv(2048)
print text
#once you're on the network, join channel
if text.find(':Halpy MODE') != -1:
irc.send("JOIN %s\n" % channel)
print ("tried to join %s" % channel)
#watch for pings - don't ping timeout
if text.find('PING') != -1:
irc.send('PONG %s\r\n' % text.split()[1])
print "pong"
#watch for halp
if text.find(alert) != -1:
t = text.split(alert)
to = t[1].strip()
u = text.split('!')
reporter = u[0].strip()[1:]
irc.send("PRIVMSG %s :OMG I'll find halp %s! \r\n" % (channel,reporter))
irc.send('PRIVMSG %s :Please enjoy this cat while I alert the humans: %s \r\n'%(channel,getCat()))
print "saw halp" #fire off some rockets or something
print "%s needs halp with %s in %s" % (reporter,str(to),channel)
#refuse ops
if text.find('+o %s' % nick) != -1:
irc.send('MODE %s -o %s \r\n' % (channel,nick))
irc.send('PRIVMSG %s :It is probably stupid to op %s \r\n' % (channel,nick))
print "donut op me pls"
#allow !halp to be acknowledged
if text.find(':!ack') != -1:
u = text.split('!')
acker = u[0].strip()[1:]
irc.send('PRIVMSG %s :Thanks %s! \r\n' % (channel,acker))
print "%s ack'd that shit"% acker
| #!/usr/bin/env python
#this is the most simplistic irc bot, meant as a first example for simple alerting
import socket
import sys
import time
import requests
from xml.etree import ElementTree
#constants
#server = "irc.etsycorp.com"
server = "irc.freenode.org"
channel = "#Synculus"
nick = "Halpy"
alert = ":!halp"
ack = ":!ack"
catsource = "http://thecatapi.com/api/images/get?format=xml&api_key=NjgxMjU&size=full&results_per_page=1&type=gif"
def getCat():
response = requests.get(catsource)
tree = ElementTree.fromstring(response.content)
cat = tree.findtext("./data/images/image/url")
link = tree.findtext('.data/images/image/source_url')
return cat
#def connectToServer():
#make a socket
irc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print ("connecting to: %s" % server)
#connect
irc.connect((server, 6667))
irc.send("USER %s %s %s :I'm helping!\n" % (nick,nick,nick))
irc.send("NICK %s\n" % nick)
#should be on network now
print ("joined %s" % server)
#connectToServer()
while 1:
text=irc.recv(2048)
print text
#once you're on the network, join channel
if text.find(':Halpy MODE') != -1:
irc.send("JOIN %s\n" % channel)
print ("tried to join %s" % channel)
#watch for pings - don't ping timeout
if text.find('PING') != -1:
irc.send('PONG %s\r\n' % text.split()[1])
print "pong"
#watch for halp
if text.find(alert) != -1:
t = text.split(alert)
to = t[1].strip()
u = text.split('!')
reporter = u[0].strip()[1:]
irc.send("PRIVMSG %s :OMG I'll find halp %s! \r\n" % (channel,reporter))
irc.send('PRIVMSG %s :Please enjoy this cat while I alert the humans: %s \r\n'%(channel,getCat()))
print "saw halp" #fire off some rockets or something
print "%s needs halp with %s in %s" % (reporter,str(to),channel)
#refuse ops
if text.find('+o %s' % nick) != -1:
irc.send('MODE %s -o %s \r\n' % (channel,nick))
irc.send('PRIVMSG %s :It is probably stupid to op %s \r\n' % (channel,nick))
print "donut op me pls"
#allow !halp to be acknowledged
if text.find(':!ack') != -1:
u = text.split('!')
acker = u[0].strip()[1:]
irc.send('PRIVMSG %s :Thanks %s! \r\n' % (channel,acker))
print "%s ack'd that shit"% acker
| mit | Python |
3ecf2c0e0ce723ed1f908bc640b86cc3309fe72b | Bump to 4.2.0 | vimalloc/flask-jwt-extended | flask_jwt_extended/__init__.py | flask_jwt_extended/__init__.py | from .jwt_manager import JWTManager
from .utils import create_access_token
from .utils import create_refresh_token
from .utils import current_user
from .utils import decode_token
from .utils import get_csrf_token
from .utils import get_current_user
from .utils import get_jti
from .utils import get_jwt
from .utils import get_jwt_header
from .utils import get_jwt_identity
from .utils import get_jwt_request_location
from .utils import get_unverified_jwt_headers
from .utils import set_access_cookies
from .utils import set_refresh_cookies
from .utils import unset_access_cookies
from .utils import unset_jwt_cookies
from .utils import unset_refresh_cookies
from .view_decorators import jwt_required
from .view_decorators import verify_jwt_in_request
__version__ = "4.2.0"
| from .jwt_manager import JWTManager
from .utils import create_access_token
from .utils import create_refresh_token
from .utils import current_user
from .utils import decode_token
from .utils import get_csrf_token
from .utils import get_current_user
from .utils import get_jti
from .utils import get_jwt
from .utils import get_jwt_header
from .utils import get_jwt_identity
from .utils import get_jwt_request_location
from .utils import get_unverified_jwt_headers
from .utils import set_access_cookies
from .utils import set_refresh_cookies
from .utils import unset_access_cookies
from .utils import unset_jwt_cookies
from .utils import unset_refresh_cookies
from .view_decorators import jwt_required
from .view_decorators import verify_jwt_in_request
__version__ = "4.1.0"
| mit | Python |
45fb01c1587cb9dba546d3d8001c782d59bed5a3 | Update median.py | thomi137/Python-Samples | median.py | median.py | #!/usr/bin/python
#########################################################################################
# Sample program to read csv data generated from MS Excel and put it into a single table
# SQLite DB. Negative Number formatting is stripped and appended with a minus ('-') sign
#
# Copyright 2013 by Thomas Prosser, thomas@prosser.ch
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#########################################################################################
-*- coding:utf-8 -*-
from random import Random
generator = Random()
x = [generator.randint(1,1000) for i in range(1000)]
k = ((len(x)//2) + (len(x)//n +1))//2 if len(x)%2 else len(x)//2
def quickselect(array, k):
pivot = generator.choice(array)
index = array.index(pivot)
a1, a2, a3 = [], [], []
# do three list comprehensions run faster than one for loop???
for a in array:
if a < pivot: a1.append(a)
if a == pivot: a2.append(a)
if a > pivot: a3.append(a)
if k < len(a1): return quickselect(a1, k)
if k > len(a1) + len(a2): return quickselect(a3, k - len(a1) - len(a2))
return pivot
if __name__ == '__main__':
median = quickselect(x, k)
print median
| #!/usr/bin/python
-*- coding:utf-8 -*-
from random import Random
generator = Random()
x = [generator.randint(1,1000) for i in range(1000)]
k = ((len(x)//2) + (len(x)//n +1))//2 if len(x)%2 else len(x)//2
def quickselect(array, k):
pivot = generator.choice(array)
index = array.index(pivot)
a1, a2, a3 = [], [], []
# do three list comprehensions run faster than one for loop???
for a in array:
if a < pivot: a1.append(a)
if a == pivot: a2.append(a)
if a > pivot: a3.append(a)
if k < len(a1): return quickselect(a1, k)
if k > len(a1) + len(a2): return quickselect(a3, k - len(a1) - len(a2))
return pivot
if __name__ == '__main__':
median = quickselect(x, k)
print median
| apache-2.0 | Python |
ed0d94a1f05f93ba59cb69a181b5665793e4d8d4 | Complete backtracking sol | bowen0701/algorithms_data_structures | lc0047_permutations_ii.py | lc0047_permutations_ii.py | """Leetcode 47. Permutations II
Medium
URL: https://leetcode.com/problems/permutations-ii/
Given a collection of numbers that might contain duplicates,
return all possible unique permutations.
Example:
Input: [1,1,2]
Output:
[
[1,1,2],
[1,2,1],
[2,1,1]
]
"""
class SolutionBacktrack(object):
def _backtrack(self, result, temp, is_used, nums):
if len(temp) == len(nums):
# Once a permutation is completed, shallow copy it to result.
result.append(temp[:])
return None
for i in range(len(nums)):
if (is_used[i] or
(i > 0 and nums[i] == nums[i - 1] and is_used[i - 1])):
continue
is_used[i] = True
temp.append(nums[i])
self._backtrack(result, temp, is_used, nums)
# Backtracking.
is_used[i] = False
temp.pop()
def permuteUnique(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
Time complexity: O(n*n!), where
- n is the length of nums for copying temp,
- n! is for permutation.
Space complexity: O(n*n!).
"""
# Sort to further avoid duplicates.
nums.sort()
result = []
temp = []
# Use used array to memorize usage in backtracking.
is_used = [False] * len(nums)
self._backtrack(result, temp, is_used, nums)
return result
def main():
# Output: [[1,1,2],[1,2,1],[2,1,1]]
nums = [1,1,2,4]
print SolutionBacktrack().permuteUnique(nums)
if __name__ == '__main__':
main()
| """Leetcode 47. Permutations II
Medium
URL: https://leetcode.com/problems/permutations-ii/
Given a collection of numbers that might contain duplicates,
return all possible unique permutations.
Example:
Input: [1,1,2]
Output:
[
[1,1,2],
[1,2,1],
[2,1,1]
]
"""
class Solution(object):
def permuteUnique(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
pass
def main():
pass
if __name__ == '__main__':
main()
| bsd-2-clause | Python |
7535bd611b26fa81944058c49e7238bd67a5f577 | Exclude unnecessary fields for Enjaz | enjaz/enjaz,enjaz/enjaz,osamak/student-portal,osamak/student-portal,enjaz/enjaz,osamak/student-portal,enjaz/enjaz,osamak/student-portal,osamak/student-portal,enjaz/enjaz | forms_builder/wrapper/forms.py | forms_builder/wrapper/forms.py | from django import forms
from django.forms.models import inlineformset_factory
from forms_builder.forms.models import Form, Field
class FormToBuildForm(forms.ModelForm):
"""
A form that is used to create or edit an instance of ``forms.models.Form``.
"""
class Meta:
model = Form
exclude = ('sites', 'redirect_url', 'login_required', 'send_email', 'email_from',
'email_copies', 'email_subject', 'email_message')
# A form set to manage adding, modifying, or deleting fields of a form
FieldFormSet = inlineformset_factory(Form, Field, exclude=('slug',), extra=1, can_delete=True) | from django import forms
from django.forms.models import inlineformset_factory
from forms_builder.forms.models import Form, Field
class FormToBuildForm(forms.ModelForm):
"""
A form that is used to create or edit an instance of ``forms.models.Form``.
"""
class Meta:
model = Form
# A form set to manage adding, modifying, or deleting fields of a form
FieldFormSet = inlineformset_factory(Form, Field, exclude=('slug',), extra=1, can_delete=True) | agpl-3.0 | Python |
953f99787e3578f4338752ce3099d530d021e725 | Add special case to add Side/SideOnly to difflist | Theerapak/MinecraftForge,brubo1/MinecraftForge,CrafterKina/MinecraftForge,RainWarrior/MinecraftForge,shadekiller666/MinecraftForge,Mathe172/MinecraftForge,Ghostlyr/MinecraftForge,Zaggy1024/MinecraftForge,mickkay/MinecraftForge,karlthepagan/MinecraftForge,fcjailybo/MinecraftForge,simon816/MinecraftForge,ThiagoGarciaAlves/MinecraftForge,jdpadrnos/MinecraftForge,bonii-xx/MinecraftForge,luacs1998/MinecraftForge,blay09/MinecraftForge,Vorquel/MinecraftForge,dmf444/MinecraftForge | fml/generatechangedfilelist.py | fml/generatechangedfilelist.py | import sys
import os
import subprocess, shlex
mcp_root = os.path.abspath(sys.argv[1])
sys.path.append(os.path.join(mcp_root,"runtime"))
from filehandling.srgshandler import parse_srg
def cmdsplit(args):
if os.sep == '\\':
args = args.replace('\\', '\\\\')
return shlex.split(args)
def main():
list_file = os.path.abspath(sys.argv[2])
with open(list_file, 'w') as fh:
write_changed(fh, os.path.join(mcp_root,"temp","client.md5"), os.path.join(mcp_root,"temp","client_reobf.md5"), 'minecraft')
write_changed(fh, os.path.join(mcp_root,"temp","server.md5"), os.path.join(mcp_root,"temp","server_reobf.md5"), 'minecraft_server')
def write_changed(fh, pre, post, name):
if not os.path.isfile(pre) or not os.path.isfile(post):
print 'MD5s Missing! Can not extract %s changed files' % name
return
cmd = 'diff --unchanged-group-format='' --old-group-format='' --new-group-format=\'%%>\' --changed-group-format=\'%%>\' %s %s' % (pre, post)
process = subprocess.Popen(cmdsplit(cmd), stdout=subprocess.PIPE, bufsize=-1)
difflist,_= process.communicate()
srg_data = parse_srg(os.path.join(mcp_root,"temp","client_rg.srg"))
classes = {}
for row in srg_data['CL']:
classes[row['deobf_name']] = row['obf_name']
for diff in difflist.splitlines():
diffrow=diff.strip().split()
clazz=diffrow[0]
if clazz in classes:
clazz=classes[clazz]
if clazz.startswith("net/minecraft/src/"):
clazz=clazz[len("net/minecraft/src/"):]
fh.write("%s/%s.class\n" %(name,clazz))
fh.write("%s/cpw/mods/fml/common/asm/SideOnly.class" % name)
fh.write("%s/cpw/mods/fml/common/Side.class" % name)
if __name__ == '__main__':
main()
| import sys
import os
import subprocess, shlex
mcp_root = os.path.abspath(sys.argv[1])
sys.path.append(os.path.join(mcp_root,"runtime"))
from filehandling.srgshandler import parse_srg
def cmdsplit(args):
if os.sep == '\\':
args = args.replace('\\', '\\\\')
return shlex.split(args)
def main():
list_file = os.path.abspath(sys.argv[2])
with open(list_file, 'w') as fh:
write_changed(fh, os.path.join(mcp_root,"temp","client.md5"), os.path.join(mcp_root,"temp","client_reobf.md5"), 'minecraft')
write_changed(fh, os.path.join(mcp_root,"temp","server.md5"), os.path.join(mcp_root,"temp","server_reobf.md5"), 'minecraft_server')
def write_changed(fh, pre, post, name):
if not os.path.isfile(pre) or not os.path.isfile(post):
print 'MD5s Missing! Can not extract %s changed files' % name
return
cmd = 'diff --unchanged-group-format='' --old-group-format='' --new-group-format=\'%%>\' --changed-group-format=\'%%>\' %s %s' % (pre, post)
process = subprocess.Popen(cmdsplit(cmd), stdout=subprocess.PIPE, bufsize=-1)
difflist,_= process.communicate()
srg_data = parse_srg(os.path.join(mcp_root,"temp","client_rg.srg"))
classes = {}
for row in srg_data['CL']:
classes[row['deobf_name']] = row['obf_name']
for diff in difflist.splitlines():
diffrow=diff.strip().split()
clazz=diffrow[0]
if clazz in classes:
clazz=classes[clazz]
if clazz.startswith("net/minecraft/src/"):
clazz=clazz[len("net/minecraft/src/"):]
fh.write("%s/%s.class\n" %(name,clazz))
if __name__ == '__main__':
main()
| lgpl-2.1 | Python |
892bca4fbcb084b78b1b902c1bc1e2634f4e3d2f | use self.stdout.write | cordery/django-countries-plus,cordery/django-countries-plus | countries_plus/management/commands/update_countries_plus.py | countries_plus/management/commands/update_countries_plus.py | from countries_plus.utils import update_geonames_data
from django.core.management.base import BaseCommand
class Command(BaseCommand):
help = 'Updates the Countries Plus database from geonames.org'
def handle(self, *args, **options):
num_updated, num_created = update_geonames_data()
self.stdout.write(
"Countries Plus data has been succesfully updated from geonames.org. "
"%s countries were updated, %s countries were created." % (num_updated, num_created))
| from django.core.management.base import BaseCommand
from countries_plus.utils import update_geonames_data
class Command(BaseCommand):
help = 'Updates the Countries Plus database from geonames.org'
def handle(self, *args, **options):
num_updated, num_created = update_geonames_data()
print "Countries Plus data has been succesfully updated from geonames.org. " \
"%s countries were updated, %s countries were created." % (num_updated, num_created)
| mit | Python |
496829572ecfbe485a6c086684179927e2787e5c | fix test | dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq | custom/icds/tests/serializers/test_hosted_ccz_serializer.py | custom/icds/tests/serializers/test_hosted_ccz_serializer.py | from __future__ import absolute_import
from __future__ import unicode_literals
import mock
from django.test import TestCase
from custom.icds.models import (
HostedCCZ,
HostedCCZLink,
)
from custom.icds.serializers import HostedCCZSerializer
BUILD = {
'build_profiles': {
'12345': {'name': 'Dummy Build Profile'},
},
}
class TestHostedCCZSerializer(TestCase):
raw_password = "123456"
@classmethod
def setUpClass(cls):
super(TestHostedCCZSerializer, cls).setUpClass()
cls.link = HostedCCZLink.objects.create(username="username", password="password",
identifier="link123", domain="test")
cls.hosted_ccz = HostedCCZ(link=cls.link, app_id="dummy", version=12, profile_id="12345",
file_name="my file")
@mock.patch('custom.icds.models.get_build_doc_by_version', lambda *args: BUILD)
def test_data(self):
self.assertEqual(
HostedCCZSerializer(self.hosted_ccz, context={'app_names': {
'dummy': 'Dummy App',
}}).data,
{'app_name': 'Dummy App', 'file_name': self.hosted_ccz.file_name,
'profile_name': 'Dummy Build Profile',
'app_id': 'dummy',
'ccz_details': {'name': self.hosted_ccz.file_name,
'download_url': '/a/test/ccz/hostings/None/download/'},
'link_name': self.link.identifier, 'link': self.link.pk, 'version': 12, 'id': None, 'note': '',
'status': 'pending'}
)
@classmethod
def tearDownClass(cls):
cls.link.delete()
super(TestHostedCCZSerializer, cls).tearDownClass()
| from __future__ import absolute_import
from __future__ import unicode_literals
import mock
from django.test import TestCase
from custom.icds.models import (
HostedCCZ,
HostedCCZLink,
)
from custom.icds.serializers import HostedCCZSerializer
BUILD = {
'build_profiles': {
'12345': {'name': 'Dummy Build Profile'},
},
}
class TestHostedCCZSerializer(TestCase):
raw_password = "123456"
@classmethod
def setUpClass(cls):
super(TestHostedCCZSerializer, cls).setUpClass()
cls.link = HostedCCZLink.objects.create(username="username", password="password",
identifier="link123", domain="test")
cls.hosted_ccz = HostedCCZ(link=cls.link, app_id="dummy", version=12, profile_id="12345",
file_name="my file")
@mock.patch('custom.icds.models.get_build_doc_by_version', lambda *args: BUILD)
def test_data(self):
self.assertEqual(
HostedCCZSerializer(self.hosted_ccz, context={'app_names': {
'dummy': 'Dummy App',
}}).data,
{'app_name': 'Dummy App', 'file_name': self.hosted_ccz.file_name,
'profile_name': 'Dummy Build Profile',
'app_id': 'dummy',
'ccz_details': {'name': self.hosted_ccz.file_name,
'download_url': '/a/test/ccz/hostings/None/download/'},
'link_name': self.link.identifier, 'link': self.link.pk, 'version': 12, 'id': None, 'note': ''}
)
@classmethod
def tearDownClass(cls):
cls.link.delete()
super(TestHostedCCZSerializer, cls).tearDownClass()
| bsd-3-clause | Python |
d71d76bc501ec93ba39b5aa94aeaee444145e123 | Fix forms without read only fields. | django-leonardo/django-leonardo,django-leonardo/django-leonardo,django-leonardo/django-leonardo,django-leonardo/django-leonardo | leonardo/module/media/admin/file/forms.py | leonardo/module/media/admin/file/forms.py |
from django.forms.models import modelformset_factory
from django.utils.translation import ugettext as _
from leonardo.forms import SelfHandlingModelForm
from leonardo.forms.fields.common import UserField
from leonardo.module.media.fields.folder import FolderField
from leonardo.module.media.models import File
class FileForm(SelfHandlingModelForm):
folder = FolderField(required=False)
owner = UserField(required=False)
def __init__(self, *args, **kwargs):
super(FileForm, self).__init__(*args, **kwargs)
if 'sha1' in self.fields:
self.fields['sha1'].widget.attrs['readonly'] = True
if '_file_size' in self.fields:
self.fields['_file_size'].widget.attrs['readonly'] = True
self.init_layout()
tabs = {
'File': {
'name': _('File'),
'fields': (
'id',
'name',
'original_filename',
'file',
'folder',
'owner'
)
},
'Advanced': {
'name': _('Advanced'),
'fields': (
'is_public', 'sha1', '_file_size'
)
}
}
class Meta:
model = File
exclude = ()
FileFormset = modelformset_factory(
File, form=FileForm, can_delete=True, extra=1)
|
from django.forms.models import modelformset_factory
from django.utils.translation import ugettext as _
from leonardo.forms import SelfHandlingModelForm
from leonardo.forms.fields.common import UserField
from leonardo.module.media.fields.folder import FolderField
from leonardo.module.media.models import File
class FileForm(SelfHandlingModelForm):
folder = FolderField(required=False)
owner = UserField(required=False)
def __init__(self, *args, **kwargs):
super(FileForm, self).__init__(*args, **kwargs)
self.fields['sha1'].widget.attrs['readonly'] = True
self.fields['_file_size'].widget.attrs['readonly'] = True
self.init_layout()
tabs = {
'File': {
'name': _('File'),
'fields': (
'id',
'name',
'original_filename',
'file',
'folder',
'owner'
)
},
'Advanced': {
'name': _('Advanced'),
'fields': (
'is_public', 'sha1', '_file_size'
)
}
}
class Meta:
model = File
exclude = ()
FileFormset = modelformset_factory(
File, form=FileForm, can_delete=True, extra=1)
| bsd-3-clause | Python |
4a26ed066bb5af254bb5aedce615dd502e1f8a79 | Update server.py | vanzhiganov/pySocket | server/server.py | server/server.py | import SocketServer
import time
import config
class TCPHandler(SocketServer.BaseRequestHandler):
def handle(self):
self.data = self.request.recv(1024).strip()
print "{} wrote:".format(self.client_address[0])
print "%s %s" % (int(time.time()), self.data)
# just send back the same data, but upper-cased
self.request.sendall(self.data.upper())
if __name__ == "__main__":
server = SocketServer.TCPServer((config.config['host'], config.config['port']), TCPHandler)
server.serve_forever()
| import SocketServer
import time
import config
class TCPHandler(SocketServer.BaseRequestHandler):
def handle(self):
self.data = self.request.recv(1024).strip()
print "{} wrote:".format(self.client_address[0])
print "%s %s" % (int(time.time()), self.data)
# just send back the same data, but upper-cased
self.request.sendall(self.data.upper())
if __name__ == "__main__":
server = SocketServer.TCPServer((config['host'], config['port']), TCPHandler)
server.serve_forever() | unlicense | Python |
a1d16d931772c52c214706da6242e470ee9a29d1 | Extend User model | twschum/mix-mind,twschum/mix-mind,twschum/mix-mind,twschum/mix-mind | models.py | models.py | from flask_sqlalchemy import SQLAlchemy
import util
db = SQLAlchemy()
class User(db.Model):
""" User where email address is account key
"""
email = db.Column(db.String(120), primary_key=True, unique=True, nullable=False)
password = db.Column(db.String)
uuid_ = db.Column(db.String(36), unique=True)
username = db.Column(db.String(80), nullable=True)
authenticated = db.Column(db.Boolean, default=False)
role = db.Column(db.String(), default="")
def __init__(self, *args, **kwargs):
super(User, self).__init__(*args, **kwargs)
self.uuid_ = util.get_uuid()
if not self.role:
self.role = "customer"
def __repr__(self):
return '<User {}>'.format(self.username)
# Implement flask-login required functions
@property
def is_authenticated(self):
"""This property should return True if the user is authenticated,
i.e. they have provided valid credentials.
(Only authenticated users will fulfill the criteria of login_required.)
"""
return self.authenticated
@property
def is_active(self):
"""This property should return True if this is an active user -
in addition to being authenticated, they also have activated their account,
not been suspended, or any condition your application has for rejecting an account.
Inactive accounts may not log in (without being forced of course).
"""
return True
@property
def is_anonymous(self):
"""This property should return True if this is an anonymous user.
(Actual users should return False instead.)
"""
return False
def get_id(self):
"""This method must return a unicode that uniquely identifies this user,
and can be used to load the user from the user_loader callback.
Note that this must be a unicode - if the ID is natively an int
or some other type, you will need to convert it to unicode.
"""
return self.email
| from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
class User(db.Model):
uuid_ = db.Column(db.String(36), primary_key=True)
username = db.Column(db.String(80), unique=True, nullable=False)
email = db.Column(db.String(120), unique=True, nullable=False)
authenticated = db.Column(db.Boolean, default=False)
def __repr__(self):
return '<User {}>'.format(self.username)
# Implement flask-login required functions
@property
def is_authenticated(self):
"""This property should return True if the user is authenticated,
i.e. they have provided valid credentials.
(Only authenticated users will fulfill the criteria of login_required.)
"""
self.authenticated
@property
def is_active(self):
"""This property should return True if this is an active user -
in addition to being authenticated, they also have activated their account,
not been suspended, or any condition your application has for rejecting an account.
Inactive accounts may not log in (without being forced of course).
"""
return True
@property
def is_anonymous(self):
"""This property should return True if this is an anonymous user.
(Actual users should return False instead.)
"""
return False
def get_id(self):
"""This method must return a unicode that uniquely identifies this user,
and can be used to load the user from the user_loader callback.
Note that this must be a unicode - if the ID is natively an int
or some other type, you will need to convert it to unicode.
"""
return self.uuid_
| apache-2.0 | Python |
b532b66fd552fd886b4c03633d6dab9f96fb31d8 | Remove helper function from models | patrickspencer/lytics,patrickspencer/lytics,patrickspencer/lytics,patrickspencer/lytics | models.py | models.py | # -*- coding: utf-8 -*-
"""
models
~~~~~~
Model definitions and class methods
:copyright: (c) 2016 by Patrick Spencer.
:license: Apache 2.0, see LICENSE for more details.
"""
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, Boolean, Date, Time, \
String, ForeignKey, Float
Base = declarative_base()
class ExpenditureCategory(Base):
__tablename__ = 'finances_category'
id = Column(Integer, primary_key=True)
name = Column(String)
def __repr__(self):
return "<User(username='%s')>" % (self.username)
class Expenditure(Base):
__tablename__ = 'finances_expenditure'
id = Column(Integer, primary_key=True)
date = Column(Date)
time = Column(Time)
description = Column(String)
cost = Column(Float)
category_id = Column(Integer, ForeignKey(ExpenditureCategory.id), nullable=True)
def __repr__(self):
return "<User(date='%s', description='%s', cost='%s')>" % (self.date, self.description, self.cost)
| # -*- coding: utf-8 -*-
"""
models
~~~~~~
Model definitions and class methods
:copyright: (c) 2016 by Patrick Spencer.
:license: Apache 2.0, see LICENSE for more details.
"""
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, Boolean, Date, Time, \
String, ForeignKey, Float
Base = declarative_base()
class ExpenditureCategory(Base):
__tablename__ = 'finances_category'
id = Column(Integer, primary_key=True)
name = Column(String)
def __repr__(self):
return "<User(username='%s')>" % (self.username)
class Expenditure(Base):
__tablename__ = 'finances_expenditure'
id = Column(Integer, primary_key=True)
date = Column(Date)
time = Column(Time)
description = Column(String)
cost = Column(Float)
category_id = Column(Integer, ForeignKey(ExpenditureCategory.id), nullable=True)
def __repr__(self):
return "<User(date='%s', description='%s', cost='%s')>" % (self.date, self.description, self.cost)
def month_bounds(self, year, month):
"""
Returns a tuple of datetime objects (month_start,month_end) given a year and month.
Both params are strings because we want month to be a two digit month representation
and python doesn't handle leading zeros in integers as we want.
:param year: four digit year as a string e.g. "2016"
:param month: 2 digit month as a string e.g. 2 for February, 11 for November
"""
year = int(year)
month = int(month)
month_start = datetime.strptime('%s,%s,1' % (year, month),'%Y,%m,%d')
# days_in_month returns a tuple(weekday, days) where
# weekday is the eekday the month starts on and days is the number of days in the month
days_in_month = calendar.monthrange(year,month)
month_end = month_start + timedelta(days=days_in_month[1]-1)
return (month_start, month_end)
| apache-2.0 | Python |
eba1a2140762502656a514759d925e502de3e026 | Remove AccountFilter. | pcapriotti/pledger | pledger/filters.py | pledger/filters.py | from pledger.entry import Entry
class FilterCollection(object):
def __init__(self):
self.filters = { }
def add_filter(self, filter, level = 0):
self.filters.setdefault(level, [])
self.filters[level].append(filter)
def apply(self, transaction, account, amount):
entries = [Entry(account, amount)]
levels = self.filters.keys()
levels.sort()
for level in levels:
filters = self.filters[level]
new_entries = []
for filter in filters:
for entry in entries:
new_entries += filter.apply(transaction, entry)
entries = new_entries
return entries
class Predicate(object):
def __init__(self, predicate):
self.predicate = predicate
def __call__(self, *args):
return self.predicate(*args)
def __and__(self, other):
@Predicate
def result(*args):
return self(*args) and other(*args)
return result
def __or__(self, other):
@Predicate
def result(*args):
return self(*args) or other(*args)
return result
@classmethod
def has_account(cls, account):
@cls
def result(transaction, entry):
return entry.account == account
return result
class Generator(object):
def __init__(self, generator):
self.generator = generator
def __call__(self, *args):
return self.generator(*args)
def __add__(self, other):
@Generator
def result(*args):
return self(*args) + other(*args)
return result
Generator.identity = Generator(lambda x: [x])
Generator.null = Generator(lambda x: [])
class RuleFilter(object):
def __init__(self, predicate, generator, complement = Generator.identity):
self.predicate = predicate
self.generators = {
True : generator,
False : complement }
def apply(self, transaction, entry):
gen = self.generators[self.predicate(transaction, entry)]
return gen(entry)
class Filter(RuleFilter):
def __init__(self, predicate):
super(Filter, self).__init__(predicate, Generator.identity, Generator.null)
| from pledger.entry import Entry
class FilterCollection(object):
def __init__(self):
self.filters = { }
def add_filter(self, filter, level = 0):
self.filters.setdefault(level, [])
self.filters[level].append(filter)
def apply(self, transaction, account, amount):
entries = [Entry(account, amount)]
levels = self.filters.keys()
levels.sort()
for level in levels:
filters = self.filters[level]
new_entries = []
for filter in filters:
for entry in entries:
new_entries += filter.apply(transaction, entry)
entries = new_entries
return entries
class Filter(object):
pass
class Predicate(object):
def __init__(self, predicate):
self.predicate = predicate
def __call__(self, *args):
return self.predicate(*args)
def __and__(self, other):
@Predicate
def result(*args):
return self(*args) and other(*args)
return result
def __or__(self, other):
@Predicate
def result(*args):
return self(*args) or other(*args)
return result
class Generator(object):
def __init__(self, generator):
self.generator = generator
def __call__(self, *args):
return self.generator(*args)
def __add__(self, other):
@Generator
def result(*args):
return self(*args) + other(*args)
return result
Generator.identity = Generator(lambda x: [x])
Generator.null = Generator(lambda x: [])
class RuleFilter(Filter):
def __init__(self, predicate, generator, complement = Generator.identity):
self.predicate = predicate
self.generators = {
True : generator,
False : complement }
def apply(self, transaction, entry):
gen = self.generators[self.predicate(transaction, entry)]
return gen(entry)
class AccountFilter(RuleFilter):
def __init__(self, account):
@Predicate
def predicate(transaction, entry):
return entry.account == account
super(AccountFilter, self).__init__(predicate, Generator.identity, Generator.null)
| mit | Python |
ebbaebc608d923f437f396957d71615493c13055 | Change feature return | harvitronix/five-video-classification-methods | extractor.py | extractor.py | from keras.preprocessing import image
from keras.applications.inception_v3 import InceptionV3, preprocess_input
from keras.models import Model, load_model
from keras.layers import Input
import numpy as np
class Extractor():
def __init__(self, weights=None):
"""Either load pretrained from imagenet, or load our saved
weights from our own training."""
self.weights = weights # so we can check elsewhere which model
if weights is None:
# Get model with pretrained weights.
base_model = InceptionV3(
weights='imagenet',
include_top=True
)
# We'll extract features at the final pool layer.
self.model = Model(
inputs=base_model.input,
outputs=base_model.get_layer('avg_pool').output
)
else:
# Load the model first.
self.model = load_model(weights)
# Then remove the top so we get features not predictions.
# From: https://github.com/fchollet/keras/issues/2371
self.model.layers.pop()
self.model.layers.pop() # two pops to get to pool layer
self.model.outputs = [self.model.layers[-1].output]
self.model.output_layers = [self.model.layers[-1]]
self.model.layers[-1].outbound_nodes = []
def extract(self, image_path):
img = image.load_img(image_path, target_size=(299, 299))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
# Get the prediction.
features = self.model.predict(x)
if self.weights is None:
# For imagenet/default network:
features = features[0]
else:
# For loaded network:
features = features[0]
return features
| from keras.preprocessing import image
from keras.applications.inception_v3 import InceptionV3, preprocess_input
from keras.models import Model, load_model
from keras.layers import Input
import numpy as np
class Extractor():
def __init__(self, weights=None):
"""Either load pretrained from imagenet, or load our saved
weights from our own training."""
self.weights = weights # so we can check elsewhere which model
if weights is None:
# Get model with pretrained weights.
base_model = InceptionV3(
weights='imagenet',
include_top=True
)
# We'll extract features at the final pool layer.
self.model = Model(
inputs=base_model.input,
outputs=base_model.get_layer('avg_pool').output
)
else:
# Load the model first.
self.model = load_model(weights)
# Then remove the top so we get features not predictions.
# From: https://github.com/fchollet/keras/issues/2371
self.model.layers.pop()
self.model.layers.pop() # two pops to get to pool layer
self.model.outputs = [self.model.layers[-1].output]
self.model.output_layers = [self.model.layers[-1]]
self.model.layers[-1].outbound_nodes = []
def extract(self, image_path):
img = image.load_img(image_path, target_size=(299, 299))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
# Get the prediction.
features = self.model.predict(x)
if self.weights is None:
# For imagenet/default network:
features = features[0][0][0]
else:
# For loaded network:
features = features[0]
return features
| mit | Python |
02ec0611a708a293fe4d5056152d7a229e908690 | Add option to save at intermediary points. | e-koch/Phys-595 | project_code/bulk_fitting.py | project_code/bulk_fitting.py |
'''
Bulk spectral line fitting with SDSS galaxy spectra
'''
import os
from astropy.io import fits
from pandas import DataFrame
# Bring in the package funcs
from specfit import do_specfit
from download_spectra import download_spectra
def bulk_fit(obs_file, output_file, keep_spectra=False, split_save=True,
num_save=10):
'''
Downloads files based off of the entries in the given file, performs
spectral line fitting and saves the results to a FITS table.
'''
# Open the file
data_file = fits.open(obs_file)
spectra_data = data_file[1].data
del data_file
num_spectra = spectra_data.size
save_nums = [(num_spectra/num_save)*(i+1) for i in range(num_save)]
save_nums[-1] = num_spectra
for i in range(num_spectra):
spec_info = spectra_data[i]
# Download the spectrum
spec_name = \
download_spectra(spec_info['PLATE'], spec_info['FIBERID'],
spec_info['MJD'], spec_info['SURVEY'])
spec_df = do_specfit(spec_name, verbose=False)
if i == 0:
df = DataFrame(spec_df, columns=[spec_name[:-5]])
else:
df[spec_name[:-5]] = spec_df
if split_save and i in save_nums:
posn = [j for j, x in enumerate(save_nums) if x == i][0]
df.to_csv(output_file[:-4]+"_"+str(posn+1))
if not keep_spectra:
os.system('rm ' + spec_name)
if not split_save:
df.to_csv(output_file)
return
if __name__ == "__main__":
import sys
input_file = str(sys.argv[1])
output_file = str(sys.argv[2])
bulk_fit(input_file, output_file)
|
'''
Bulk spectral line fitting with SDSS galaxy spectra
'''
import os
from astropy.io import fits
from pandas import DataFrame
# Bring in the package funcs
from specfit import do_specfit
from download_spectra import download_spectra
def bulk_fit(obs_file, output_file, keep_spectra=False):
'''
Downloads files based off of the entries in the given file, performs
spectral line fitting and saves the results to a FITS table.
'''
# Open the file
data_file = fits.open(obs_file)
spectra_data = data_file[1].data
del data_file
num_spectra = spectra_data.size
for i in range(num_spectra):
spec_info = spectra_data[i]
# Download the spectrum
spec_name = \
download_spectra(spec_info['PLATE'], spec_info['FIBERID'],
spec_info['MJD'], spec_info['SURVEY'])
spec_df = do_specfit(spec_name, verbose=False)
if i == 0:
df = DataFrame(spec_df, columns=[spec_name[:-5]])
else:
df[spec_name[:-5]] = spec_df
if not keep_spectra:
os.system('rm ' + spec_name)
df.to_csv(output_file)
return
if __name__ == "__main__":
import sys
input_file = str(sys.argv[1])
output_file = str(sys.argv[2])
bulk_fit(input_file, output_file)
| mit | Python |
0bf34225dc687270c263bfc9f4555c9709eef1ad | use req.GET instead of deprecated req.REQUEST | misli/django-fas | fas/views.py | fas/views.py | from django.conf import settings
from django.contrib import messages
from django.contrib.auth import login as auth_login, logout as auth_logout, authenticate, REDIRECT_FIELD_NAME
from django.core.urlresolvers import resolve
from django.http import HttpResponseRedirect
from django.shortcuts import resolve_url, redirect
from django.utils.translation import ugettext_lazy as _
try:
from urllib.parse import urlsplit, parse_qs, urlencode, urlunsplit
except ImportError:
# python 2 compatible
from urllib import urlencode
from urlparse import urlsplit, parse_qs, urlunsplit
from .consumer import Consumer, SUCCESS, CANCEL, FAILURE, SETUP_NEEDED
STATUS_MESSAGES = {
SUCCESS: _('Login successful.'),
CANCEL: _('Login canceled.'),
FAILURE: _('Login failed.'),
SETUP_NEEDED: _('Login needs setup.'),
}
def redirect_next(request, field_name, settings_name):
try:
# get safe url from user input
url = request.GET[field_name]
url = urlunsplit(('','')+urlsplit(url)[2:])
except:
url = resolve_url(getattr(settings, settings_name, '/'))
return HttpResponseRedirect(url)
def login(request, redirect_field_name=REDIRECT_FIELD_NAME,
complete_view='fas-complete'):
complete_url = resolve_url(complete_view)
if redirect_field_name in request.GET:
(scheme, netloc, path, query_string, fragment) = urlsplit(complete_url)
fields = parse_qs(query_string)
fields[redirect_field_name] = request.GET[redirect_field_name]
complete_url = urlunsplit(('', '', path, urlencode(fields), fragment))
return redirect(Consumer(request).get_url(complete_url=complete_url))
def complete(request, redirect_field_name=REDIRECT_FIELD_NAME):
response = Consumer(request).complete()
message = STATUS_MESSAGES[response.status]
user = authenticate(response=response)
if user:
auth_login(request, user)
messages.success(request, message)
return redirect_next(request, redirect_field_name, 'LOGIN_REDIRECT_URL')
else:
messages.error(request, message)
return redirect_next(request, redirect_field_name, 'LOGIN_FAIL_REDIRECT_URL')
def logout(request, redirect_field_name=REDIRECT_FIELD_NAME):
auth_logout(request)
messages.success(request, _('Successfully logged out.'))
return redirect_next(request, redirect_field_name, 'LOGOUT_REDIRECT_URL')
| from django.conf import settings
from django.contrib import messages
from django.contrib.auth import login as auth_login, logout as auth_logout, authenticate, REDIRECT_FIELD_NAME
from django.core.urlresolvers import resolve
from django.http import HttpResponseRedirect
from django.shortcuts import resolve_url, redirect
from django.utils.translation import ugettext_lazy as _
try:
from urllib.parse import urlsplit, parse_qs, urlencode, urlunsplit
except ImportError:
# python 2 compatible
from urllib import urlencode
from urlparse import urlsplit, parse_qs, urlunsplit
from .consumer import Consumer, SUCCESS, CANCEL, FAILURE, SETUP_NEEDED
STATUS_MESSAGES = {
SUCCESS: _('Login successful.'),
CANCEL: _('Login canceled.'),
FAILURE: _('Login failed.'),
SETUP_NEEDED: _('Login needs setup.'),
}
def redirect_next(request, field_name, settings_name):
try:
# get safe url from user input
url = request.REQUEST[field_name]
url = urlunsplit(('','')+urlsplit(url)[2:])
except:
url = resolve_url(getattr(settings, settings_name, '/'))
return HttpResponseRedirect(url)
def login(request, redirect_field_name=REDIRECT_FIELD_NAME,
complete_view='fas-complete'):
complete_url = resolve_url(complete_view)
if redirect_field_name in request.REQUEST:
(scheme, netloc, path, query_string, fragment) = urlsplit(complete_url)
fields = parse_qs(query_string)
fields[redirect_field_name] = request.REQUEST[redirect_field_name]
complete_url = urlunsplit(('', '', path, urlencode(fields), fragment))
return redirect(Consumer(request).get_url(complete_url=complete_url))
def complete(request, redirect_field_name=REDIRECT_FIELD_NAME):
response = Consumer(request).complete()
message = STATUS_MESSAGES[response.status]
user = authenticate(response=response)
if user:
auth_login(request, user)
messages.success(request, message)
return redirect_next(request, redirect_field_name, 'LOGIN_REDIRECT_URL')
else:
messages.error(request, message)
return redirect_next(request, redirect_field_name, 'LOGIN_FAIL_REDIRECT_URL')
def logout(request, redirect_field_name=REDIRECT_FIELD_NAME):
auth_logout(request)
messages.success(request, _('Successfully logged out.'))
return redirect_next(request, redirect_field_name, 'LOGOUT_REDIRECT_URL')
| bsd-3-clause | Python |
88fa50d94345716efa50fa8c970abd5b513c7c7f | Fix typo in version label | amperser/proselint,amperser/proselint,jstewmon/proselint,amperser/proselint,amperser/proselint,jstewmon/proselint,jstewmon/proselint,amperser/proselint | proselint/version.py | proselint/version.py | """Proselint version number."""
__version__ = "0.3.4"
| """Wallace version number."""
__version__ = "0.3.4"
| bsd-3-clause | Python |
ad758c5f44bffe0b7468b9ccf2c5c4747cdba42c | add ast parse | faycheng/tpl,faycheng/tpl | tpl/render.py | tpl/render.py | # -*- coding:utf-8 -*-
import jinja2
import ast
from candy_prompt.prompt import prompt
env = jinja2.Environment(undefined=jinja2.StrictUndefined)
def render(tpl_text, context):
try:
return env.from_string(tpl_text).render(context)
except jinja2.UndefinedError as e:
undefined_var = e.message[1:-14]
value = prompt('{}: '.format(undefined_var))
try:
value = ast.literal_eval(value)
except Exception:
value = value
context.setdefault(undefined_var, value)
return render(tpl_text, context)
| # -*- coding:utf-8 -*-
import jinja2
from candy_prompt.prompt import prompt
env = jinja2.Environment(undefined=jinja2.StrictUndefined)
def render(tpl_text, context):
try:
return env.from_string(tpl_text).render(context)
except jinja2.UndefinedError as e:
undefined_var = e.message[1:-14]
value = prompt('{}: '.format(undefined_var))
context.setdefault(undefined_var, value)
return render(tpl_text, context)
| mit | Python |
0548708fa1d3bf4f4b6ad570714f3334bafb14ac | Fix help for project parser | ghickman/hoard-cli | trove/main.py | trove/main.py | import argparse
from frontend import FrontEnd
def run():
f = FrontEnd()
parser = argparse.ArgumentParser(
prog='trove',
epilog="See '%(prog)s <command> --help' for more help on a specific command."
)
sub_parsers = parser.add_subparsers(title='Commands')
get_parser = sub_parsers.add_parser('get', help='Retrieve variables for a specific deployment')
get_parser.add_argument('get', action='store_true', help=argparse.SUPPRESS)
get_parser.add_argument('--project')
get_parser.add_argument('--env')
get_parser.set_defaults(func=f.get)
set_parser = sub_parsers.add_parser('set', help='Create/update variable(s) in a specific deployment.')
set_parser.add_argument('set', nargs='+', metavar='VAR=value')
set_parser.add_argument('--project')
set_parser.add_argument('--env')
set_parser.set_defaults(func=f.set)
rm_parser = sub_parsers.add_parser('rm', help='Delete variable(s) in a specific deployment.')
rm_parser.add_argument('rm', nargs='+', metavar='VAR')
rm_parser.add_argument('--project')
rm_parser.add_argument('--env')
rm_parser.set_defaults(func=f.rm)
project_parser = sub_parsers.add_parser('project', help='Project specific commands.')
project_parser.add_argument('project', nargs='?', metavar='PROJECT', default='', help='default to project specified in ~/.troverc or .trove')
project_parser.add_argument('-a', '--all', action='store_true', help='show all projects')
project_parser.add_argument('--add', metavar='PROJECT')
project_parser.add_argument('-e', '--envs', action='store_true', help='show specified project\'s envs')
project_parser.set_defaults(func=f.project)
env_parser = sub_parsers.add_parser('env', help='Environment specific commands.')
env_parser.add_argument('env', action='store_true', help=argparse.SUPPRESS)
env_parser.add_argument('-a', '--all', action='store_true', help='show all envs')
env_parser.set_defaults(func=f.env)
login_parser = sub_parsers.add_parser('login', help='Authenticate with the server backend.')
login_parser.add_argument('login', action='store_true', help=argparse.SUPPRESS)
login_parser.set_defaults(func=f.login)
logout_parser = sub_parsers.add_parser('logout', help='Clear auth details.')
logout_parser.add_argument('logout', action='store_true', help=argparse.SUPPRESS)
logout_parser.set_defaults(func=f.logout)
args = parser.parse_args()
args.func(args)
if __name__ == '__main__':
run()
| import argparse
from frontend import FrontEnd
def run():
f = FrontEnd()
parser = argparse.ArgumentParser(
prog='trove',
epilog="See '%(prog)s <command> --help' for more help on a specific command."
)
sub_parsers = parser.add_subparsers(title='Commands')
get_parser = sub_parsers.add_parser('get', help='Retrieve variables for a specific deployment')
get_parser.add_argument('get', action='store_true', help=argparse.SUPPRESS)
get_parser.add_argument('--project')
get_parser.add_argument('--env')
get_parser.set_defaults(func=f.get)
set_parser = sub_parsers.add_parser('set', help='Create/update variable(s) in a specific deployment.')
set_parser.add_argument('set', nargs='+', metavar='VAR=value')
set_parser.add_argument('--project')
set_parser.add_argument('--env')
set_parser.set_defaults(func=f.set)
rm_parser = sub_parsers.add_parser('rm', help='Delete variable(s) in a specific deployment.')
rm_parser.add_argument('rm', nargs='+', metavar='VAR')
rm_parser.add_argument('--project')
rm_parser.add_argument('--env')
rm_parser.set_defaults(func=f.rm)
project_parser = sub_parsers.add_parser('project', help='Project specific commands.')
project_parser.add_argument('project', nargs='?', metavar='PROJECT', default='', help='default to project specified in ~/.troverc or .trove')
project_parser.add_argument('-a', '--all', action='store_true', help='show all envs')
project_parser.add_argument('--add', metavar='PROJECT')
project_parser.add_argument('-e', '--envs', action='store_true', help='show specified project\'s envs')
project_parser.set_defaults(func=f.project)
env_parser = sub_parsers.add_parser('env', help='Environment specific commands.')
env_parser.add_argument('env', action='store_true', help=argparse.SUPPRESS)
env_parser.add_argument('-a', '--all', action='store_true', help='show all envs')
env_parser.set_defaults(func=f.env)
login_parser = sub_parsers.add_parser('login', help='Authenticate with the server backend.')
login_parser.add_argument('login', action='store_true', help=argparse.SUPPRESS)
login_parser.set_defaults(func=f.login)
logout_parser = sub_parsers.add_parser('logout', help='Clear auth details.')
logout_parser.add_argument('logout', action='store_true', help=argparse.SUPPRESS)
logout_parser.set_defaults(func=f.logout)
args = parser.parse_args()
args.func(args)
if __name__ == '__main__':
run()
| mit | Python |
0eafefa9546fee3195198c54eb3362cbaa39ea8d | Update constants.py | pathakvaidehi2391/WorkSpace,pathakvaidehi2391/WorkSpace | azurecloudify/constants.py | azurecloudify/constants.py | ########
# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# Look at https://github.com/cloudify-cosmo/cloudify-aws-plugin/blob/1.2/ec2/constants.py
# instance module constants
credentials =''
COMMON_REQ_PROPERTIES=['subscription_id','location']
api_version='2015-05-01-preview'
api_version_resource_group='2015-01-01'
storage_account_type = 'Standard_LRS'
vnet_address_prefixes = ["10.1.0.0/16","10.2.0.0/16"]
subnet_name = 'Subnet-1'
address_prefix = "10.1.0.0/24"
ip_config_name = 'myip1'
image_reference_publisher = 'Canonical'
image_reference_offer = 'UbuntuServer'
image_reference_sku = '14.04.2-LTS'
image_reference_version = 'latest'
os_disk_name = 'osdisk'
vm_caching = 'ReadWrite'
vm_createOption = 'FromImage'
admin_username='azuretest'
vm_version="latest"
azure_url='https://management.azure.com'
login_url='https://login.microsoftonline.com'
sourcePortRange=65000
destinationPortRange=60000
priority=100
resource='https://management.core.windows.net/'
RESOURCE_GROUP_REQUIRED_PROPERTIES=['vm_name','location','subscription_id']
STORAGE_ACCOUNT_REQUIRED_PROPERTIES = ['vm_name','location','subscription_id']
VNET_REQUIRED_PROPERTIES = ['vm_name','location','subscription_id']
VM_REQUIRED_PROPERTIES = ['vm_name','vm_os_type','vm_size','subscription_id','key_data','location']
NIC_REQUIRED_PROPERTIES = ['vm_name','location','subscription_id']
PUBLIC_IP_REQUIRED_PROPERTIES = ['vm_name','location','subscription_id']
| ########
# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# Look at https://github.com/cloudify-cosmo/cloudify-aws-plugin/blob/1.2/ec2/constants.py
# instance module constants
credentials =''
COMMON_REQ_PROPERTIES=['subscription_id','location']
api_version='2015-05-01-preview'
api_version_resource_group='2015-01-01'
storage_account_type = 'Standard_LRS'
vnet_address_prefixes = ["10.1.0.0/16","10.2.0.0/16"]
subnet_name = 'Subnet-1'
address_prefix = "10.1.0.0/24"
ip_config_name = 'myip1'
image_reference_publisher = 'Canonical'
image_reference_offer = 'UbuntuServer'
image_reference_sku = '14.04.2-LTS'
image_reference_version = 'latest'
os_disk_name = 'osdisk'
vm_caching = 'ReadWrite'
vm_createOption = 'FromImage'
admin_username='azuretest'
vm_version="latest"
azure_url='https://management.azure.com'
login_url='https://login.microsoftonline.com'
sourcePortRange=65000
destinationPortRange=60000
priority=100
resource=''https://management.core.windows.net/'
RESOURCE_GROUP_REQUIRED_PROPERTIES=['vm_name','location','subscription_id']
STORAGE_ACCOUNT_REQUIRED_PROPERTIES = ['vm_name','location','subscription_id']
VNET_REQUIRED_PROPERTIES = ['vm_name','location','subscription_id']
VM_REQUIRED_PROPERTIES = ['vm_name','vm_os_type','vm_size','subscription_id','key_data','location']
NIC_REQUIRED_PROPERTIES = ['vm_name','location','subscription_id']
PUBLIC_IP_REQUIRED_PROPERTIES = ['vm_name','location','subscription_id']
| apache-2.0 | Python |
9222b5ab40aab356b32bea75ca32193bd556f85a | Update constants.py | pathakvaidehi2391/WorkSpace,pathakvaidehi2391/WorkSpace | azurecloudify/constants.py | azurecloudify/constants.py | ########
# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# Look at https://github.com/cloudify-cosmo/cloudify-aws-plugin/blob/1.2/ec2/constants.py
# instance module constants
subscription_id = 'REPLACE_WITH_SUBSCRIPTION_ID'
COMMON_REQ_PROPERTIES=['subscription_id','location']
api_version='2015-01-01'
storage_account_type = 'Standard_LRS'
vnet_address_prefixes = ["10.1.0.0/16","10.2.0.0/16"]
subnet_name = 'Subnet-1'
address_prefix = "10.1.0.0/24"
ip_config_name = 'myip1'
image_reference_publisher = 'Canonical'
image_reference_offer = 'UbuntuServer'
image_reference_sku = '14.04.2-LTS'
image_reference_version = 'latest'
os_disk_name = 'osdisk'
vm_caching = 'ReadWrite'
vm_createOption = 'FromImage'
admin_username='azuretest'
vm_version="latest"
azure_url='https://management.azure.com'
RESOURCE_GROUP_REQUIRED_PROPERTIES=['resource_group_name','location']
STORAGE_ACCOUNT_REQUIRED_PROPERTIES = ['storage_account_name','location']
VNET_REQUIRED_PROPERTIES = ['vnet_name', 'location']
VM_REQUIRED_PROPERTIES = ['vm_name','vm_size','vm_type','vm_admin_username','key_data']
NIC_REQUIRED_PROPERTIES = ['nic_name','location']
PUBLIC_IP_REQUIRED_PROPERTIES = ['public_ip_name','location']
| ########
# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# Look at https://github.com/cloudify-cosmo/cloudify-aws-plugin/blob/1.2/ec2/constants.py
# instance module constants
subscription_id = 'REPLACE_WITH_SUBSCRIPTION_ID'
COMMON_REQ_PROPERTIES=['subscription_id','location']
api_version='2015-05-01-preview'
storage_account_type = 'Standard_LRS'
vnet_address_prefixes = ["10.1.0.0/16","10.2.0.0/16"]
subnet_name = 'Subnet-1'
address_prefix = "10.1.0.0/24"
ip_config_name = 'myip1'
image_reference_publisher = 'Canonical'
image_reference_offer = 'UbuntuServer'
image_reference_sku = '14.04.2-LTS'
image_reference_version = 'latest'
os_disk_name = 'osdisk'
vm_caching = 'ReadWrite'
vm_createOption = 'FromImage'
admin_username='azuretest'
vm_version="latest"
azure_url='https://management.azure.com'
RESOURCE_GROUP_REQUIRED_PROPERTIES=['resource_group_name','location']
STORAGE_ACCOUNT_REQUIRED_PROPERTIES = ['storage_account_name','location']
VNET_REQUIRED_PROPERTIES = ['vnet_name', 'location']
VM_REQUIRED_PROPERTIES = ['vm_name','vm_size','vm_type','vm_admin_username','key_data']
NIC_REQUIRED_PROPERTIES = ['nic_name','location']
PUBLIC_IP_REQUIRED_PROPERTIES = ['public_ip_name','location']
| apache-2.0 | Python |
6416d04d073310b7195b27fc0b33db6fec6c4a8b | Add URL-safe versions of base64 encode/decode | tomscript/babbage,tomscript/babbage,tomscript/babbage,tomscript/babbage,tomscript/babbage | backend/plugins/base_64.py | backend/plugins/base_64.py | """Base64 plugin for Babbage.
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__author__ = 'tomfitzgerald@google.com (Tom Fitzgerald)'
import base64
class Base64Encode(object):
def __init__(self):
self.name = 'Base 64 encode'
self.description = 'Returns a base 64 encoded string.'
self.options = []
def Process(self, incoming_data, unused_options):
"""Simple base64 encoding.
Args:
incoming_data: String of data to process.
unused_options: Not used.
Returns:
Base64 encoded string.
"""
return base64.b64encode(incoming_data)
class Base64Decode(object):
def __init__(self):
self.name = 'Base 64 decode'
self.description = 'Returns a base 64 decoded string.'
self.options = []
def Process(self, incoming_data, unused_options):
"""Simple base64 decode, accepting strings with omitted padding.
Args:
incoming_data: String of data to process, with or without padding.
unused_options: Not used.
Returns:
Base64 decoded string.
"""
missing_padding = 4 - len(incoming_data) % 4
return base64.b64decode(incoming_data + '=' * missing_padding)
class UrlSafeBase64Encode(object):
def __init__(self):
self.name = 'URL-safe Base 64 encode'
self.description = 'Returns a URL-safe base 64 encoded string.'
self.options = 0
def Process(self, incoming_data, unused_options):
"""URL-safe base64 encoding.
Substitutes - instead of + and _ instead of / in the standard Base64 alphabet.
Args:
incoming_data: String of data to process.
unused_options: Not used.
Returns:
URL-safe Base64 encoded string.
"""
return base64.urlsafe_b64encode(incoming_data)
class UrlSafeBase64Decode(object):
def __init__(self):
self.name = 'URL-safe Base 64 decode'
self.description = 'Returns a url-safe base 64 decoded string.'
self.options = 0
def Process(self, incoming_data, unused_options):
"""URL-safe base64 decode, accepting strings with omitted padding.
Substitutes - instead of + and _ instead of / in the standard Base64 alphabet.
Args:
incoming_data: String of data to process, with or without padding.
unused_options: Not used.
Returns:
URL-safe Base64 decoded string.
"""
missing_padding = 4 - len(incoming_data) % 4
return base64.urlsafe_b64decode(incoming_data + '=' * missing_padding)
| """Base64 plugin for Babbage.
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__author__ = 'tomfitzgerald@google.com (Tom Fitzgerald)'
import base64
class Base64Encode(object):
def __init__(self):
self.name = 'Base 64 encode'
self.description = 'Returns a base 64 encoded string.'
self.options = []
def Process(self, incoming_data, unused_options):
"""Simple base64 encoding.
Args:
incoming_data: String of data to process.
unused_options: Not used.
Returns:
Base64 encoded string.
"""
return base64.b64encode(incoming_data)
class Base64Decode(object):
def __init__(self):
self.name = 'Base 64 decode'
self.description = 'Returns a base 64 decoded string.'
self.options = []
def Process(self, incoming_data, unused_options):
"""Simple base64 decode, accepting strings with omitted padding.
Args:
incoming_data: String of data to process, with or without padding.
unused_options: Not used.
Returns:
Base64 decoded string.
"""
missing_padding = 4 - len(incoming_data) % 4
return base64.b64decode(incoming_data + '=' * missing_padding)
| apache-2.0 | Python |
72cd4718224b551e45c87de9d4074524e0f84d8b | Add a logger. | jilljenn/voyageavecmoi,jilljenn/voyageavecmoi,jilljenn/voyageavecmoi | backend/realtime_worker.py | backend/realtime_worker.py | import os
from retry.api import retry
from secret import CONSUMER_KEY, CONSUMER_SECRET
import twitter
import rethinkdb as r
MAX_TRIES = 3 # Number of times we try to get a tweet before giving up
def isTransportOffer(tweet):
return '#VoyageAvecMoi' in tweet['text']
def get_tweet_data(tweet):
return {
'text': tweet['text'],
'id': tweet['id_str'],
'user': {
'id': tweet['user']['id_str'],
'name': tweet['user']['name'],
'screen_name': tweet['user']['screen_name']
},
'confirmedAsOffer': False
}
class Logger:
def warning(self, fmt, error, delay):
print('Error: %r' % error)
print('Retrying in %s seconds.' % delay)
@retry(delay=1, backoff=2, logger=Logger())
def fetch_tweets(db, stream):
for tweet in stream.statuses.filter(track='#VoyageAvecMoi'):
print ('Got tweet.')
if isTransportOffer(tweet):
print ('Adding tweet from @{}'.format(tweet['user']['screen_name']))
tweet_data = get_tweet_data(tweet)
r.db('voyageavecmoi').table('offers').insert(tweet_data).run(db)
print('Done.')
MY_TWITTER_CREDS = os.path.expanduser('~/.my_app_credentials')
if not os.path.exists(MY_TWITTER_CREDS):
twitter.oauth_dance("Voyage avec moi", CONSUMER_KEY, CONSUMER_SECRET,
MY_TWITTER_CREDS)
oauth_token, oauth_secret = twitter.read_token_file(MY_TWITTER_CREDS)
auth = twitter.OAuth(oauth_token, oauth_secret, CONSUMER_KEY, CONSUMER_SECRET)
stream = twitter.TwitterStream(auth=auth)
db = r.connect('localhost', 28015)
fetch_tweets(db, stream)
| import os
from retry.api import retry
from secret import CONSUMER_KEY, CONSUMER_SECRET
import twitter
import rethinkdb as r
MAX_TRIES = 3 # Number of times we try to get a tweet before giving up
def isTransportOffer(tweet):
return '#VoyageAvecMoi' in tweet['text']
def get_tweet_data(tweet):
return {
'text': tweet['text'],
'id': tweet['id_str'],
'user': {
'id': tweet['user']['id_str'],
'name': tweet['user']['name'],
'screen_name': tweet['user']['screen_name']
},
'confirmedAsOffer': False
}
@retry(delay=1, backoff=2)
def fetch_tweets(db, stream):
for tweet in stream.statuses.filter(track='#VoyageAvecMoi'):
print ('Got tweet.')
if isTransportOffer(tweet):
print ('Adding tweet from @{}'.format(tweet['user']['screen_name']))
tweet_data = get_tweet_data(tweet)
r.db('voyageavecmoi').table('offers').insert(tweet_data).run(db)
print('Done.')
MY_TWITTER_CREDS = os.path.expanduser('~/.my_app_credentials')
if not os.path.exists(MY_TWITTER_CREDS):
twitter.oauth_dance("Voyage avec moi", CONSUMER_KEY, CONSUMER_SECRET,
MY_TWITTER_CREDS)
oauth_token, oauth_secret = twitter.read_token_file(MY_TWITTER_CREDS)
auth = twitter.OAuth(oauth_token, oauth_secret, CONSUMER_KEY, CONSUMER_SECRET)
stream = twitter.TwitterStream(auth=auth)
db = r.connect('localhost', 28015)
fetch_tweets(db, stream)
| agpl-3.0 | Python |
25abe35424aae1ba93f82f5f4adf3de21478cdea | Rename get image url function | gena/qgis-earthengine-plugin,gena/qgis-earthengine-plugin | ee_plugin/utils.py | ee_plugin/utils.py | # -*- coding: utf-8 -*-
"""
Utils functions GEE
"""
from qgis.core import QgsRasterLayer, QgsProject
from qgis.utils import iface
import ee
def get_ee_image_url(image):
map_id = ee.data.getMapId({'image': image})
url = map_id['tile_fetcher'].url_format
return url
def update_ee_layer_properties(layer, image, shown, opacity):
layer.setCustomProperty('ee-layer', True)
if not (opacity is None):
layer.renderer().setOpacity(opacity)
# serialize EE code
ee_script = image.serialize()
layer.setCustomProperty('ee-script', ee_script)
def add_ee_image_layer(image, name, shown, opacity):
url = "type=xyz&url=" + get_ee_image_url(image)
layer = QgsRasterLayer(url, name, "wms")
update_ee_layer_properties(layer, image, shown, opacity)
QgsProject.instance().addMapLayer(layer)
if not (shown is None):
QgsProject.instance().layerTreeRoot().findLayer(layer.id()).setItemVisibilityChecked(shown)
def update_ee_image_layer(image, layer, shown=True, opacity=1.0):
url = "type=xyz&url=" + get_ee_image_url(image)
layer.dataProvider().setDataSourceUri(url)
layer.dataProvider().reloadData()
update_ee_layer_properties(layer, image, shown, opacity)
layer.triggerRepaint()
layer.reload()
iface.mapCanvas().refresh()
item = QgsProject.instance().layerTreeRoot().findLayer(layer.id())
if not (shown is None):
item.setItemVisibilityChecked(shown)
def get_layer_by_name(name):
layers = QgsProject.instance().mapLayers().values()
for l in layers:
if l.name() == name:
return l
return None
def add_or_update_ee_image_layer(image, name, shown=True, opacity=1.0):
layer = get_layer_by_name(name)
if layer:
if not layer.customProperty('ee-layer'):
raise Exception('Layer is not an EE layer: ' + name)
update_ee_image_layer(image, layer, shown, opacity)
else:
add_ee_image_layer(image, name, shown, opacity)
def add_ee_catalog_image(name, asset_name, visParams, collection_props):
image = None
if collection_props:
raise Exception('Not supported yet')
else:
image = ee.Image(asset_name).visualize(visParams)
add_or_update_ee_image_layer(image, name)
| # -*- coding: utf-8 -*-
"""
Utils functions GEE
"""
from qgis.core import QgsRasterLayer, QgsProject
from qgis.utils import iface
import ee
def get_image_url(image):
map_id = ee.data.getMapId({'image': image})
url = map_id['tile_fetcher'].url_format
return url
def update_ee_layer_properties(layer, image, shown, opacity):
layer.setCustomProperty('ee-layer', True)
if not (opacity is None):
layer.renderer().setOpacity(opacity)
# serialize EE code
ee_script = image.serialize()
layer.setCustomProperty('ee-script', ee_script)
def add_ee_image_layer(image, name, shown, opacity):
url = "type=xyz&url=" + get_image_url(image)
layer = QgsRasterLayer(url, name, "wms")
update_ee_layer_properties(layer, image, shown, opacity)
QgsProject.instance().addMapLayer(layer)
if not (shown is None):
QgsProject.instance().layerTreeRoot().findLayer(layer.id()).setItemVisibilityChecked(shown)
def update_ee_image_layer(image, layer, shown=True, opacity=1.0):
url = "type=xyz&url=" + get_image_url(image)
layer.dataProvider().setDataSourceUri(url)
layer.dataProvider().reloadData()
update_ee_layer_properties(layer, image, shown, opacity)
layer.triggerRepaint()
layer.reload()
iface.mapCanvas().refresh()
item = QgsProject.instance().layerTreeRoot().findLayer(layer.id())
if not (shown is None):
item.setItemVisibilityChecked(shown)
def get_layer_by_name(name):
layers = QgsProject.instance().mapLayers().values()
for l in layers:
if l.name() == name:
return l
return None
def add_or_update_ee_image_layer(image, name, shown=True, opacity=1.0):
layer = get_layer_by_name(name)
if layer:
if not layer.customProperty('ee-layer'):
raise Exception('Layer is not an EE layer: ' + name)
update_ee_image_layer(image, layer, shown, opacity)
else:
add_ee_image_layer(image, name, shown, opacity)
def add_ee_catalog_image(name, asset_name, visParams, collection_props):
image = None
if collection_props:
raise Exception('Not supported yet')
else:
image = ee.Image(asset_name).visualize(visParams)
add_or_update_ee_image_layer(image, name)
| mit | Python |
e1422bb5e4a510b8f1844f50af0c4f1659b5cdfe | handle EOF and ProcessLookupError | OTAkeys/RIOT,rfuentess/RIOT,OTAkeys/RIOT,A-Paul/RIOT,neiljay/RIOT,kYc0o/RIOT,OTAkeys/RIOT,OlegHahm/RIOT,kYc0o/RIOT,authmillenon/RIOT,basilfx/RIOT,cladmi/RIOT,gebart/RIOT,lazytech-org/RIOT,immesys/RiSyn,yogo1212/RIOT,toonst/RIOT,RIOT-OS/RIOT,BytesGalore/RIOT,A-Paul/RIOT,OlegHahm/RIOT,biboc/RIOT,LudwigKnuepfer/RIOT,kbumsik/RIOT,A-Paul/RIOT,aeneby/RIOT,authmillenon/RIOT,x3ro/RIOT,authmillenon/RIOT,RIOT-OS/RIOT,LudwigOrtmann/RIOT,cladmi/RIOT,LudwigOrtmann/RIOT,OTAkeys/RIOT,kaspar030/RIOT,gebart/RIOT,adrianghc/RIOT,ks156/RIOT,A-Paul/RIOT,toonst/RIOT,x3ro/RIOT,adrianghc/RIOT,LudwigKnuepfer/RIOT,basilfx/RIOT,biboc/RIOT,Josar/RIOT,rfuentess/RIOT,jasonatran/RIOT,RIOT-OS/RIOT,jasonatran/RIOT,LudwigKnuepfer/RIOT,miri64/RIOT,kbumsik/RIOT,OlegHahm/RIOT,kYc0o/RIOT,LudwigOrtmann/RIOT,immesys/RiSyn,smlng/RIOT,neiljay/RIOT,gebart/RIOT,aeneby/RIOT,immesys/RiSyn,rfuentess/RIOT,aeneby/RIOT,kaspar030/RIOT,josephnoir/RIOT,mfrey/RIOT,ks156/RIOT,Josar/RIOT,basilfx/RIOT,kaspar030/RIOT,josephnoir/RIOT,smlng/RIOT,OlegHahm/RIOT,lazytech-org/RIOT,mtausig/RIOT,immesys/RiSyn,immesys/RiSyn,mtausig/RIOT,rfuentess/RIOT,kaspar030/RIOT,ks156/RIOT,aeneby/RIOT,yogo1212/RIOT,ks156/RIOT,kbumsik/RIOT,biboc/RIOT,adrianghc/RIOT,avmelnikoff/RIOT,mfrey/RIOT,toonst/RIOT,cladmi/RIOT,rfuentess/RIOT,OlegHahm/RIOT,mfrey/RIOT,ant9000/RIOT,smlng/RIOT,gebart/RIOT,jasonatran/RIOT,A-Paul/RIOT,roberthartung/RIOT,RIOT-OS/RIOT,kbumsik/RIOT,authmillenon/RIOT,josephnoir/RIOT,lazytech-org/RIOT,LudwigKnuepfer/RIOT,neiljay/RIOT,smlng/RIOT,adrianghc/RIOT,authmillenon/RIOT,toonst/RIOT,ks156/RIOT,gebart/RIOT,Josar/RIOT,BytesGalore/RIOT,roberthartung/RIOT,avmelnikoff/RIOT,BytesGalore/RIOT,cladmi/RIOT,aeneby/RIOT,smlng/RIOT,x3ro/RIOT,roberthartung/RIOT,x3ro/RIOT,miri64/RIOT,mtausig/RIOT,josephnoir/RIOT,miri64/RIOT,neiljay/RIOT,ant9000/RIOT,roberthartung/RIOT,yogo1212/RIOT,Josar/RIOT,Josar/RIOT,lazytech-org/RIOT,neiljay/RIOT,ant9000/RIOT,avmelnikoff/RIOT,adrianghc/RIOT,OTAkeys/RIOT,kbumsik/RIOT,kYc0o/RIOT,kaspar030/RIOT,miri64/RIOT,biboc/RIOT,BytesGalore/RIOT,immesys/RiSyn,roberthartung/RIOT,x3ro/RIOT,avmelnikoff/RIOT,LudwigOrtmann/RIOT,authmillenon/RIOT,toonst/RIOT,kYc0o/RIOT,ant9000/RIOT,mfrey/RIOT,mtausig/RIOT,LudwigKnuepfer/RIOT,ant9000/RIOT,BytesGalore/RIOT,basilfx/RIOT,jasonatran/RIOT,LudwigOrtmann/RIOT,LudwigOrtmann/RIOT,yogo1212/RIOT,biboc/RIOT,mtausig/RIOT,lazytech-org/RIOT,RIOT-OS/RIOT,basilfx/RIOT,mfrey/RIOT,jasonatran/RIOT,yogo1212/RIOT,yogo1212/RIOT,josephnoir/RIOT,cladmi/RIOT,avmelnikoff/RIOT,miri64/RIOT | dist/tools/testrunner/testrunner.py | dist/tools/testrunner/testrunner.py | # Copyright (C) 2017 Cenk Gündoğan <cenk.guendogan@haw-hamburg.de>
# 2016 Kaspar Schleiser <kaspar@schleiser.de>
# 2014 Martine Lenders <mlenders@inf.fu-berlin.de>
#
# This file is subject to the terms and conditions of the GNU Lesser
# General Public License v2.1. See the file LICENSE in the top level
# directory for more details.
import os
import signal
import sys
import subprocess
import time
from traceback import extract_tb, print_tb
import pexpect
PEXPECT_PATH = os.path.dirname(pexpect.__file__)
RIOTBASE = os.environ['RIOTBASE'] or \
os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", ".."))
def list_until(l, cond):
return l[:([i for i, e in enumerate(l) if cond(e)][0])]
def find_exc_origin(exc_info):
pos = list_until(extract_tb(exc_info),
lambda frame: frame.filename.startswith(PEXPECT_PATH)
)[-1]
return pos.line, \
os.path.relpath(os.path.abspath(pos.filename), RIOTBASE), \
pos.lineno
def run(testfunc, timeout=10, echo=True, traceback=False):
env = os.environ.copy()
child = pexpect.spawnu("make term", env=env, timeout=timeout)
# on many platforms, the termprog needs a short while to be ready...
time.sleep(3)
if echo:
child.logfile = sys.stdout
try:
subprocess.check_output(('make', 'reset'), env=env,
stderr=subprocess.PIPE)
except subprocess.CalledProcessError:
# make reset yields error on some boards even if successful
pass
try:
testfunc(child)
except pexpect.TIMEOUT:
line, filename, lineno = find_exc_origin(sys.exc_info()[2])
print("Timeout in expect script at \"%s\" (%s:%d)" %
(line, filename, lineno))
if traceback:
print_tb(sys.exc_info()[2])
return 1
except pexpect.EOF:
line, filename, lineno = find_exc_origin(sys.exc_info()[2])
print("Unexpected end of file in expect script at \"%s\" (%s:%d)" %
(line, filename, lineno))
if traceback:
print_tb(sys.exc_info()[2])
return 1
finally:
print("")
try:
os.killpg(os.getpgid(child.pid), signal.SIGKILL)
except ProcessLookupError:
print("Process already stopped")
child.close()
return 0
| # Copyright (C) 2016 Kaspar Schleiser <kaspar@schleiser.de>
# 2014 Martine Lenders <mlenders@inf.fu-berlin.de>
#
# This file is subject to the terms and conditions of the GNU Lesser
# General Public License v2.1. See the file LICENSE in the top level
# directory for more details.
import os
import signal
import sys
import subprocess
import time
from traceback import extract_tb, print_tb
import pexpect
PEXPECT_PATH = os.path.dirname(pexpect.__file__)
RIOTBASE = os.environ['RIOTBASE'] or \
os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", ".."))
def list_until(l, cond):
return l[:([i for i, e in enumerate(l) if cond(e)][0])]
def find_exc_origin(exc_info):
pos = list_until(extract_tb(exc_info),
lambda frame: frame.filename.startswith(PEXPECT_PATH)
)[-1]
return pos.line, \
os.path.relpath(os.path.abspath(pos.filename), RIOTBASE), \
pos.lineno
def run(testfunc, timeout=10, echo=True, traceback=False):
env = os.environ.copy()
child = pexpect.spawnu("make term", env=env, timeout=timeout)
# on many platforms, the termprog needs a short while to be ready...
time.sleep(3)
if echo:
child.logfile = sys.stdout
try:
subprocess.check_output(('make', 'reset'), env=env,
stderr=subprocess.PIPE)
except subprocess.CalledProcessError:
# make reset yields error on some boards even if successful
pass
try:
testfunc(child)
except pexpect.TIMEOUT:
line, filename, lineno = find_exc_origin(sys.exc_info()[2])
print("Timeout in expect script at \"%s\" (%s:%d)" %
(line, filename, lineno))
if traceback:
print_tb(sys.exc_info()[2])
return 1
finally:
print("")
os.killpg(os.getpgid(child.pid), signal.SIGKILL)
child.close()
return 0
| lgpl-2.1 | Python |
d7f04e845214f599bb868146bef46e00ac734e6e | Switch back to HTTP, premailer is still fucked | lutris/website,lutris/website,lutris/website,lutris/website | emails/messages.py | emails/messages.py | """Email utility functions"""
from six import string_types
from django.template.loader import render_to_string
from django.core.mail import EmailMultiAlternatives
from django.conf import settings
from premailer import transform
def send_game_accepted(user, game):
"""Email an user when their game submission is accepted"""
context = {
'username': user.username,
'name': game.name,
'game_url': game.get_absolute_url()
}
subject = u"Your game submission for '{}' has been accepted!".format(game.name)
return send_email('game_accepted', context, subject, user.email)
def send_account_creation(user, confirmation_link):
"""Email users on account creation"""
context = {
'username': user.username,
'confirmation_link': confirmation_link
}
subject = 'Welcome to Lutris.net'
return send_email('account_creation', context, subject, user.email)
def send_email(template, context, subject, recipients, sender=None):
"""Send an email using a HTML template"""
if not settings.SEND_EMAILS:
return
context.update({
'STATIC_URL': settings.STATIC_URL,
'MEDIA_URL': settings.MEDIA_URL,
})
sender = sender or settings.DEFAULT_FROM_EMAIL
if isinstance(recipients, string_types):
recipients = [recipients]
subject = u"{} {}".format(settings.EMAIL_SUBJECT_PREFIX, subject)
text_part = render_to_string('emails/{}.txt'.format(template), context)
msg = EmailMultiAlternatives(
subject=subject,
body=text_part,
to=recipients,
from_email=sender
)
html_body = render_to_string('emails/{}.html'.format(template), context)
# Premailer does not handle https links, the site can't access itself
# with HTTPS inside its container.
# Under no circumstances this should be switched to https.
html_part = transform(html_body, base_url='http://lutris.net')
msg.attach_alternative(html_part, "text/html")
return msg.send(False)
| """Email utility functions"""
from six import string_types
from django.template.loader import render_to_string
from django.core.mail import EmailMultiAlternatives
from django.conf import settings
from premailer import transform
def send_game_accepted(user, game):
"""Email an user when their game submission is accepted"""
context = {
'username': user.username,
'name': game.name,
'game_url': game.get_absolute_url()
}
subject = u"Your game submission for '{}' has been accepted!".format(game.name)
return send_email('game_accepted', context, subject, user.email)
def send_account_creation(user, confirmation_link):
"""Email users on account creation"""
context = {
'username': user.username,
'confirmation_link': confirmation_link
}
subject = 'Welcome to Lutris.net'
return send_email('account_creation', context, subject, user.email)
def send_email(template, context, subject, recipients, sender=None):
"""Send an email using a HTML template"""
if not settings.SEND_EMAILS:
return
context.update({
'STATIC_URL': settings.STATIC_URL,
'MEDIA_URL': settings.MEDIA_URL,
})
sender = sender or settings.DEFAULT_FROM_EMAIL
if isinstance(recipients, string_types):
recipients = [recipients]
subject = u"{} {}".format(settings.EMAIL_SUBJECT_PREFIX, subject)
text_part = render_to_string('emails/{}.txt'.format(template), context)
msg = EmailMultiAlternatives(
subject=subject,
body=text_part,
to=recipients,
from_email=sender
)
html_body = render_to_string('emails/{}.html'.format(template), context)
html_part = transform(html_body, base_url='https://lutris.net')
msg.attach_alternative(html_part, "text/html")
return msg.send(False)
| agpl-3.0 | Python |
7f9ca6993cb835a6941a992a537c6a026d03998e | Update DEVICE_CREDS.py | rumo/netmiko,fooelisa/netmiko,ktbyers/netmiko,rumo/netmiko,isponline/netmiko,mzbenami/netmiko,enzzzy/netmiko,isponline/netmiko,shsingh/netmiko,nitzmahone/netmiko,jinesh-patel/netmiko,ivandgreat/netmiko,enzzzy/netmiko,MikeOfNoTrades/netmiko,shamanu4/netmiko,isidroamv/netmiko,ivandgreat/netmiko,nitzmahone/netmiko,rdezavalia/netmiko,fooelisa/netmiko,jinesh-patel/netmiko,MikeOfNoTrades/netmiko,mzbenami/netmiko,ktbyers/netmiko,brutus333/netmiko,shsingh/netmiko,jumpojoy/netmiko,mileswdavis/netmiko,rdezavalia/netmiko,isidroamv/netmiko,brutus333/netmiko,jumpojoy/netmiko,mileswdavis/netmiko,nvoron23/netmiko,shamanu4/netmiko | examples/DEVICE_CREDS.py | examples/DEVICE_CREDS.py | cisco_881 = {
'device_type': 'cisco_ios',
'ip': '10.10.10.227',
'username': 'test1',
'password': 'password',
'secret': 'secret',
'verbose': False,
}
cisco_asa = {
'device_type': 'cisco_asa',
'ip': '10.10.10.226',
'username': 'admin',
'password': 'password',
'secret': 'secret',
'verbose': False,
}
arista_veos_sw1 = {
'device_type': 'arista_eos',
'ip': '10.10.10.227',
'username': 'admin1',
'password': 'password',
'secret': '',
'port': 8222,
'verbose': False,
}
arista_veos_sw2 = {
'device_type': 'arista_eos',
'ip': '10.10.10.227',
'username': 'admin1',
'password': 'password',
'secret': '',
'port': 8322,
'verbose': False,
}
arista_veos_sw3 = {
'device_type': 'arista_eos',
'ip': '10.10.10.227',
'username': 'admin1',
'password': 'password',
'secret': '',
'port': 8422,
'verbose': False,
}
arista_veos_sw4 = {
'device_type': 'arista_eos',
'ip': '10.10.10.227',
'username': 'admin1',
'password': 'password',
'secret': '',
'port': 8522,
'verbose': False,
}
hp_procurve = {
'device_type': 'hp_procurve',
'ip': '10.10.10.227',
'username': 'admin',
'password': 'password',
'secret': '',
'port': 9922,
'verbose': False,
}
f5_ltm = {
'device_type': 'hp_procurve',
'ip': '10.10.10.227',
'username': 'admin',
'password': 'password',
'secret': '',
'port': 22,
'verbose': False,
}
all_devices = [
cisco_881,
cisco_asa,
arista_veos_sw1,
arista_veos_sw2,
arista_veos_sw3,
arista_veos_sw4,
hp_procurve
f5_ltm
]
| cisco_881 = {
'device_type': 'cisco_ios',
'ip': '10.10.10.227',
'username': 'test1',
'password': 'password',
'secret': 'secret',
'verbose': False,
}
cisco_asa = {
'device_type': 'cisco_asa',
'ip': '10.10.10.226',
'username': 'admin',
'password': 'password',
'secret': 'secret',
'verbose': False,
}
arista_veos_sw1 = {
'device_type': 'arista_eos',
'ip': '10.10.10.227',
'username': 'admin1',
'password': 'password',
'secret': '',
'port': 8222,
'verbose': False,
}
arista_veos_sw2 = {
'device_type': 'arista_eos',
'ip': '10.10.10.227',
'username': 'admin1',
'password': 'password',
'secret': '',
'port': 8322,
'verbose': False,
}
arista_veos_sw3 = {
'device_type': 'arista_eos',
'ip': '10.10.10.227',
'username': 'admin1',
'password': 'password',
'secret': '',
'port': 8422,
'verbose': False,
}
arista_veos_sw4 = {
'device_type': 'arista_eos',
'ip': '10.10.10.227',
'username': 'admin1',
'password': 'password',
'secret': '',
'port': 8522,
'verbose': False,
}
hp_procurve = {
'device_type': 'hp_procurve',
'ip': '10.10.10.227',
'username': 'admin',
'password': 'password',
'secret': '',
'port': 9922,
'verbose': False,
}
all_devices = [
cisco_881,
cisco_asa,
arista_veos_sw1,
arista_veos_sw2,
arista_veos_sw3,
arista_veos_sw4,
hp_procurve
]
| mit | Python |
eaec82bb0a4a11f683c34550bdc23b3c6b0c48d2 | Update script to export to nml2 of m1 | Neurosim-lab/netpyne,thekerrlab/netpyne,Neurosim-lab/netpyne | examples/M1/M1_export.py | examples/M1/M1_export.py | import M1 # import parameters file
from netpyne import sim # import netpyne sim module
sim.createAndExportNeuroML2(netParams = M1.netParams,
simConfig = M1.simConfig,
reference = 'M1',
connections=True,
stimulations=True) # create and export network to NeuroML 2 | import M1 # import parameters file
from netpyne import sim # import netpyne sim module
sim.createAndExport(netParams = M1.netParams,
simConfig = M1.simConfig,
reference = 'M1') # create and export network to NeuroML 2 | mit | Python |
3f1966ab10edca6df5a8b22df3fc4b7375ea8c4c | Fix for Issue#3 | ianunay/sublime_scratchpad,i-anunay/sublime_scratchpad,i-anunay/sublime_scratchpad | Scratchpad.py | Scratchpad.py | from sublime_plugin import WindowCommand, TextCommand
from sublime import packages_path, run_command, ENCODED_POSITION
from time import strftime
from os.path import isfile
headerText = """
_____ _ _ _
/ ___| | | | | | |
\ `--. ___ _ __ __ _| |_ ___| |__ _ __ __ _ __| |
`--. \/ __| '__/ _` | __/ __| '_ \| '_ \ / _` |/ _` |
/\__/ / (__| | | (_| | || (__| | | | |_) | (_| | (_| |
\____/ \___|_| \__,_|\__\___|_| |_| .__/ \__,_|\__,_|
| |
|_|
"""
class OpenScratchpadCommand(WindowCommand):
def run(self):
scratchpadFile = packages_path()[:-8]+'scratchpad.txt'
checkAndFillEmpty(scratchpadFile)
self.window.open_file(scratchpadFile)
class ScratchpadCommand(WindowCommand):
def run(self):
scratchpadFile = packages_path()[:-8]+'scratchpad.txt'
global headerText
checkAndFillEmpty(scratchpadFile)
count = putTimeStamp(scratchpadFile)
self.window.open_file(scratchpadFile+':'+str(count+1), ENCODED_POSITION)
def checkAndFillEmpty(scratchpadFile):
global headerText
if not isfile(scratchpadFile):
with open(scratchpadFile, "a") as scratchFile:
scratchFile.write(headerText)
def putTimeStamp(scratchpadFile):
timeStamp = "\n\n" + strftime("%c") + " : " + "\n" +"========================" + "\n"
with open(scratchpadFile, "a") as scratchFile:
scratchFile.write(timeStamp)
with open(scratchpadFile) as scratchFile:
count = sum(1 for line in scratchFile)
return count
| from sublime_plugin import WindowCommand, TextCommand
from sublime import packages_path, run_command, ENCODED_POSITION
from time import strftime
from os.path import isfile
headerText = """
_____ _ _ _
/ ___| | | | | | |
\ `--. ___ _ __ __ _| |_ ___| |__ _ __ __ _ __| |
`--. \/ __| '__/ _` | __/ __| '_ \| '_ \ / _` |/ _` |
/\__/ / (__| | | (_| | || (__| | | | |_) | (_| | (_| |
\____/ \___|_| \__,_|\__\___|_| |_| .__/ \__,_|\__,_|
| |
|_|
"""
class OpenScratchpadCommand(WindowCommand):
def run(self):
scratchpadFile = packages_path()[:-8]+'scratchpad.txt'
checkAndFillEmpty(scratchpadFile)
self.window.open_file(scratchpadFile)
class ScratchpadCommand(WindowCommand):
def run(self):
scratchpadFile = packages_path()[:-8]+'scratchpad.txt'
global headerText
checkAndFillEmpty(scratchpadFile)
count = putTimeStamp(scratchpadFile)
self.window.open_file(scratchpadFile+':'+str(count+1), ENCODED_POSITION)
def checkAndFillEmpty(scratchpadFile):
global headerText
if not isfile(scratchpadFile):
with open(scratchpadFile, "a") as scratchFile:
scratchFile.write(headerText)
def putTimeStamp(scratchpadFile):
timeStamp = "\n" + strftime("%c") + " : " + "\n" +"========================" + "\n"
with open(scratchpadFile, "a") as scratchFile:
scratchFile.write(timeStamp)
with open(scratchpadFile) as scratchFile:
count = sum(1 for line in scratchFile)
return count
| mit | Python |
e13e602025ffe2374237a0710a8cef3d2f477247 | use constant TRAVIS_API_URL | buildtimetrend/python-lib | buildtimetrend/travis.py | buildtimetrend/travis.py | '''
vim: set expandtab sw=4 ts=4:
Interface to Travis CI API.
Copyright (C) 2014 Dieter Adriaenssens <ruleant@users.sourceforge.net>
This file is part of buildtime-trend
<https://github.com/ruleant/buildtime-trend/>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import urllib2
import json
TRAVIS_API_URL = 'https://api.travis-ci.org/'
class TravisData(object):
'''
Gather data from Travis CI using the API
'''
def __init__(self, repo, build_id):
'''
Retrieve Travis CI build data using the API.
Param repo : github repository slug (fe. ruleant/buildtime-trend)
Param build_id : Travis CI build id (fe. 158)
'''
self.build_data = {}
self.repo = repo
self.build_id = str(build_id)
def get_build_data(self):
'''
Retrieve Travis CI build data using the API.
'''
req = urllib2.Request(
TRAVIS_API_URL + 'repos/' + self.repo
+ '/builds?number=' + self.build_id,
None,
{
# get version from Config class
'user-agent': 'buildtime-trend/0.2-dev',
'accept': 'application/vnd.travis-ci.2+json'
}
)
opener = urllib2.build_opener()
result = opener.open(req)
self.build_data = json.load(result)
def get_started_at(self):
'''
Retrieve timestamp when build was started.
'''
if len(self.build_data) > 0:
return self.build_data['builds'][0]['started_at']
else:
return None
| '''
vim: set expandtab sw=4 ts=4:
Interface to Travis CI API.
Copyright (C) 2014 Dieter Adriaenssens <ruleant@users.sourceforge.net>
This file is part of buildtime-trend
<https://github.com/ruleant/buildtime-trend/>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import urllib2
import json
class TravisData(object):
'''
Gather data from Travis CI using the API
'''
def __init__(self, repo, build_id):
'''
Retrieve Travis CI build data using the API.
Param repo : github repository slug (fe. ruleant/buildtime-trend)
Param build_id : Travis CI build id (fe. 158)
'''
self.build_data = {}
self.repo = repo
self.build_id = str(build_id)
def get_build_data(self):
'''
Retrieve Travis CI build data using the API.
'''
req = urllib2.Request(
'https://api.travis-ci.org/repos/' + self.repo
+ '/builds?number=' + self.build_id,
None,
{
# get version from Config class
'user-agent': 'buildtime-trend/0.2-dev',
'accept': 'application/vnd.travis-ci.2+json'
}
)
opener = urllib2.build_opener()
result = opener.open(req)
self.build_data = json.load(result)
def get_started_at(self):
'''
Retrieve timestamp when build was started.
'''
if len(self.build_data) > 0:
return self.build_data['builds'][0]['started_at']
else:
return None
| agpl-3.0 | Python |
25aca3b7152229b6f0fd7fa9c9f81639f77263dd | update urls: spacing fixes | dfalk/mezzanine-wiki | mezzanine_wiki/urls.py | mezzanine_wiki/urls.py |
from django.conf.urls.defaults import patterns, url
# Wiki patterns.
urlpatterns = patterns("mezzanine_wiki.views",
url("^$", "wiki_index", name="wiki_index"),
url("^pages:new/$", "wiki_page_new", name="wiki_page_new"),
url("^pages:list/$", "wiki_page_list", name="wiki_page_list"),
url("^pages:changes/$", "wiki_page_changes", name="wiki_page_changes"),
url("^tag:(?P<tag>.*)/$", "wiki_page_list", name="wiki_page_list_tag"),
url("^category:(?P<category>.*)/$", "wiki_page_list",
name="wiki_page_list_category"),
url("^author:(?P<username>.*)/$", "wiki_page_list",
name="wiki_page_list_author"),
url("^(?P<slug>.*)/history/$", "wiki_page_history",
name="wiki_page_history"),
url("^(?P<slug>.*)/history/(?P<rev_id>\d+)/$", "wiki_page_revision",
name="wiki_page_revision"),
url("^(?P<slug>.*)/diff/$", "wiki_page_diff",
name="wiki_page_diff"),
url("^(?P<slug>.*)/revert/(?P<revision_pk>[0-9]+)/$", "wiki_page_revert",
name="wiki_page_revert"),
url("^(?P<slug>.*)/undo/(?P<revision_pk>[0-9]+)/$", "wiki_page_undo",
name="wiki_page_undo"),
url("^(?P<slug>.*)/edit/$", "wiki_page_edit", name="wiki_page_edit"),
url("^(?P<slug>.*)/$", "wiki_page_detail", name="wiki_page_detail"),
)
|
from django.conf.urls.defaults import patterns, url
# Wiki patterns.
urlpatterns = patterns("mezzanine_wiki.views",
url("^$", "wiki_index", name="wiki_index"),
url("^pages:new/$", "wiki_page_new", name="wiki_page_new"),
url("^pages:list/$", "wiki_page_list", name="wiki_page_list"),
url("^pages:changes/$", "wiki_page_changes", name="wiki_page_changes"),
url("^tag:(?P<tag>.*)/$", "wiki_page_list", name="wiki_page_list_tag"),
url("^category:(?P<category>.*)/$", "wiki_page_list",
name="wiki_page_list_category"),
url("^author:(?P<username>.*)/$", "wiki_page_list",
name="wiki_page_list_author"),
url("^(?P<slug>.*)/history/$", "wiki_page_history",
name="wiki_page_history"),
url("^(?P<slug>.*)/history/(?P<rev_id>\d+)/$", "wiki_page_revision",
name="wiki_page_revision"),
url("^(?P<slug>.*)/diff/$", "wiki_page_diff",
name="wiki_page_diff"),
url("^(?P<slug>.*)/revert/(?P<revision_pk>[0-9]+)/$", "wiki_page_revert",
name="wiki_page_revert"),
url("^(?P<slug>.*)/undo/(?P<revision_pk>[0-9]+)/$", "wiki_page_undo",
name="wiki_page_undo"),
url("^(?P<slug>.*)/edit/$", "wiki_page_edit", name="wiki_page_edit"),
url("^(?P<slug>.*)/$", "wiki_page_detail", name="wiki_page_detail"),
)
| bsd-2-clause | Python |
7b486ccf269314a3105db17fa0e8d61e42711398 | migrate spj | ultmaster/eoj3,ultmaster/eoj3,ultmaster/eoj3,ultmaster/eoj3 | migrate/migrate_spj.py | migrate/migrate_spj.py | from problem.models import Problem, SpecialProgram
import traceback
def run():
# try:
# for problem in Problem.objects.all():
# if problem.judge == '' or problem.judge == 'fcmp':
# continue
# if SpecialProgram.objects.filter(filename__contains=problem.judge).exists():
# problem.checker = SpecialProgram.objects.filter(filename__contains=problem.judge).first().fingerprint
# else:
# problem.visible = False
# problem.save(update_fields=['checker', 'visible'])
# except:
# traceback.print_exc()
try:
for problem in Problem.objects.all():
if not SpecialProgram.objects.filter(fingerprint=problem.checker).exists():
print(problem.checker)
except:
traceback.print_exc() | from problem.models import Problem, SpecialProgram
import traceback
def run():
try:
for problem in Problem.objects.all():
if problem.judge == '' or problem.judge == 'fcmp':
continue
if SpecialProgram.objects.filter(filename__contains=problem.judge).exists():
problem.checker = SpecialProgram.objects.filter(filename__contains=problem.judge).first().fingerprint
else:
problem.visible = False
problem.save(update_fields=['checker', 'visible'])
except:
traceback.print_exc() | mit | Python |
3e822a60255832279282392038612ba516290009 | fix circular import fail in Python 2.7 | niboshi/chainer,niboshi/chainer,jnishi/chainer,chainer/chainer,wkentaro/chainer,pfnet/chainer,ktnyt/chainer,jnishi/chainer,jnishi/chainer,chainer/chainer,okuta/chainer,hvy/chainer,keisuke-umezawa/chainer,wkentaro/chainer,ktnyt/chainer,okuta/chainer,keisuke-umezawa/chainer,ktnyt/chainer,chainer/chainer,niboshi/chainer,wkentaro/chainer,chainer/chainer,okuta/chainer,okuta/chainer,niboshi/chainer,hvy/chainer,keisuke-umezawa/chainer,jnishi/chainer,tkerola/chainer,hvy/chainer,hvy/chainer,keisuke-umezawa/chainer,wkentaro/chainer,ktnyt/chainer | chainer/backends/_cpu.py | chainer/backends/_cpu.py | import numpy
from chainer import _backend
import chainer.backends
import chainerx
class CpuDevice(_backend.Device):
@property
def xp(self):
return numpy
@staticmethod
def from_array(array):
if isinstance(array, numpy.ndarray):
return CpuDevice()
return None
def __eq__(self, other):
return isinstance(other, CpuDevice)
def __repr__(self):
return '<{} (numpy)>'.format(self.__class__.__name__)
def send_array(self, array):
return _array_to_numpy(array)
def _get_device(device_spec):
if device_spec is numpy:
return CpuDevice()
return None
def _to_numpy(array):
"""Converts an array or arrays to NumPy."""
return _backend._convert_arrays(array, _array_to_numpy)
def _array_to_numpy(array):
if array is None:
return None
if isinstance(array, numpy.ndarray):
return array
if isinstance(array, chainer.backends.intel64.mdarray):
return numpy.asarray(array)
if isinstance(array, chainerx.ndarray):
return chainerx.to_numpy(array, copy=False)
if isinstance(array, chainer.backends.cuda.ndarray):
with chainer.backends.cuda.get_device_from_array(array):
return array.get()
if numpy.isscalar(array):
return numpy.asarray(array)
raise TypeError(
'Array cannot be converted into an numpy.ndarray'
'\nActual type: {0}.'.format(type(array)))
| import numpy
from chainer import _backend
from chainer.backends import cuda
from chainer.backends import intel64
import chainerx
class CpuDevice(_backend.Device):
@property
def xp(self):
return numpy
@staticmethod
def from_array(array):
if isinstance(array, numpy.ndarray):
return CpuDevice()
return None
def __eq__(self, other):
return isinstance(other, CpuDevice)
def __repr__(self):
return '<{} (numpy)>'.format(self.__class__.__name__)
def send_array(self, array):
return _array_to_numpy(array)
def _get_device(device_spec):
if device_spec is numpy:
return CpuDevice()
return None
def _to_numpy(array):
"""Converts an array or arrays to NumPy."""
return _backend._convert_arrays(array, _array_to_numpy)
def _array_to_numpy(array):
if array is None:
return None
if isinstance(array, numpy.ndarray):
return array
if isinstance(array, intel64.mdarray):
return numpy.asarray(array)
if isinstance(array, chainerx.ndarray):
return chainerx.to_numpy(array, copy=False)
if isinstance(array, cuda.ndarray):
with cuda.get_device_from_array(array):
return array.get()
if numpy.isscalar(array):
return numpy.asarray(array)
raise TypeError(
'Array cannot be converted into an numpy.ndarray'
'\nActual type: {0}.'.format(type(array)))
| mit | Python |
9b855f8b72e485fd6484037d709fd0ad1c1d39a4 | Remove duplicate insert of flags | PolyJIT/benchbuild,PolyJIT/benchbuild,PolyJIT/benchbuild,PolyJIT/benchbuild | benchbuild/projects/benchbuild/ccrypt.py | benchbuild/projects/benchbuild/ccrypt.py | from os import path
from benchbuild.projects.benchbuild.group import BenchBuildGroup
from benchbuild.utils.wrapping import wrap
from benchbuild.utils.run import run
from benchbuild.utils.downloader import Wget
from benchbuild.utils.compiler import lt_clang, lt_clang_cxx
from benchbuild.utils.cmd import tar, make
from plumbum import local
class Ccrypt(BenchBuildGroup):
""" ccrypt benchmark """
NAME = 'ccrypt'
DOMAIN = 'encryption'
VERSION = '1.10'
SRC_FILE = 'ccrypt-{0}.tar.gz'.format(VERSION)
src_dir = "ccrypt-{0}".format(VERSION)
src_uri = \
"http://ccrypt.sourceforge.net/download/ccrypt-{0}.tar.gz".format(
VERSION)
def download(self):
Wget(self.src_uri, self.SRC_FILE)
tar('xfz', path.join(self.builddir, self.SRC_FILE))
def configure(self):
clang = lt_clang(self.cflags, self.ldflags, self.compiler_extension)
clang_cxx = lt_clang_cxx(self.cflags, self.ldflags,
self.compiler_extension)
ccrypt_dir = path.join('.', self.src_dir)
with local.cwd(ccrypt_dir):
configure = local["./configure"]
with local.env(CC=str(clang),
CXX=str(clang_cxx)):
run(configure)
def build(self):
ccrypt_dir = path.join('.', self.src_dir)
with local.cwd(ccrypt_dir):
run(make["check"])
def run_tests(self, experiment, run):
ccrypt_dir = path.join(self.builddir, self.src_dir)
with local.cwd(ccrypt_dir):
wrap(path.join(ccrypt_dir, "src", self.name), experiment)
wrap(path.join(ccrypt_dir, "check", "crypt3-check"), experiment)
wrap(path.join(ccrypt_dir, "check", "rijndael-check"), experiment)
run(make["check"])
| from os import path
from benchbuild.projects.benchbuild.group import BenchBuildGroup
from benchbuild.utils.wrapping import wrap
from benchbuild.utils.run import run
from benchbuild.utils.downloader import Wget
from benchbuild.utils.compiler import lt_clang, lt_clang_cxx
from benchbuild.utils.cmd import tar, make
from plumbum import local
class Ccrypt(BenchBuildGroup):
""" ccrypt benchmark """
NAME = 'ccrypt'
DOMAIN = 'encryption'
VERSION = '1.10'
SRC_FILE = 'ccrypt-{0}.tar.gz'.format(VERSION)
src_dir = "ccrypt-{0}".format(VERSION)
src_uri = \
"http://ccrypt.sourceforge.net/download/ccrypt-{0}.tar.gz".format(
VERSION)
def download(self):
Wget(self.src_uri, self.SRC_FILE)
tar('xfz', path.join(self.builddir, self.SRC_FILE))
def configure(self):
clang = lt_clang(self.cflags, self.ldflags, self.compiler_extension)
clang_cxx = lt_clang_cxx(self.cflags, self.ldflags,
self.compiler_extension)
ccrypt_dir = path.join('.', self.src_dir)
with local.cwd(ccrypt_dir):
configure = local["./configure"]
with local.env(CC=str(clang),
CXX=str(clang_cxx),
CFLAGS=" ".join(self.cflags),
CXXFLAGS=" ".join(self.cflags),
LDFLAGS=" ".join(self.ldflags)):
run(configure)
def build(self):
ccrypt_dir = path.join('.', self.src_dir)
with local.cwd(ccrypt_dir):
run(make["check"])
def run_tests(self, experiment, run):
ccrypt_dir = path.join(self.builddir, self.src_dir)
with local.cwd(ccrypt_dir):
wrap(path.join(ccrypt_dir, "src", self.name), experiment)
wrap(path.join(ccrypt_dir, "check", "crypt3-check"), experiment)
wrap(path.join(ccrypt_dir, "check", "rijndael-check"), experiment)
run(make["check"])
| mit | Python |
2d56f9417030410ecb606ee6aa5dc8ea58633f83 | Fix Tapastic scraper | webcomics/dosage,webcomics/dosage | dosagelib/plugins/tapastic.py | dosagelib/plugins/tapastic.py | # SPDX-License-Identifier: MIT
# Copyright (C) 2019-2020 Tobias Gruetzmacher
# Copyright (C) 2019-2020 Daniel Ring
from ..scraper import _ParserScraper
from ..helpers import indirectStarter
class Tapastic(_ParserScraper):
baseUrl = 'https://tapas.io/'
imageSearch = '//article[contains(@class, "js-episode-article")]//img/@data-src'
prevSearch = '//a[contains(@class, "js-prev-ep-btn")]'
latestSearch = '//ul[contains(@class, "js-episode-list")]//a'
starter = indirectStarter
multipleImagesPerStrip = True
def __init__(self, name, url):
super(Tapastic, self).__init__('Tapastic/' + name)
self.url = self.baseUrl + 'series/' + url + '/info'
self.stripUrl = self.baseUrl + 'episode/%s'
def getPrevUrl(self, url, data):
# Retrieve comic metadata from API
data = self.session.get(url + '/info')
data.raise_for_status()
apiData = data.json()['data']
if apiData['scene'] == 2:
self.firstStripUrl = self.stripUrl % apiData['prev_ep_id']
return self.stripUrl % apiData['prev_ep_id']
def fetchUrls(self, url, data, urlSearch):
# Save link order for position-based filenames
self.imageUrls = super().fetchUrls(url, data, urlSearch)
return self.imageUrls
def namer(self, imageUrl, pageUrl):
# Construct filename from episode number and image position on page
episodeNum = pageUrl.rsplit('/', 1)[-1]
imageNum = self.imageUrls.index(imageUrl)
imageExt = pageUrl.rsplit('.', 1)[-1]
if len(self.imageUrls) > 1:
filename = "%s-%d.%s" % (episodeNum, imageNum, imageExt)
else:
filename = "%s.%s" % (episodeNum, imageExt)
return filename
@classmethod
def getmodules(cls):
return (
# Manually-added comics
cls('AmpleTime', 'Ample-Time'),
cls('NoFuture', 'NoFuture'),
cls('OrensForge', 'OrensForge'),
cls('RavenWolf', 'RavenWolf'),
cls('TheCatTheVineAndTheVictory', 'The-Cat-The-Vine-and-The-Victory'),
cls('TheGodsPack', 'The-Gods-Pack'),
# START AUTOUPDATE
# END AUTOUPDATE
)
| # SPDX-License-Identifier: MIT
# Copyright (C) 2019-2020 Tobias Gruetzmacher
# Copyright (C) 2019-2020 Daniel Ring
import json
import re
from ..scraper import _ParserScraper
from ..helpers import indirectStarter
class Tapastic(_ParserScraper):
baseUrl = 'https://tapas.io/'
imageSearch = '//article[contains(@class, "js-episode-article")]//img/@data-src'
prevSearch = '//a[contains(@class, "js-prev-ep-btn")]'
latestSearch = '//ul[contains(@class, "js-episode-list")]//a'
starter = indirectStarter
multipleImagesPerStrip = True
def __init__(self, name, url):
super(Tapastic, self).__init__('Tapastic/' + name)
self.url = self.baseUrl + 'series/' + url
self.stripUrl = self.baseUrl + 'episode/%s'
def fetchUrls(self, url, data, urlSearch):
# Save link order for position-based filenames
self.imageUrls = super().fetchUrls(url, data, urlSearch)
return self.imageUrls
def namer(self, imageUrl, pageUrl):
# Construct filename from episode number and image position on page
episodeNum = pageUrl.rsplit('/', 1)[-1]
imageNum = self.imageUrls.index(imageUrl)
imageExt = pageUrl.rsplit('.', 1)[-1]
if len(self.imageUrls) > 1:
filename = "%s-%d.%s" % (episodeNum, imageNum, imageExt)
else:
filename = "%s.%s" % (episodeNum, imageExt)
return filename
@classmethod
def getmodules(cls):
return (
# Manually-added comics
cls('AmpleTime', 'Ample-Time'),
cls('NoFuture', 'NoFuture'),
cls('OrensForge', 'OrensForge'),
cls('RavenWolf', 'RavenWolf'),
cls('TheCatTheVineAndTheVictory', 'The-Cat-The-Vine-and-The-Victory'),
cls('TheGodsPack', 'The-Gods-Pack'),
# START AUTOUPDATE
# END AUTOUPDATE
)
| mit | Python |
18be54e7ca9937b19295b07b3d0cc2f317c65cab | Update app.py | Fillll/reddit2telegram,nsiregar/reddit2telegram,Fillll/reddit2telegram,nsiregar/reddit2telegram | channels/ya_metro/app.py | channels/ya_metro/app.py | #encoding:utf-8
from urllib.parse import urlparse
from utils import get_url, weighted_random_subreddit
t_channel = '@ya_metro'
subreddit = weighted_random_subreddit({'Subways': 0.6,
'LondonUnderground': 0.4,
'Trams': 0.3
})
def send_post(submission, r2t):
what, url, ext = get_url(submission)
title = submission.title
link = submission.shortlink
text = '{}\n{}'.format(title, link)
if what == 'text':
if submission.score >= 4:
punchline = submission.selftext
text = '{title}\n\n{body}\n\n{link}'.format(
title=title, body=punchline, link=link)
return r2t.send_text(text, disable_web_page_preview=True)
else:
return False
elif what == 'album':
base_url = submission.url
text = '{}\n{}\n\n{}'.format(title, base_url, link)
r2t.send_text(text)
r2t.send_album(url)
return True
elif what == 'other':
domain = urlparse(url).netloc
if domain in ('www.youtube.com', 'youtu.be'):
text = '{}\n{}\n\n{}'.format(title, url, link)
return r2t.send_text(text)
elif submission.score >= 4:
text = '{}\n{}\n\n{}'.format(title, url, link)
return r2t.send_text(text)
else:
return False
elif what in ('gif', 'img'):
return r2t.send_gif_img(what, url, ext, text)
else:
return False
| #encoding:utf-8
from urllib.parse import urlparse
from utils import get_url, weighted_random_subreddit
t_channel = '@ya_metro'
subreddit = weighted_random_subreddit({'Subways': 0.6,
'LondonUnderground': 0.4
})
def send_post(submission, r2t):
what, url, ext = get_url(submission)
title = submission.title
link = submission.shortlink
text = '{}\n{}'.format(title, link)
if what == 'text':
if submission.score >= 4:
punchline = submission.selftext
text = '{title}\n\n{body}\n\n{link}'.format(
title=title, body=punchline, link=link)
return r2t.send_text(text, disable_web_page_preview=True)
else:
return False
elif what == 'album':
base_url = submission.url
text = '{}\n{}\n\n{}'.format(title, base_url, link)
r2t.send_text(text)
r2t.send_album(url)
return True
elif what == 'other':
domain = urlparse(url).netloc
if domain in ('www.youtube.com', 'youtu.be'):
text = '{}\n{}\n\n{}'.format(title, url, link)
return r2t.send_text(text)
elif submission.score >= 4:
text = '{}\n{}\n\n{}'.format(title, url, link)
return r2t.send_text(text)
else:
return False
elif what in ('gif', 'img'):
return r2t.send_gif_img(what, url, ext, text)
else:
return False
| mit | Python |
849a115ef1e86b7a2b01ccb43d0ea4e713ab070f | Update example_pool.py | aio-libs/aioodbc,jettify/aioodbc | examples/example_pool.py | examples/example_pool.py | import asyncio
import aioodbc
loop = asyncio.get_event_loop()
async def test_pool():
dsn = 'Driver=SQLite;Database=sqlite.db'
pool = await aioodbc.create_pool(dsn=dsn, loop=loop)
async with pool.get as conn:
cur = await conn.cursor()
await cur.execute("SELECT 42;")
r = await cur.fetchall()
print(r)
await cur.close()
await conn.close()
pool.close()
await pool.wait_closed()
loop.run_until_complete(test_example())
| import asyncio
import aioodbc
loop = asyncio.get_event_loop()
async def test_pool():
dsn = 'Driver=SQLite;Database=sqlite.db'
pool = await aioodbc.create_pool(dsn=dsn, loop=loop)
async with (await pool) as conn:
cur = await conn.cursor()
await cur.execute("SELECT 42;")
r = await cur.fetchall()
print(r)
await cur.close()
await conn.close()
pool.close()
await pool.wait_closed()
loop.run_until_complete(test_example())
| apache-2.0 | Python |
9098f24fda02f1bc4eec585e807f14fdf7a8bb1d | Include engine names from the 'pygraphviz' library instead of maintaining a local list. | homeworkprod/chatrelater,TheLady/chatrelater | chatrelater/visualize.py | chatrelater/visualize.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Chat Relater's Visualizer
~~~~~~~~~~~~~~~~~~~~~~~~~
Visualize relations between chat partners.
For graphical output, GraphViz_ will be utilized (has to be installed) and
various formats can be written.
.. _GraphViz: http://www.graphviz.org/
:Copyright: 2007 Jochen Kupperschmidt
:License: MIT, see LICENSE for details.
"""
from optparse import OptionParser
from graphviz import Digraph, Graph
from graphviz.files import ENGINES, FORMATS
from analyze import load_data
def generate_dot(nicknames, relations, name, format, engine, directed=False):
"""Create dot graph representations."""
# Create graph.
dot_attrs = {
'name': name,
'format': format,
'engine': engine,
}
if directed:
dot = Digraph(**dot_attrs)
else:
dot = Graph(**dot_attrs)
# Create nodes.
for nickname in nicknames:
dot.node(nickname, label=nickname)
# Create edges.
max_count = float(max(rel[2] for rel in relations))
max_width = 4
for nickname1, nickname2, count in sorted(relations, key=lambda x: x[0]):
width = (count / max_count * max_width) + 1
dot.edge(nickname1, nickname2, style='setlinewidth(%d)' % width)
return dot
def write_file(dot):
"""Create a graphics file from the DOT data."""
rendered_filename = dot.render(filename=dot.name)
print "Wrote %s output to '%s' using %s." \
% (dot.format, rendered_filename, dot.engine)
if __name__ == '__main__':
# Create parser.
parser = OptionParser(
usage='usage: %prog [options] <data filename> <output filename prefix>')
parser.add_option('-f', '--format', dest='format', default='dot',
choices=sorted(FORMATS),
help='output format supported by GraphViz (default: dot)')
parser.add_option('-p', '--prog', dest='prog', default='dot',
choices=sorted(ENGINES),
help='GraphViz program to create output with (default: dot)')
# Parse command-line input.
opts, args = parser.parse_args()
try:
input_filename, output_filename = args
except ValueError:
parser.print_help()
parser.exit()
# Draw graphs.
nicknames, relations, directed = load_data(input_filename)
dot = generate_dot(nicknames, relations, output_filename, opts.format,
engine=opts.prog, directed=directed)
write_file(dot)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Chat Relater's Visualizer
~~~~~~~~~~~~~~~~~~~~~~~~~
Visualize relations between chat partners.
For graphical output, GraphViz_ will be utilized (has to be installed) and
various formats can be written.
.. _GraphViz: http://www.graphviz.org/
:Copyright: 2007 Jochen Kupperschmidt
:License: MIT, see LICENSE for details.
"""
from optparse import OptionParser
from graphviz import Digraph, Graph
from graphviz.files import FORMATS
from analyze import load_data
def generate_dot(nicknames, relations, name, format, engine, directed=False):
"""Create dot graph representations."""
# Create graph.
dot_attrs = {
'name': name,
'format': format,
'engine': engine,
}
if directed:
dot = Digraph(**dot_attrs)
else:
dot = Graph(**dot_attrs)
# Create nodes.
for nickname in nicknames:
dot.node(nickname, label=nickname)
# Create edges.
max_count = float(max(rel[2] for rel in relations))
max_width = 4
for nickname1, nickname2, count in sorted(relations, key=lambda x: x[0]):
width = (count / max_count * max_width) + 1
dot.edge(nickname1, nickname2, style='setlinewidth(%d)' % width)
return dot
def write_file(dot):
"""Create a graphics file from the DOT data."""
rendered_filename = dot.render(filename=dot.name)
print "Wrote %s output to '%s' using %s." \
% (dot.format, rendered_filename, dot.engine)
if __name__ == '__main__':
# Create parser.
parser = OptionParser(
usage='usage: %prog [options] <data filename> <output filename prefix>')
parser.add_option('-f', '--format', dest='format', default='dot',
choices=sorted(FORMATS),
help='output format supported by GraphViz (default: dot)')
parser.add_option('-p', '--prog', dest='prog', default='dot',
choices=('dot', 'twopi', 'neato', 'circo', 'fdp'),
help='GraphViz program to create output with (default: dot)')
# Parse command-line input.
opts, args = parser.parse_args()
try:
input_filename, output_filename = args
except ValueError:
parser.print_help()
parser.exit()
# Draw graphs.
nicknames, relations, directed = load_data(input_filename)
dot = generate_dot(nicknames, relations, output_filename, opts.format,
engine=opts.prog, directed=directed)
write_file(dot)
| mit | Python |
11f178ec6b2e6c62dbf72901022690797b9525fe | Update nn.py | AndysDeepAbstractions/deep-learning,AndysDeepAbstractions/deep-learning,AndysDeepAbstractions/deep-learning | Miniflow/nn.py | Miniflow/nn.py | """
Have fun with the number of epochs!
Be warned that if you increase them too much,
the VM will time out :)
"""
import numpy as np
from sklearn.datasets import load_boston
from sklearn.utils import shuffle, resample
from miniflow import *
# Load data
data = load_boston()
X_ = data['data']
y_ = data['target']
# Normalize data
X_ = (X_ - np.mean(X_, axis=0)) / np.std(X_, axis=0)
n_features = X_.shape[1]
n_hidden = 10
W1_ = np.random.randn(n_features, n_hidden)
b1_ = np.zeros(n_hidden)
W2_ = np.random.randn(n_hidden, 1)
b2_ = np.zeros(1)
# Neural network
X, y = Input(), Input()
W1, b1 = Input(), Input()
W2, b2 = Input(), Input()
l1 = Linear(X, W1, b1)
s1 = Sigmoid(l1)
l2 = Linear(s1, W2, b2)
cost = MSE(y, l2)
feed_dict = {
X: X_,
y: y_,
W1: W1_,
b1: b1_,
W2: W2_,
b2: b2_
}
epochs = 1000
# Total number of examples
m = X_.shape[0]
batch_size = 11
steps_per_epoch = m // batch_size
graph = topological_sort(feed_dict)
trainables = [W1, b1, W2, b2]
print("Total number of examples = {}".format(m))
# Step 4
t1 = 1/1000
t2 = t1 / 1.05
learning_rate = 0.1
loss = 0
lossprev = loss
loss_delta_rel = 1
loss_delta_rel_xa1 = 1
loss_delta_rel_xa2 = 1
loss_delta_rel_xa_rel = 1
for i in range(epochs):
if (lossprev > 0):
loss_delta_rel = lossprev/loss
loss_delta_rel_xa1 = (loss_delta_rel_xa1 * 1-(t1)) + (loss_delta_rel * (t1))
loss_delta_rel_xa2 = (loss_delta_rel_xa2 * 1-(t2)) + (loss_delta_rel * (t2))
loss_delta_rel_xa_rel = loss_delta_rel_xa2 / loss_delta_rel_xa1
learning_rate = learning_rate * loss_delta_rel_xa_rel
lossprev = loss
loss = 0
for j in range(steps_per_epoch):
# Step 1
# Randomly sample a batch of examples
X_batch, y_batch = resample(X_, y_, n_samples=batch_size)
# Reset value of X and y Inputs
X.value = X_batch
y.value = y_batch
# Step 2
forward_and_backward(graph)
# Step 3
sgd_update(trainables,learning_rate)
loss += graph[-1].value
print("Epoch: {}, Loss: {:.3f}, delta_rel: {:.3f}, xa_rel: {:.6f} lr: {:.6f}".format(i+1, loss/steps_per_epoch, loss_delta_rel, loss_delta_rel_xa_rel, learning_rate))
| """
Have fun with the number of epochs!
Be warned that if you increase them too much,
the VM will time out :)
"""
import numpy as np
from sklearn.datasets import load_boston
from sklearn.utils import shuffle, resample
from miniflow import *
# Load data
data = load_boston()
X_ = data['data']
y_ = data['target']
# Normalize data
X_ = (X_ - np.mean(X_, axis=0)) / np.std(X_, axis=0)
n_features = X_.shape[1]
n_hidden = 10
W1_ = np.random.randn(n_features, n_hidden)
b1_ = np.zeros(n_hidden)
W2_ = np.random.randn(n_hidden, 1)
b2_ = np.zeros(1)
# Neural network
X, y = Input(), Input()
W1, b1 = Input(), Input()
W2, b2 = Input(), Input()
l1 = Linear(X, W1, b1)
s1 = Sigmoid(l1)
l2 = Linear(s1, W2, b2)
cost = MSE(y, l2)
feed_dict = {
X: X_,
y: y_,
W1: W1_,
b1: b1_,
W2: W2_,
b2: b2_
}
epochs = 1000
# Total number of examples
m = X_.shape[0]
batch_size = 11
steps_per_epoch = m // batch_size
graph = topological_sort(feed_dict)
trainables = [W1, b1, W2, b2]
print("Total number of examples = {}".format(m))
# Step 4
learning_rate=1e-3
for i in range(epochs):
loss = 0
for j in range(steps_per_epoch):
# Step 1
# Randomly sample a batch of examples
X_batch, y_batch = resample(X_, y_, n_samples=batch_size)
# Reset value of X and y Inputs
X.value = X_batch
y.value = y_batch
# Step 2
forward_and_backward(graph)
# Step 3
sgd_update(trainables,learning_rate)
loss += graph[-1].value
print("Epoch: {}, Loss: {:.3f}".format(i+1, loss/steps_per_epoch))
| mit | Python |
5c61bbc02ff6b4bb875123b438312fa4d513dd6c | add isRunning | GuillaumeDerval/INGInious,GuillaumeDerval/INGInious,GuillaumeDerval/INGInious,layus/INGInious,layus/INGInious,layus/INGInious,GuillaumeDerval/INGInious,layus/INGInious | modules/job_manager.py | modules/job_manager.py | import Queue
import threading
class JobManager (threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
def run(self):
while True:
# Monitor lock and check
condition.acquire()
if main_queue.empty():
condition.wait()
# Launch the task
jobId,task,inputdata = main_queue.get()
main_dict[jobId] = {"task":task,"result":"Done","input":inputdata}
# Monitor notify
condition.notify()
condition.release()
def addJob(task, inputdata):
# Monitor lock
condition.acquire()
# Put task in the job queue
addJob.cur_id += 1
jobId = 'job' + `addJob.cur_id`
main_queue.put((jobId,task,inputdata))
main_dict[jobId] = None
# Monitor notify
condition.notify()
condition.release()
# Returns the jobId
return jobId
def isRunning(jobId):
if main_dict.has_key(jobId):
return main_dict[jobId] == None
else:
return False
def isDone(jobId):
if main_dict.has_key(jobId):
return main_dict[jobId] != None
else:
return False
def getResult(jobId):
result = None
# Delete result from dictionary if there is sth
if not main_dict[jobId] == None:
result = main_dict[jobId]
del main_dict[jobId]
return result
# Initialization
addJob.cur_id = 0 # static variable
condition = threading.Condition()
main_queue = Queue.Queue()
main_dict = {}
# Launch the main thread
main_thread = JobManager()
main_thread.daemon = True
main_thread.start()
| import Queue
import threading
class JobManager (threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
def run(self):
while True:
# Monitor lock and check
condition.acquire()
if main_queue.empty():
condition.wait()
# Launch the task
jobId,task,inputdata = main_queue.get()
main_dict[jobId] = {"task":task,"result":"Done","input":inputdata}
# Monitor notify
condition.notify()
condition.release()
def addJob(task, inputdata):
# Monitor lock
condition.acquire()
# Put task in the job queue
addJob.cur_id += 1
jobId = 'job' + `addJob.cur_id`
main_queue.put((jobId,task,inputdata))
main_dict[jobId] = None
# Monitor notify
condition.notify()
condition.release()
# Returns the jobId
return jobId
def isDone(jobId):
if main_dict.has_key(jobId):
return main_dict[jobId] != None
else:
return False
def getResult(jobId):
result = None
# Delete result from dictionary if there is sth
if not main_dict[jobId] == None:
result = main_dict[jobId]
del main_dict[jobId]
return result
# Initialization
addJob.cur_id = 0 # static variable
condition = threading.Condition()
main_queue = Queue.Queue()
main_dict = {}
# Launch the main thread
main_thread = JobManager()
main_thread.daemon = True
main_thread.start()
| agpl-3.0 | Python |
21eeceb718d11bc9eccbdfc1c5fc68a0d2c00ae5 | Update TCPReverseShell.py | lismore/OffensiveCyberTools | Shells/Python/Client/TCPReverseShell.py | Shells/Python/Client/TCPReverseShell.py | # Reverse TCP Shell in Python For Offensive Security/Penetration Testing Assignments
# Connect on LinkedIn https://www.linkedin.com/in/lismore or Twitter @patricklismore
#=========================================================================================================================================
# Python TCP Client
import socket
import subprocess
#Start client function
def startClient():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # create the socket object 'sock'
sock.connect(('192.168.1.95', 5000)) # Replace the IP and listening port to your attack machine
while True: # start an infinite loop
sentCommand = sock.recv(1024) # read the 1st KB of the tcp socket
if 'terminate' in sentCommand: # if we get a termiante string from the attack machine then we will close the socket, end the loop
sock.close()
break
else: # or else, the sent command gets sent to the victim shell process
CMD = subprocess.Popen(sentCommand, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
sock.send( CMD.stdout.read() ) # return shell result
sock.send( CMD.stderr.read() ) # return any shell errors
#Main function
def main ():
startClient()
#Program entry point
main()
| # Reverse TCP Shell in Python For Offensive Security/Penetration Testing Assignments
# Connect on LinkedIn https://www.linkedin.com/in/lismore or Twitter @patricklismore
#=======================================================================================================================================================================
# Python TCP Client
import socket
import subprocess
#Start client function
def startClient():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # create the socket object 'sock'
sock.connect(('192.168.1.95', 5000)) # Replace the IP and listening port to your attack machine
while True: # start an infinite loop
sentCommand = sock.recv(1024) # read the 1st KB of the tcp socket
if 'terminate' in sentCommand: # if we get a termiante string from the attack machine then we will close the socket, end the loop
sock.close()
break
else: # or else, the sent command gets sent to the victim shell process
CMD = subprocess.Popen(sentCommand, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
sock.send( CMD.stdout.read() ) # return shell result
sock.send( CMD.stderr.read() ) # return any shell errors
#Main function
def main ():
startClient()
#Program entry point
main()
| mit | Python |
5acedf6fab1607f8dea0cdc46df66daea870a275 | Update fastq.py | arq5x/poretools,arq5x/poretools | poretools/fastq.py | poretools/fastq.py | import Fast5File
import sys
def run(parser, args):
for fast5 in Fast5File.Fast5FileSet(args.files):
if args.start_time or args.end_time:
read_start_time = fast5.get_start_time()
read_end_time = fast5.get_end_time()
if args.start_time and args.start_time > read_start_time:
fast5.close()
continue
if args.end_time and args.end_time < read_end_time:
fast5.close()
continue
fas = fast5.get_fastqs(args.type)
if args.high_quality:
if fast5.get_complement_events_count() <= \
fast5.get_template_events_count():
fast5.close()
continue
for fa in fas:
if fa is None or \
len(fa.seq) < args.min_length or \
(len(fa.seq) > args.max_length and \
args.max_length > 0):
continue
print fa
fast5.close()
| import Fast5File
import sys
def run(parser, args):
for fast5 in Fast5File.Fast5FileSet(args.files):
if args.start_time or args.end_time:
read_start_time = fast5.get_start_time()
read_end_time = fast5.get_end_time()
if args.start_time and args.start_time > read_start_time:
fast5.close()
continue
if args.end_time and args.end_time < read_end_time:
fast5.close()
continue
fas = fast5.get_fastqs(args.type)
if args.high_quality:
if fast5.get_complement_events_count() <= \
fast5.get_template_events_count():
fast5.close()
continue
for fa in fas:
if fa is None or \
len(fa.seq) < args.min_length or \
len(fa.seq) > args.max_length and \
args.max_length > 0:
continue
print fa
fast5.close()
| mit | Python |
45b48a660c0854719f77821f0617c096b89c927d | Fix mimetype for project pages | belxlaz/portfolio,belxlaz/portfolio | portfolio/views.py | portfolio/views.py | from flask import abort, Blueprint, g, make_response, render_template
from portfolio.minify import render_minified
from portfolio.projects import Project
from portfolio.sitemap import Sitemap
site = Blueprint('site', __name__, static_folder='static')
projects = Project()
# home
@site.route('/')
def index():
return render_minified('home.html', projects=projects.ordered())
# project pages
@site.route('/<key>/')
def portfolio(key):
# check if project exists
if not projects.exist(key):
return abort(404)
# load project info
project = projects.get(key)
g.title = project['title']
g.cover = project['cover']
g.tagline = project['tagline']
return render_minified('{}.html'.format(key),
project=project,
suggestions=projects.suggestion(key, 6))
# seo and browser
@site.route('/robots.txt')
def robots():
response = make_response('User-agent: *\nDisallow:')
response.headers['Content-Type'] = 'text/plain'
return response
@site.route('/sitemap.xml')
def sitemap():
info = Sitemap(project_list=projects.order)
xml = render_template('sitemap.xml', pages=info.pages)
response = make_response(xml)
response.headers['Content-Type'] = 'application/xml'
return response
@site.route('/favicon.ico')
def favicon():
return site.send_static_file('imgs/favicon/favicon.ico')
# title and seo info auto generator
@site.context_processor
def title():
# basic values
name = 'Mabel Lazzarin'
about = "{}'s Portfolio | UX & Visual Designer".format(name)
image = 'cover.jpg'
# load page specific values
subtitle = g.get('title', None)
tagline = g.get('tagline', None)
title = '{} | {}'.format(subtitle, name) if subtitle else name
description = tagline if tagline else about
cover = g.get('cover', image)
# set page class
page_class = 'home' if name == title else 'project'
# return values
return {'name': name,
'title': title,
'description': description,
'cover': cover,
'page_class': page_class}
| from flask import abort, Blueprint, g, make_response, render_template
from portfolio.minify import render_minified
from portfolio.projects import Project
from portfolio.sitemap import Sitemap
site = Blueprint('site', __name__, static_folder='static')
projects = Project()
# home
@site.route('/')
def index():
return render_minified('home.html', projects=projects.ordered())
# project pages
@site.route('/<key>')
def portfolio(key):
# check if project exists
if not projects.exist(key):
return abort(404)
# load project info
project = projects.get(key)
g.title = project['title']
g.cover = project['cover']
g.tagline = project['tagline']
return render_minified('{}.html'.format(key),
project=project,
suggestions=projects.suggestion(key, 6))
# seo and browser
@site.route('/robots.txt')
def robots():
response = make_response('User-agent: *\nDisallow:')
response.headers['Content-Type'] = 'text/plain'
return response
@site.route('/sitemap.xml')
def sitemap():
info = Sitemap(project_list=projects.order)
xml = render_template('sitemap.xml', pages=info.pages)
response = make_response(xml)
response.headers['Content-Type'] = 'application/xml'
return response
@site.route('/favicon.ico')
def favicon():
return site.send_static_file('imgs/favicon/favicon.ico')
# title and seo info auto generator
@site.context_processor
def title():
# basic values
name = 'Mabel Lazzarin'
about = "{}'s Portfolio | UX & Visual Designer".format(name)
image = 'cover.jpg'
# load page specific values
subtitle = g.get('title', None)
tagline = g.get('tagline', None)
title = '{} | {}'.format(subtitle, name) if subtitle else name
description = tagline if tagline else about
cover = g.get('cover', image)
# set page class
page_class = 'home' if name == title else 'project'
# return values
return {'name': name,
'title': title,
'description': description,
'cover': cover,
'page_class': page_class}
| mit | Python |
512cfb2cedd3a3d2ae8b0d7d28bcb97f81492857 | Index dicts removed. | stbraun/mind_monitor | mind_monitor/monitor_common.py | mind_monitor/monitor_common.py | # coding=utf-8
"""
Common definitions.
"""
# Copyright (c) 2015 Stefan Braun
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE
# AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from collections import namedtuple
TIMESTAMP_FORMAT = '%Y-%m-%d %H:%M:%S'
TRecord = namedtuple('TRecord', ['session', 'timestamp',
'highAlpha', 'highBeta', 'highGamma',
'delta', 'theta',
'lowAlpha', 'lowBeta', 'lowGamma',
'attention', 'meditation', 'poorSignalQuality'])
TRaw = namedtuple('TRaw', ['session', 'timestamp', 'data'])
| # coding=utf-8
"""
Common definitions.
"""
# Copyright (c) 2015 Stefan Braun
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE
# AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from collections import namedtuple
TIMESTAMP_FORMAT = '%Y-%m-%d %H:%M:%S'
TRecord = namedtuple('TRecord', ['session', 'timestamp',
'highAlpha', 'highBeta', 'highGamma',
'delta', 'theta',
'lowAlpha', 'lowBeta', 'lowGamma',
'attention', 'meditation', 'poorSignalQuality'])
TRaw = namedtuple('TRaw', ['session', 'timestamp', 'data'])
# Indices into record tuple
idx_record = {'session': 0, 'timestamp': 1, 'highAlpha': 2, 'highBeta': 3, 'highGamma': 4,
'delta': 5,
'theta': 6,
'lowAlpha': 7, 'lowBeta': 8, 'lowGamma': 9,
'attention': 10, 'meditation': 11, 'poorSignalQuality': 12}
# Indices into raw data record
idx_raw = {'session': 0, 'timestamp': 1, 'data': 2}
| mit | Python |
565c95ce9a8ff96d177196c6dbf8d8f88cdfa029 | Add an error class for string data that is ignored by the parser | hackebrot/poyo | poyo/exceptions.py | poyo/exceptions.py | # -*- coding: utf-8 -*-
class PoyoException(Exception):
"""Super class for all of Poyo's exceptions."""
class NoMatchException(PoyoException):
"""Raised when the parser cannot find a pattern that matches the given
string.
"""
class NoParentException(PoyoException):
"""Raised when there is no parser object at the given level.
"""
class NoTypeException(PoyoException):
"""Raised when the parser is unable to determine the actual type for a
given string.
"""
class IgnoredMatchException(PoyoException):
"""Raised when a match does result in a Python representation such as a
comment or a blank line.
"""
| # -*- coding: utf-8 -*-
class PoyoException(Exception):
"""Super class for all of Poyo's exceptions."""
class NoMatchException(PoyoException):
"""Raised when the parser cannot find a pattern that matches the given
string.
"""
class NoParentException(PoyoException):
"""Raised when there is no parser object at the given level.
"""
class NoTypeException(PoyoException):
"""Raised when the parser is unable to determine the actual type for a
given string.
"""
| mit | Python |
8dc6c31e6c8970e9cda1bb298beb1d3b0620be58 | Remove feedparser | nerevu/riko,nerevu/riko | pipe2py/modules/pipefetchsitefeed.py | pipe2py/modules/pipefetchsitefeed.py | # -*- coding: utf-8 -*-
# vim: sw=4:ts=4:expandtab
"""
pipe2py.modules.pipefetchsitefeed
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
http://pipes.yahoo.com/pipes/docs?doc=sources#FetchSiteFeed
"""
import speedparser
from urllib2 import urlopen
from pipe2py.lib import autorss
from pipe2py.lib import utils
from pipe2py.lib.dotdict import DotDict
def pipe_fetchsitefeed(context=None, _INPUT=None, conf=None, **kwargs):
"""A source that fetches and parses the first feed found on one or more
sites. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipeforever pipe or an iterable of items or fields
conf : URL -- url
Yields
------
_OUTPUT : items
"""
conf = DotDict(conf)
urls = utils.listize(conf['URL'])
for item in _INPUT:
for item_url in urls:
url = utils.get_value(DotDict(item_url), DotDict(item), **kwargs)
url = utils.get_abspath(url)
if context and context.verbose:
print "pipe_fetchsitefeed loading:", url
for link in autorss.getRSSLink(url.encode('utf-8')):
parsed = speedparser.parse(urlopen(link).read())
for entry in utils.gen_entries(parsed):
yield entry
if item.get('forever'):
# _INPUT is pipeforever and not a loop,
# so we just yield our item once
break
| # -*- coding: utf-8 -*-
# vim: sw=4:ts=4:expandtab
"""
pipe2py.modules.pipefetchsitefeed
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
http://pipes.yahoo.com/pipes/docs?doc=sources#FetchSiteFeed
"""
try:
import speedparser as feedparser
except ImportError:
import feedparser
feedparser.USER_AGENT = (
"pipe2py (feedparser/%s) +https://github.com/ggaughan/pipe2py" %
feedparser.__version__
)
from urllib2 import urlopen
from pipe2py.lib import autorss
from pipe2py.lib import utils
from pipe2py.lib.dotdict import DotDict
def pipe_fetchsitefeed(context=None, _INPUT=None, conf=None, **kwargs):
"""A source that fetches and parses the first feed found on one or more
sites. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipeforever pipe or an iterable of items or fields
conf : URL -- url
Yields
------
_OUTPUT : items
"""
conf = DotDict(conf)
urls = utils.listize(conf['URL'])
for item in _INPUT:
for item_url in urls:
url = utils.get_value(DotDict(item_url), DotDict(item), **kwargs)
url = utils.get_abspath(url)
if context and context.verbose:
print "pipe_fetchsitefeed loading:", url
for link in autorss.getRSSLink(url.encode('utf-8')):
parsed = feedparser.parse(urlopen(link).read())
for entry in utils.gen_entries(parsed):
yield entry
if item.get('forever'):
# _INPUT is pipeforever and not a loop,
# so we just yield our item once
break
| mit | Python |
9d8dcc1d97f68fe8f6de25fac9737304e8ec05ff | Save dates in ISO format | ayushgoel/flock-message-reporter,ayushgoel/flock-message-reporter,ayushgoel/flock-message-reporter,ayushgoel/flock-message-reporter | events.py | events.py | from flask import request
import redis
import config
import datetime
import json
redis_client = redis.StrictRedis(host=config.redis_config["host"], port=config.redis_config["port"])
def handle_app_install(request):
print "Handling app install", request.json
userID = request.json["userId"]
token = request.json["token"]
redis_client.set(userID, token)
def handle_app_uninstall(request):
print "Handling app uninstall", request.json
userID = request.json["userId"]
redis_client.delete(userID)
def date_key():
return "{}-{:02d}".format(datetime.date.today().year, datetime.date.today().month)
def handle_message_action(request):
print "Handling message action", request.json
messagesUIDs = request.json["messageUids"]
for messageUID in messagesUIDs:
if not redis_client.exists(messageUID):
print "Saving UID", messageUID
key = date_key()
redis_client.rpush(key, messageUID)
redis_client.set(messageUID, json.dumps(request.json))
else:
print "Ignoring UID", messageUID
def messageDetailsForUID(UID):
if redis_client.exists(UID):
details = redis_client.get(UID)
print "Got UID", UID, details
return json.loads(details)
return None
def UIDsForMonth(month):
if redis_client.exists(month):
details = redis_client.lrange(month, 0, -1)
print "Got UID", month, details
return details
return None
| from flask import request
import redis
import config
import datetime
import json
redis_client = redis.StrictRedis(host=config.redis_config["host"], port=config.redis_config["port"])
def handle_app_install(request):
print "Handling app install", request.json
userID = request.json["userId"]
token = request.json["token"]
redis_client.set(userID, token)
def handle_app_uninstall(request):
print "Handling app uninstall", request.json
userID = request.json["userId"]
redis_client.delete(userID)
def date_key():
return "{0}-{1}".format(datetime.date.today().year, datetime.date.today().month)
def handle_message_action(request):
print "Handling message action", request.json
messagesUIDs = request.json["messageUids"]
for messageUID in messagesUIDs:
if not redis_client.exists(messageUID):
print "Saving UID", messageUID
key = date_key()
redis_client.rpush(key, messageUID)
redis_client.set(messageUID, json.dumps(request.json))
else:
print "Ignoring UID", messageUID
def messageDetailsForUID(UID):
if redis_client.exists(UID):
details = redis_client.get(UID)
print "Got UID", UID, details
return json.loads(details)
return None
def UIDsForMonth(month):
if redis_client.exists(month):
details = redis_client.lrange(month, 0, -1)
print "Got UID", month, details
return details
return None
| mit | Python |
3eba9b8c2c3a1736549cbe6f5dfcb8fd05da62ab | Add GET /tags | wking/nmhive,wking/nmhive | nmhive.py | nmhive.py | #!/usr/bin/env python
import json
import mailbox
import tempfile
import urllib.request
import flask
import flask_cors
app = flask.Flask(__name__)
flask_cors.CORS(app)
_AVAILABLE_TAGS = {
'bug',
'needs-review',
'obsolete',
'patch',
}
_TAGS = {}
@app.route('/tags', methods=['GET'])
def tags():
return flask.Response(
response=json.dumps(sorted(_AVAILABLE_TAGS)),
mimetype='application/json')
@app.route('/mid/<message_id>', methods=['GET', 'POST'])
def message_id_tags(message_id):
if flask.request.method == 'POST':
tags = _TAGS.get(message_id, set())
new_tags = tags.copy()
for change in flask.request.get_json():
if change.startswith('+'):
new_tags.add(change[1:])
elif change.startswith('-'):
try:
new_tags.remove(change[1:])
except KeyError:
return flask.Response(status=400)
else:
return flask.Response(status=400)
_TAGS[message_id] = new_tags
return flask.Response(
response=json.dumps(sorted(new_tags)),
mimetype='application/json')
elif flask.request.method == 'GET':
try:
tags = _TAGS[message_id]
except KeyError:
return flask.Response(status=404)
return flask.Response(
response=json.dumps(sorted(tags)),
mimetype='application/json')
@app.route('/gmane/<group>/<int:article>', methods=['GET'])
def gmane_message_id(group, article):
url = 'http://download.gmane.org/{}/{}/{}'.format(
group, article, article + 1)
response = urllib.request.urlopen(url=url, timeout=3)
mbox_bytes = response.read()
with tempfile.NamedTemporaryFile(prefix='nmbug-', suffix='.mbox') as f:
f.write(mbox_bytes)
mbox = mailbox.mbox(path=f.name)
_, message = mbox.popitem()
message_id = message['message-id']
return flask.Response(
response=message_id.lstrip('<').rstrip('>'),
mimetype='text/plain')
if __name__ == '__main__':
app.debug = True
app.run(host='0.0.0.0')
| #!/usr/bin/env python
import json
import mailbox
import tempfile
import urllib.request
import flask
import flask_cors
app = flask.Flask(__name__)
flask_cors.CORS(app)
_TAGS = {}
@app.route('/mid/<message_id>', methods=['GET', 'POST'])
def message_id_tags(message_id):
if flask.request.method == 'POST':
tags = _TAGS.get(message_id, set())
new_tags = tags.copy()
for change in flask.request.get_json():
if change.startswith('+'):
new_tags.add(change[1:])
elif change.startswith('-'):
try:
new_tags.remove(change[1:])
except KeyError:
return flask.Response(status=400)
else:
return flask.Response(status=400)
_TAGS[message_id] = new_tags
return flask.Response(
response=json.dumps(sorted(new_tags)),
mimetype='application/json')
elif flask.request.method == 'GET':
try:
tags = _TAGS[message_id]
except KeyError:
return flask.Response(status=404)
return flask.Response(
response=json.dumps(sorted(tags)),
mimetype='application/json')
@app.route('/gmane/<group>/<int:article>', methods=['GET'])
def gmane_message_id(group, article):
url = 'http://download.gmane.org/{}/{}/{}'.format(
group, article, article + 1)
response = urllib.request.urlopen(url=url, timeout=3)
mbox_bytes = response.read()
with tempfile.NamedTemporaryFile(prefix='nmbug-', suffix='.mbox') as f:
f.write(mbox_bytes)
mbox = mailbox.mbox(path=f.name)
_, message = mbox.popitem()
message_id = message['message-id']
return flask.Response(
response=message_id.lstrip('<').rstrip('>'),
mimetype='text/plain')
if __name__ == '__main__':
app.debug = True
app.run(host='0.0.0.0')
| bsd-2-clause | Python |
a621333a4c4fcc161f076980c9147faa555de5de | Store firmware size as last 4 bytes of padding area. | oopy/micropython,TDAbboud/micropython,alex-robbins/micropython,tobbad/micropython,AriZuu/micropython,SHA2017-badge/micropython-esp32,kerneltask/micropython,jmarcelino/pycom-micropython,lowRISC/micropython,tuc-osg/micropython,MrSurly/micropython-esp32,hosaka/micropython,pozetroninc/micropython,tuc-osg/micropython,tuc-osg/micropython,alex-robbins/micropython,chrisdearman/micropython,deshipu/micropython,swegener/micropython,jmarcelino/pycom-micropython,redbear/micropython,micropython/micropython-esp32,dxxb/micropython,trezor/micropython,pozetroninc/micropython,deshipu/micropython,chrisdearman/micropython,puuu/micropython,matthewelse/micropython,adafruit/micropython,adafruit/circuitpython,micropython/micropython-esp32,hosaka/micropython,alex-robbins/micropython,dxxb/micropython,bvernoux/micropython,Peetz0r/micropython-esp32,blazewicz/micropython,matthewelse/micropython,Timmenem/micropython,lowRISC/micropython,adafruit/circuitpython,TDAbboud/micropython,turbinenreiter/micropython,chrisdearman/micropython,torwag/micropython,ganshun666/micropython,bvernoux/micropython,tuc-osg/micropython,ganshun666/micropython,hosaka/micropython,adafruit/circuitpython,torwag/micropython,lowRISC/micropython,infinnovation/micropython,hosaka/micropython,henriknelson/micropython,swegener/micropython,TDAbboud/micropython,pramasoul/micropython,blazewicz/micropython,dxxb/micropython,AriZuu/micropython,tralamazza/micropython,alex-march/micropython,alex-march/micropython,lowRISC/micropython,oopy/micropython,toolmacher/micropython,ryannathans/micropython,AriZuu/micropython,tobbad/micropython,puuu/micropython,pozetroninc/micropython,TDAbboud/micropython,micropython/micropython-esp32,bvernoux/micropython,tobbad/micropython,MrSurly/micropython,adafruit/micropython,tralamazza/micropython,tobbad/micropython,toolmacher/micropython,ryannathans/micropython,cwyark/micropython,jmarcelino/pycom-micropython,hiway/micropython,trezor/micropython,henriknelson/micropython,SHA2017-badge/micropython-esp32,matthewelse/micropython,mhoffma/micropython,ganshun666/micropython,dxxb/micropython,kerneltask/micropython,torwag/micropython,redbear/micropython,hiway/micropython,trezor/micropython,blazewicz/micropython,redbear/micropython,TDAbboud/micropython,dmazzella/micropython,HenrikSolver/micropython,PappaPeppar/micropython,puuu/micropython,matthewelse/micropython,henriknelson/micropython,HenrikSolver/micropython,tralamazza/micropython,mhoffma/micropython,Timmenem/micropython,ryannathans/micropython,Peetz0r/micropython-esp32,MrSurly/micropython,blazewicz/micropython,pramasoul/micropython,tobbad/micropython,MrSurly/micropython-esp32,oopy/micropython,pfalcon/micropython,selste/micropython,jmarcelino/pycom-micropython,turbinenreiter/micropython,puuu/micropython,hosaka/micropython,Peetz0r/micropython-esp32,alex-robbins/micropython,infinnovation/micropython,bvernoux/micropython,HenrikSolver/micropython,hiway/micropython,hiway/micropython,MrSurly/micropython-esp32,selste/micropython,SHA2017-badge/micropython-esp32,cwyark/micropython,mhoffma/micropython,trezor/micropython,torwag/micropython,pfalcon/micropython,PappaPeppar/micropython,tuc-osg/micropython,Peetz0r/micropython-esp32,deshipu/micropython,SHA2017-badge/micropython-esp32,ryannathans/micropython,turbinenreiter/micropython,infinnovation/micropython,swegener/micropython,adafruit/micropython,adafruit/circuitpython,matthewelse/micropython,pozetroninc/micropython,micropython/micropython-esp32,ganshun666/micropython,Timmenem/micropython,puuu/micropython,deshipu/micropython,cwyark/micropython,oopy/micropython,chrisdearman/micropython,blazewicz/micropython,mhoffma/micropython,redbear/micropython,swegener/micropython,infinnovation/micropython,torwag/micropython,lowRISC/micropython,adafruit/circuitpython,turbinenreiter/micropython,HenrikSolver/micropython,pozetroninc/micropython,PappaPeppar/micropython,alex-robbins/micropython,alex-march/micropython,MrSurly/micropython,MrSurly/micropython-esp32,PappaPeppar/micropython,swegener/micropython,kerneltask/micropython,adafruit/circuitpython,pramasoul/micropython,selste/micropython,HenrikSolver/micropython,pramasoul/micropython,Timmenem/micropython,dmazzella/micropython,adafruit/micropython,adafruit/micropython,SHA2017-badge/micropython-esp32,chrisdearman/micropython,pfalcon/micropython,alex-march/micropython,jmarcelino/pycom-micropython,dxxb/micropython,dmazzella/micropython,trezor/micropython,toolmacher/micropython,selste/micropython,ganshun666/micropython,AriZuu/micropython,henriknelson/micropython,kerneltask/micropython,henriknelson/micropython,tralamazza/micropython,kerneltask/micropython,hiway/micropython,redbear/micropython,infinnovation/micropython,cwyark/micropython,dmazzella/micropython,selste/micropython,cwyark/micropython,Timmenem/micropython,toolmacher/micropython,pramasoul/micropython,mhoffma/micropython,turbinenreiter/micropython,Peetz0r/micropython-esp32,toolmacher/micropython,pfalcon/micropython,deshipu/micropython,MrSurly/micropython,pfalcon/micropython,MrSurly/micropython,PappaPeppar/micropython,MrSurly/micropython-esp32,micropython/micropython-esp32,ryannathans/micropython,matthewelse/micropython,oopy/micropython,bvernoux/micropython,alex-march/micropython,AriZuu/micropython | esp8266/makeimg.py | esp8266/makeimg.py | import sys
import struct
SEGS_MAX_SIZE = 0x9000
assert len(sys.argv) == 4
with open(sys.argv[3], 'wb') as fout:
with open(sys.argv[1], 'rb') as f:
data_flash = f.read()
fout.write(data_flash)
print('flash ', len(data_flash))
with open(sys.argv[2], 'rb') as f:
data_rom = f.read()
pad = b'\xff' * (SEGS_MAX_SIZE - len(data_flash))
assert len(pad) >= 4
fout.write(pad[:-4])
fout.write(struct.pack("I", SEGS_MAX_SIZE + len(data_rom)))
print('padding ', len(pad))
fout.write(data_rom)
print('irom0text', len(data_rom))
print('total ', SEGS_MAX_SIZE + len(data_rom))
| import sys
SEGS_MAX_SIZE = 0x9000
assert len(sys.argv) == 4
with open(sys.argv[3], 'wb') as fout:
with open(sys.argv[1], 'rb') as f:
data_flash = f.read()
fout.write(data_flash)
print('flash ', len(data_flash))
pad = b'\xff' * (SEGS_MAX_SIZE - len(data_flash))
fout.write(pad)
print('padding ', len(pad))
with open(sys.argv[2], 'rb') as f:
data_rom = f.read()
fout.write(data_rom)
print('irom0text', len(data_rom))
print('total ', SEGS_MAX_SIZE + len(data_rom))
| mit | Python |
cf095ece24f2b50298afd1d5f5855771e2685cff | Fix media root to work with the new namespace. | graingert/gutter-django,graingert/gutter-django,graingert/gutter-django,disqus/gutter-django,disqus/gutter-django,disqus/gutter-django,disqus/gutter-django | gargoyle/web/nexus_modules.py | gargoyle/web/nexus_modules.py | """
gargoyle.nexus_modules
~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010 DISQUS.
:license: Apache License 2.0, see LICENSE for more details.
"""
import nexus
import os
from gargoyle.client.singleton import gargoyle
from django.http import HttpResponse, HttpResponseNotFound
class GargoyleModule(nexus.NexusModule):
home_url = 'index'
name = 'gargoyle'
media_root = os.path.normpath(os.path.join(os.path.dirname(__file__), 'media'))
def get_title(self):
return 'Gargoyle'
def get_urls(self):
from django.conf.urls.defaults import patterns, url
urlpatterns = patterns('',
url(r'^add/$', self.as_view(self.add), name='add'),
url(r'^update/$', self.as_view(self.update), name='update'),
url(r'^delete/$', self.as_view(self.delete), name='delete'),
url(r'^status/$', self.as_view(self.status), name='status'),
url(r'^conditions/add/$', self.as_view(self.add_condition), name='add-condition'),
url(r'^conditions/remove/$', self.as_view(self.remove_condition), name='remove-condition'),
url(r'^$', self.as_view(self.index), name='index'),
)
return urlpatterns
def render_on_dashboard(self, request):
return 'switches'
def index(self, request):
pass
def add(self, request):
pass
def update(self, request):
pass
def status(self, request):
pass
def delete(self, request):
pass
def add_condition(self, request):
pass
def remove_condition(self, request):
pass
nexus.site.register(GargoyleModule, 'gargoyle') | """
gargoyle.nexus_modules
~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010 DISQUS.
:license: Apache License 2.0, see LICENSE for more details.
"""
import nexus
class GargoyleModule(nexus.NexusModule):
home_url = 'index'
name = 'gargoyle'
def get_title(self):
return 'Gargoyle'
def get_urls(self):
from django.conf.urls.defaults import patterns, url
urlpatterns = patterns('',
url(r'^add/$', self.as_view(self.add), name='add'),
url(r'^update/$', self.as_view(self.update), name='update'),
url(r'^delete/$', self.as_view(self.delete), name='delete'),
url(r'^status/$', self.as_view(self.status), name='status'),
url(r'^conditions/add/$', self.as_view(self.add_condition), name='add-condition'),
url(r'^conditions/remove/$', self.as_view(self.remove_condition), name='remove-condition'),
url(r'^$', self.as_view(self.index), name='index'),
)
return urlpatterns
def render_on_dashboard(self, request):
return 'switches'
def index(self, request):
pass
def add(self, request):
pass
def update(self, request):
pass
def status(self, request):
pass
def delete(self, request):
pass
def add_condition(self, request):
pass
def remove_condition(self, request):
pass
nexus.site.register(GargoyleModule, 'gargoyle') | apache-2.0 | Python |
6fb2746386fe541ea56ba9264e96591da62476e0 | fix (pre-existing) bug with wrong filename | simonvh/genomepy | genomepy/plugins/blacklist.py | genomepy/plugins/blacklist.py | import os.path
import re
import sys
import zlib
from urllib.request import urlopen
from genomepy.plugin import Plugin
class BlacklistPlugin(Plugin):
base_url = "http://mitra.stanford.edu/kundaje/akundaje/release/blacklists/"
http_dict = {
"ce10": base_url + "ce10-C.elegans/ce10-blacklist.bed.gz",
"dm3": base_url + "dm3-D.melanogaster/dm3-blacklist.bed.gz",
"hg38": base_url + "hg38-human/hg38.blacklist.bed.gz",
"hg19": base_url + "hg38-human/hg19.blacklist.bed.gz",
"mm9": base_url + "mm9-mouse/mm9-blacklist.bed.gz",
"mm10": base_url + "mm10-mouse/mm10.blacklist.bed.gz",
}
def after_genome_download(self, genome, force=False):
props = self.get_properties(genome)
fname = props["blacklist"]
if force and os.path.exists(fname):
# Start from scratch
os.remove(fname)
if not os.path.exists(fname):
link = self.http_dict.get(genome.name.split('.')[0])
if link is None:
sys.stderr.write("No blacklist found for {}\n".format(genome.name))
return
try:
sys.stderr.write("Downloading blacklist {}\n".format(link))
response = urlopen(link)
with open(fname, "wb") as bed:
# unzip the response with some zlib magic
unzipped = zlib.decompress(
response.read(), 16 + zlib.MAX_WBITS
)
bed.write(unzipped)
except Exception as e:
sys.stderr.write(e)
sys.stderr.write(
"Could not download blacklist file from {}".format(link)
)
def get_properties(self, genome):
props = {
"blacklist": re.sub(".fa(.gz)?$", ".blacklist.bed", genome.filename)
}
return props
| import os.path
import re
import sys
import zlib
from urllib.request import urlopen
from genomepy.plugin import Plugin
class BlacklistPlugin(Plugin):
base_url = "http://mitra.stanford.edu/kundaje/akundaje/release/blacklists/"
http_dict = {
"ce10": base_url + "ce10-C.elegans/ce10-blacklist.bed.gz",
"dm3": base_url + "dm3-D.melanogaster/dm3-blacklist.bed.gz",
"hg38": base_url + "hg38-human/hg38.blacklist.bed.gz",
"hg19": base_url + "hg38-human/hg19.blacklist.bed.gz",
"mm9": base_url + "mm9-mouse/mm9-blacklist.bed.gz",
"mm10": base_url + "mm10-mouse/mm10.blacklist.bed.gz",
}
def after_genome_download(self, genome, force=False):
props = self.get_properties(genome)
fname = props["blacklist"]
if force and os.path.exists(fname):
# Start from scratch
os.remove(fname)
if not os.path.exists(fname):
link = self.http_dict.get(genome.name)
if link is None:
sys.stderr.write("No blacklist found for {}\n".format(genome.name))
return
try:
sys.stderr.write("Downloading blacklist {}\n".format(link))
response = urlopen(link)
with open(fname, "wb") as bed:
# unzip the response with some zlib magic
unzipped = zlib.decompress(
response.read(), 16 + zlib.MAX_WBITS
).decode("utf-8")
bed.write(unzipped)
except Exception as e:
sys.stderr.write(e)
sys.stderr.write(
"Could not download blacklist file from {}".format(link)
)
def get_properties(self, genome):
props = {
"blacklist": re.sub(".fa(.gz)?$", ".blacklist.bed", genome.filename)
}
return props
| mit | Python |
3e769d5bdc19eb65da0972226d9fb4abb5d149db | update example for button type template | ben-cunningham/pybot,ben-cunningham/python-messenger-bot | example/example.py | example/example.py | from flask import Flask
from flask import request
import sys
import os
sys.path.append("..")
from fbmsgbot.bot import Bot
from fbmsgbot.models.message import Message
from fbmsgbot.models.template import Template
from fbmsgbot.models.attachment import WebUrlButton, Element
import json
app = Flask(__name__)
bot = Bot(os.environ['PYBOT_TOKEN'])
def set_welcome():
response, error = bot.set_welcome("Welcome to PyBot!")
print response
print error
@app.route('/', methods=['GET', 'POST'])
def webhook():
if request.args.get("hub.verify_token") == 'test_token':
return request.args.get("hub.challenge")
msgs = bot.messages_for_request(request)
for m in msgs:
msg = None
if not hasattr(m, 'text'):
break
if m.text == 'generic button':
buttons = []
b = WebUrlButton('google', 'https://www.google.ca')
buttons.append(b)
elements = [Element('test', 'http://www.newton.ac.uk/files/covers/968361.jpg', 'test subtitle', buttons)]
tmpl = Template('generic', elements=elements)
payload = tmpl
msg = Message('template', payload)
elif m.text == 'button':
buttons = []
b = WebUrlButton('google', 'https://www.google.ca')
buttons.append(b)
tmpl = Template('button', buttons=buttons,
title='What site do you want to go to?')
payload = tmpl
msg = Message('template', payload)
else:
payload = m.text
msg = Message('text', payload)
response, error = bot.send_message(msg, m.sender)
if error:
return 'Bad Request'
return 'OK'
if __name__ == "__main__":
app.debug = True
set_welcome()
app.run(port=8000)
| from flask import Flask
from flask import request
import sys
import os
sys.path.append("..")
from fbmsgbot.bot import Bot
from fbmsgbot.models.message import Message
from fbmsgbot.models.template import Template
from fbmsgbot.models.attachment import WebUrlButton, Element
import json
app = Flask(__name__)
bot = Bot(os.environ['PYBOT_TOKEN'])
def set_welcome():
response, error = bot.set_welcome("Welcome to PyBot!")
print response
print error
@app.route('/', methods=['GET', 'POST'])
def webhook():
if request.args.get("hub.verify_token") == 'test_token':
return request.args.get("hub.challenge")
msgs = bot.messages_for_request(request)
for m in msgs:
msg = None
if not hasattr(m, 'text'):
break
if m.text == 'button':
buttons = []
b = WebUrlButton('google', 'https://www.google.ca')
buttons.append(b)
elements = [Element('test', 'http://www.newton.ac.uk/files/covers/968361.jpg', 'test subtitle', buttons)]
tmpl = Template('generic', elements=elements)
payload = tmpl
msg = Message('template', payload)
else:
payload = m.text
msg = Message('text', payload)
response, error = bot.send_message(msg, m.sender)
if error:
return 'Bad Request'
return 'OK'
if __name__ == "__main__":
app.debug = True
set_welcome()
app.run(port=8000)
| mit | Python |
502167c451b5ebcfa0a18568fd992d942cc34c86 | Add missing imports | Tigge/platinumshrimp | plugins/invitejoiner/invitejoiner.py | plugins/invitejoiner/invitejoiner.py | from __future__ import division, absolute_import, print_function, unicode_literals
import sys
import plugin
from twisted.python import log
class Invitejoiner(plugin.Plugin):
def __init__(self):
plugin.Plugin.__init__(self, "Invitejoiner")
def invited(self, server, channel):
log.msg("Invited to: ", channel)
self.join(server, channel)
if __name__ == "__main__":
sys.exit(Invitejoiner.run())
|
import plugin
from twisted.python import log
class Invitejoiner(plugin.Plugin):
def __init__(self):
plugin.Plugin.__init__(self, "Invitejoiner")
def invited(self, server, channel):
log.msg("Invited to: ", channel)
self.join(server, channel)
if __name__ == "__main__":
sys.exit(Invitejoiner.run())
| mit | Python |
e17a5d6c23eedd44917745cdf4afcba6b85721c4 | Fix issue detected by mypy. | apache/libcloud,andrewsomething/libcloud,mistio/libcloud,Kami/libcloud,apache/libcloud,Kami/libcloud,Kami/libcloud,mistio/libcloud,apache/libcloud,andrewsomething/libcloud,mistio/libcloud,andrewsomething/libcloud | example_compute.py | example_compute.py | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
EC2 = get_driver(Provider.EC2)
Rackspace = get_driver(Provider.RACKSPACE)
drivers = [EC2('access key id', 'secret key', region='us-east-1'),
Rackspace('username', 'api key', region='iad')]
nodes = []
for driver in drivers:
nodes.extend(driver.list_nodes())
print(nodes)
# [ <Node: provider=Amazon, status=RUNNING, name=bob, ip=1.2.3.4.5>,
# <Node: provider=Rackspace, status=REBOOT, name=korine, ip=6.7.8.9.10>, ... ]
# grab the node named "test"
node = [n for n in nodes if n.name == 'test'][0]
# reboot "test"
node.reboot()
| # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
EC2 = get_driver(Provider.EC2)
Rackspace = get_driver(Provider.RACKSPACE)
drivers = [EC2('access key id', 'secret key', region='us-east-1'),
Rackspace('username', 'api key', region='iad')]
nodes = [driver.list_nodes() for driver in drivers]
print(nodes)
# [ <Node: provider=Amazon, status=RUNNING, name=bob, ip=1.2.3.4.5>,
# <Node: provider=Rackspace, status=REBOOT, name=korine, ip=6.7.8.9.10>, ... ]
# grab the node named "test"
node = [n for n in nodes if n.name == 'test'][0]
# reboot "test"
node.reboot()
| apache-2.0 | Python |
3e7267ed1a2e31fbb3fc1bd10535c53b1d8dcf85 | simplify examples | botify-labs/simpleflow,botify-labs/simpleflow | examples/canvas.py | examples/canvas.py | from __future__ import print_function
import time
from simpleflow import (
activity,
futures,
Workflow,
)
from simpleflow.canvas import Group, Chain
from simpleflow.task import ActivityTask
@activity.with_attributes(task_list='example', version='example')
def increment_slowly(x):
time.sleep(1)
return x + 1
@activity.with_attributes(task_list='example', version='example')
def multiply(numbers):
val = 1
for n in numbers:
val *= n
return val
@activity.with_attributes(task_list='example', version='example')
def fail_incrementing(x):
raise ValueError("Failure on CPU intensive operation '+'")
# This workflow demonstrates the use of simpleflow's Chains and Groups
#
# A `Group` wraps a list of tasks that can be executed in parallel. It
# returns a future that is considered "finished" only once ALL the tasks
# in the group are finished.
#
# A `Chain` wraps a list of tasks that need to be executed sequentially.
# As groups, it returns a future that is considered "finished" only
# when all the tasks inside the Chain are finished.
class CanvasWorkflow(Workflow):
name = 'canvas'
version = 'example'
task_list = 'example'
def run(self):
x = 1
y = 2
z = 3
future = self.submit(
Chain(
Group(
(increment_slowly, x),
(increment_slowly, y),
(increment_slowly, z),
),
multiply,
send_result=True
)
)
futures.wait(future)
res = future.result[-1]
print('({}+1)*({}+1)*({}+1) = {}'.format(x, y, z, res))
# Canva's and Group's can also be "optional"
future = self.submit(
Chain(
(fail_incrementing, x),
(increment_slowly, (), dict(x=1)), # never executed
raises_on_failure=False,
)
)
futures.wait(future)
print('SUCCESS!')
| from __future__ import print_function
import time
from simpleflow import (
activity,
futures,
Workflow,
)
from simpleflow.canvas import Group, Chain
from simpleflow.task import ActivityTask
@activity.with_attributes(task_list='example', version='example')
def increment_slowly(x):
time.sleep(1)
return x + 1
@activity.with_attributes(task_list='example', version='example')
def multiply(numbers):
val = 1
for n in numbers:
val *= n
return val
@activity.with_attributes(task_list='example', version='example')
def fail_incrementing(x):
raise ValueError("Failure on CPU intensive operation '+'")
# This workflow demonstrates the use of simpleflow's Chains and Groups
#
# A `Group` wraps a list of tasks that can be executed in parallel. It
# returns a future that is considered "finished" only once ALL the tasks
# in the group are finished.
#
# A `Chain` wraps a list of tasks that need to be executed sequentially.
# As groups, it returns a future that is considered "finished" only
# when all the tasks inside the Chain are finished.
class CanvasWorkflow(Workflow):
name = 'canvas'
version = 'example'
task_list = 'example'
def run(self):
x = 1
y = 2
z = 3
future = self.submit(
Chain(
Group(
ActivityTask(increment_slowly, x),
ActivityTask(increment_slowly, y),
ActivityTask(increment_slowly, z),
),
ActivityTask(multiply),
send_result=True
)
)
futures.wait(future)
res = future.result[-1]
print('({}+1)*({}+1)*({}+1) = {}'.format(x, y, z, res))
# Canva's and Group's can also be "optional"
future = self.submit(
Chain(
ActivityTask(fail_incrementing, x),
ActivityTask(increment_slowly, 1), # never executed
raises_on_failure=False,
)
)
futures.wait(future)
print('SUCCESS!')
| mit | Python |
12f01d84b86c09db731c582a8600f89b3f513fc2 | update syntax for server example | pavlov99/json-rpc | examples/server.py | examples/server.py | """ Example of json-rpc usage with Wergzeug and requests.
NOTE: there are no Werkzeug and requests in dependencies of json-rpc.
NOTE: server handles all url paths the same way (there are no different urls).
"""
from werkzeug.wrappers import Request, Response
from werkzeug.serving import run_simple
from jsonrpc import JSONRPCResponseManager, dispatcher
@dispatcher.add_method
def foobar(**kwargs):
return kwargs["foo"] + kwargs["bar"]
@Request.application
def application(request):
# Dispatcher is dictionary {<method_name>: callable}
dispatcher["echo"] = lambda s: s
dispatcher["add"] = lambda a, b: a + b
response = JSONRPCResponseManager.handle(
request.data, dispatcher)
return Response(response.json, mimetype='application/json')
if __name__ == '__main__':
run_simple('localhost', 4000, application)
| """ Example of json-rpc usage with Wergzeug and requests.
NOTE: there are no Werkzeug and requests in dependencies of json-rpc.
NOTE: server handles all url paths the same way (there are no different urls).
"""
from werkzeug.wrappers import Request, Response
from werkzeug.serving import run_simple
from jsonrpc.jsonrpc import JSONRPCResponseManager
@Request.application
def application(request):
# Dispatcher is a dictionary {<method_name>: callable function}
dispatcher = {
"echo": lambda s: s,
"add": lambda a, b: a + b,
"foobar": lambda **kwargs: kwargs["foo"] + kwargs["bar"],
}
response = JSONRPCResponseManager.handle(
request.data, dispatcher)
return Response(response.json, mimetype='application/json')
if __name__ == '__main__':
run_simple('localhost', 4000, application)
| mit | Python |
55af90dd1c4aa7caa1dc34b8ff1cca2f27e9d748 | Add atom features to simple.py example | crcollins/molml | examples/simple.py | examples/simple.py | from molml.features import CoulombMatrix
from molml.features import LocalCoulombMatrix
# Define some base data
H2_ELES = ['H', 'H']
H2_NUMS = [1, 1]
H2_COORDS = [
[0.0, 0.0, 0.0],
[1.0, 0.0, 0.0],
]
H2_CONNS = {
0: {1: '1'},
1: {0: '1'},
}
HCN_ELES = ['H', 'C', 'N']
HCN_NUMS = [1, 6, 7]
HCN_COORDS = [
[-1.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[1.0, 0.0, 0.0],
]
HCN_CONNS = {
0: {1: '1'},
1: {0: '1', 2: '3'},
2: {1: '3'},
}
if __name__ == "__main__":
# Example of Generating the Coulomb matrix with just elements and coords.
feat = CoulombMatrix()
H2 = (H2_ELES, H2_COORDS)
HCN = (HCN_ELES, HCN_COORDS)
feat.fit([H2, HCN])
print "Transformed H2"
print feat.transform([H2])
print "H2 and HCN transformed"
print feat.transform([H2, HCN])
print
# Example of generating the Coulomb matrix with elements, coords, and
# connections.
feat = CoulombMatrix()
H2_conn = (H2_ELES, H2_COORDS, H2_CONNS)
HCN_conn = (HCN_ELES, HCN_COORDS, HCN_CONNS)
print feat.fit_transform([H2_conn, HCN_conn])
print
# Example of generating the Coulomb matrix using a specified input_type
print "User specified input_type"
feat = CoulombMatrix(input_type=("coords", "numbers"))
H2_spec = (H2_COORDS, H2_NUMS)
HCN_spec = (HCN_COORDS, HCN_NUMS)
print feat.fit_transform([H2_spec, HCN_spec])
print
# Example of generating the Local Coulomb matrix (atom-wise
# representation)
print "Atom feature"
feat = LocalCoulombMatrix()
print feat.fit_transform([H2, HCN])
| from molml.features import CoulombMatrix
# Define some base data
H2_ELES = ['H', 'H']
H2_NUMS = [1, 1]
H2_COORDS = [
[0.0, 0.0, 0.0],
[1.0, 0.0, 0.0],
]
H2_CONNS = {
0: {1: '1'},
1: {0: '1'},
}
HCN_ELES = ['H', 'C', 'N']
HCN_NUMS = [1, 6, 7]
HCN_COORDS = [
[-1.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[1.0, 0.0, 0.0],
]
HCN_CONNS = {
0: {1: '1'},
1: {0: '1', 2: '3'},
2: {1: '3'},
}
if __name__ == "__main__":
# Example of Generating the Coulomb matrix with just elements and coords.
feat = CoulombMatrix()
H2 = (H2_ELES, H2_COORDS)
HCN = (HCN_ELES, HCN_COORDS)
feat.fit([H2, HCN])
print "Transformed H2"
print feat.transform([H2])
print "H2 and HCN transformed"
print feat.transform([H2, HCN])
print
# Example of generating the Coulomb matrix with elements, coords, and
# connections.
feat = CoulombMatrix()
H2_conn = (H2_ELES, H2_COORDS, H2_CONNS)
HCN_conn = (HCN_ELES, HCN_COORDS, HCN_CONNS)
print feat.fit_transform([H2_conn, HCN_conn])
print
# Example of generating the Coulomb matrix using a specified input_type
print "User specified input_type"
feat = CoulombMatrix(input_type=("coords", "numbers"))
H2_spec = (H2_COORDS, H2_NUMS)
HCN_spec = (HCN_COORDS, HCN_NUMS)
print feat.fit_transform([H2_spec, HCN_spec])
| mit | Python |
85722c2efeaeb606591012fd19831fc31d199f86 | move start_analysis to xpdAn | NSLS-II-XPD/ipython_ophyd,NSLS-II-XPD/ipython_ophyd | profile_analysis/startup/999-load.py | profile_analysis/startup/999-load.py | from xpdan.startup.analysis import start_analysis | from bluesky.callbacks.zmq import RemoteDispatcher
from bluesky.utils import install_qt_kicker
# setup glbl
# from xpdan.pipelines.callback import MainCallback
from xpdan.pipelines.main import raw_source
from xpdan.pipelines.main import (mask_kwargs as _mask_kwargs,
pdf_kwargs as _pdf_kwargs,
fq_kwargs as _fq_kwargs,
mask_setting as _mask_setting)
def start_analysis(mask_kwargs=None, pdf_kwargs=None, fq_kwargs=None,
mask_setting=None):
"""Start analysis pipeline
Parameters
----------
mask_kwargs: dict
The kwargs passed to the masking see xpdtools.tools.mask_img
pdf_kwargs: dict
The kwargs passed to the pdf generator, see xpdtools.tools.pdf_getter
fq_kwargs: dict
The kwargs passed to the fq generator, see xpdtools.tools.fq_getter
mask_setting: dict
The setting of the mask
"""
# TODO: fix this ip address to the perma-proxy
d = RemoteDispatcher('10.28.0.202:5578')
install_qt_kicker(
loop=d.loop) # This may need to be d._loop depending on tag
if mask_setting is None:
mask_setting = {}
if fq_kwargs is None:
fq_kwargs = {}
if pdf_kwargs is None:
pdf_kwargs = {}
if mask_kwargs is None:
mask_kwargs = {}
for a, b in zip([mask_kwargs, pdf_kwargs, fq_kwargs, mask_setting],
[_mask_kwargs, _pdf_kwargs, _fq_kwargs, _mask_setting]):
if a:
b.update(a)
d.subscribe(raw_source.emit)
d.start()
| bsd-2-clause | Python |
241a729208f8e8fa4e914145a1e9288766ad9e9d | revert owlbot main branch templates (#39) | googleapis/python-essential-contacts,googleapis/python-essential-contacts | owlbot.py | owlbot.py | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import os
import synthtool as s
import synthtool.gcp as gcp
from synthtool.languages import python
common = gcp.CommonTemplates()
default_version = "v1"
for library in s.get_staging_dirs(default_version):
s.move(library, excludes=["setup.py", "README.rst", "docs/index.rst"])
s.remove_staging_dirs()
# ----------------------------------------------------------------------------
# Add templated files
# ----------------------------------------------------------------------------
templated_files = common.py_library(cov_level=97, microgenerator=True)
python.py_samples(skip_readmes=True)
s.move(templated_files, excludes=["setup.py", "README.rst", "docs/index.rst"])
s.shell.run(["nox", "-s", "blacken"], hide_output=False)
| # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import os
import synthtool as s
import synthtool.gcp as gcp
from synthtool.languages import python
common = gcp.CommonTemplates()
default_version = "v1"
for library in s.get_staging_dirs(default_version):
s.move(library, excludes=["setup.py", "README.rst", "docs/index.rst"])
s.remove_staging_dirs()
# ----------------------------------------------------------------------------
# Add templated files
# ----------------------------------------------------------------------------
templated_files = common.py_library(cov_level=97, microgenerator=True)
python.py_samples(skip_readmes=True)
s.move(templated_files, excludes=["setup.py", "README.rst", "docs/index.rst"])
s.shell.run(["nox", "-s", "blacken"], hide_output=False)
# ----------------------------------------------------------------------------
# Main Branch migration
# ----------------------------------------------------------------------------
s.replace(
"*.rst",
"master",
"main"
)
s.replace(
"CONTRIBUTING.rst",
"kubernetes/community/blob/main",
"kubernetes/community/blob/master"
)
s.replace(
"docs/*",
"master",
"main"
)
s.replace(
"docs/conf.py",
"main_doc",
"root_doc"
)
s.replace(
".kokoro/*",
"master",
"main"
)
s.replace(
"README.rst",
"google-cloud-python/blob/main/README.rst",
"google-cloud-python/blob/master/README.rst"
)
s.replace(
"docs/README.rst",
"google-cloud-python/blob/main/README.rst",
"google-cloud-python/blob/master/README.rst"
)
| apache-2.0 | Python |
8f4d99f67b42fd6518d0b968c5ac2c39f3159211 | Change Windows virtualenv command | sukeesh/Jarvis,sukeesh/Jarvis,sukeesh/Jarvis,sukeesh/Jarvis,appi147/Jarvis,appi147/Jarvis | installer/steps/a_setup_virtualenv.py | installer/steps/a_setup_virtualenv.py | import os
import re
from helper import *
import unix_windows
section("Preparing virtualenv")
# check that virtualenv installed
virtualenv_installed = False
if unix_windows.IS_WIN:
virtualenv_installed = shell(unix_windows.VIRTUALENV_CMD + ' -h').success()
else:
virtualenv_installed = executable_exists('virtualenv')
if not virtualenv_installed:
fail("""\
Please install virtualenv!
https://github.com/pypa/virtualenv
{}""".format(unix_windows.VIRTUALENV_INSTALL_MSG))
# Make sure that not running in virtualenv
if hasattr(sys, 'real_prefix'):
fail("""Please exit virtualenv!""")
# Check if 'env' already exists + is virtualenv
virtualenv_exists = False
if os.path.isdir("env"):
if shell(unix_windows.VIRTUALENV_CMD).success():
virtualenv_exists = True
# Create virtualenv if necessary
if not virtualenv_exists:
if unix_windows.IS_WIN:
shell("py -3 -m virtualenv env").should_not_fail()
else:
shell("virtualenv env --python=python3").should_not_fail()
| import os
import re
from helper import *
import unix_windows
section("Preparing virtualenv")
# check that virtualenv installed
if not executable_exists('virtualenv'):
fail("""\
Please install virtualenv!
https://github.com/pypa/virtualenv
{}""".format(unix_windows.VIRTUALENV_INSTALL_MSG))
# Make sure that not running in virtualenv
if hasattr(sys, 'real_prefix'):
fail("""Please exit virtualenv!""")
# Check if 'env' already exists + is virtualenv
virtualenv_exists = False
if os.path.isdir("env"):
if shell(unix_windows.VIRTUALENV_CMD).success():
virtualenv_exists = True
# Create virtualenv if necessary
if not virtualenv_exists:
if unix_windows.IS_WIN:
shell("virtualenv env").should_not_fail()
else:
shell("virtualenv env --python=python3").should_not_fail()
| mit | Python |
115774c49493f59161bde814eecd77b5beb617a9 | Allow BROKER_URL environment setting. | HelloLily/hellolily,HelloLily/hellolily,HelloLily/hellolily,HelloLily/hellolily | lily/settings/celeryconfig.py | lily/settings/celeryconfig.py | import os
from datetime import timedelta
from kombu import Queue
from .settings import DEBUG, TIME_ZONE
BROKER_URL = os.environ.get('BROKER_URL')
if not BROKER_URL:
BROKER = os.environ.get('BROKER', 'DEV')
if BROKER == 'IRONMQ':
BROKER_URL = 'ironmq://%s:%s@mq-aws-eu-west-1.iron.io' % (os.environ.get('IRON_MQ_PROJECT_ID'), os.environ.get('IRON_MQ_TOKEN'))
elif BROKER == 'CLOUDAMQP':
BROKER_URL = os.environ.get('CLOUDAMQP_URL')
else:
BROKER_URL = 'amqp://guest@%s:5672' % os.environ.get('BROKER_HOST', '127.0.0.1')
BROKER_POOL_LIMIT = 128
CELERY_ACCEPT_CONTENT = ['json'] # ignore other content
CELERY_ANNOTATIONS = {
'*': {
'time_limit': 3600.0,
},
}
CELERY_DEFAULT_QUEUE = 'queue1'
CELERY_ENABLE_UTC = True
CELERY_RESULT_SERIALIZER = 'json'
CELERY_SEND_TASK_ERROR_EMAILS = not DEBUG
# CELERY_SEND_TASK_SENT_EVENT = True
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_BACKEND = os.environ.get('REDISTOGO_URL', 'redis://localhost:6379')
CELERY_TASK_RESULT_EXPIRES = 300
CELERY_TIMEZONE = TIME_ZONE
CELERY_QUEUES = (
Queue('queue1', routing_key='email_async_tasks'),
Queue('queue2', routing_key='email_scheduled_tasks'),
)
CELERY_ROUTES = (
{'synchronize_email_account_scheduler': { # schedule priority email tasks without interference
'queue': 'queue2'
}},
{'synchronize_email_account': { # schedule priority email tasks without interference
'queue': 'queue2'
}},
{'first_synchronize_email_account': { # schedule priority email tasks without interference
'queue': 'queue2'
}},
)
CELERYBEAT_SCHEDULE = {
'synchronize_email_account_scheduler': {
'task': 'synchronize_email_account_scheduler',
'schedule': timedelta(seconds=int(os.environ.get('EMAIL_SYNC_INTERVAL', 60))),
},
}
| import os
from datetime import timedelta
from kombu import Queue
from .settings import DEBUG, TIME_ZONE
BROKER = os.environ.get('BROKER', 'DEV')
if BROKER == 'IRONMQ':
BROKER_URL = 'ironmq://%s:%s@mq-aws-eu-west-1.iron.io' % (os.environ.get('IRON_MQ_PROJECT_ID'), os.environ.get('IRON_MQ_TOKEN'))
elif BROKER == 'CLOUDAMQP':
BROKER_URL = os.environ.get('CLOUDAMQP_URL')
else:
BROKER_URL = 'amqp://guest@%s:5672' % os.environ.get('BROKER_HOST', '127.0.0.1')
BROKER_POOL_LIMIT = 128
CELERY_ACCEPT_CONTENT = ['json'] # ignore other content
CELERY_ANNOTATIONS = {
'*': {
'time_limit': 3600.0,
},
}
CELERY_DEFAULT_QUEUE = 'queue1'
CELERY_ENABLE_UTC = True
CELERY_RESULT_SERIALIZER = 'json'
CELERY_SEND_TASK_ERROR_EMAILS = not DEBUG
# CELERY_SEND_TASK_SENT_EVENT = True
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_BACKEND = os.environ.get('REDISTOGO_URL', 'redis://localhost:6379')
CELERY_TASK_RESULT_EXPIRES = 300
CELERY_TIMEZONE = TIME_ZONE
CELERY_QUEUES = (
Queue('queue1', routing_key='email_async_tasks'),
Queue('queue2', routing_key='email_scheduled_tasks'),
)
CELERY_ROUTES = (
{'synchronize_email_account_scheduler': { # schedule priority email tasks without interference
'queue': 'queue2'
}},
{'synchronize_email_account': { # schedule priority email tasks without interference
'queue': 'queue2'
}},
{'first_synchronize_email_account': { # schedule priority email tasks without interference
'queue': 'queue2'
}},
)
CELERYBEAT_SCHEDULE = {
'synchronize_email_account_scheduler': {
'task': 'synchronize_email_account_scheduler',
'schedule': timedelta(seconds=int(os.environ.get('EMAIL_SYNC_INTERVAL', 60))),
},
}
| agpl-3.0 | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.