commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13
values | lang stringclasses 23
values |
|---|---|---|---|---|---|---|---|---|
78f96421598a561285b9cd5568fd4acabd52585f | Add embed site freeze generator | Opendatal/offenerhaushalt.de,Opendatal/offenerhaushalt.de,Opendatal/offenerhaushalt.de | offenerhaushalt/generators.py | offenerhaushalt/generators.py | from offenerhaushalt.core import freezer, pages, sites
@freezer.register_generator
def page():
for page in pages:
yield {'path': page.path}
@freezer.register_generator
def site():
for site in sites:
yield {'slug': site.slug}
@freezer.register_generator
def embed_site():
for site in sites:
yield {'slug': site.slug}
|
from offenerhaushalt.core import freezer, pages, sites
@freezer.register_generator
def page():
for page in pages:
yield {'path': page.path}
@freezer.register_generator
def site():
for site in sites:
yield {'slug': site.slug} | mit | Python |
9fda25c0a28f7965c2378dcd4b2106ca034052c3 | Handle missing accounts on HaveIBeenPwned properly. | sk89q/Plumeria,sk89q/Plumeria,sk89q/Plumeria | plumeria/plugins/have_i_been_pwned.py | plumeria/plugins/have_i_been_pwned.py | import plumeria.util.http as http
from plumeria import config
from plumeria.command import commands, CommandError
from plumeria.command.parse import Text
from plumeria.message.mappings import build_mapping
from plumeria.util.collections import SafeStructure
from plumeria.util.ratelimit import rate_limit
@commands.register("haveibeenpwned", "pwned", category="Search", params=[Text('query')])
@rate_limit(burst_size=4)
async def have_i_been_pwned(message, query):
"""
Checks where an account (specified by account name or email address) exists
on sites that have experienced data breaches.
Example::
pwned email@example.com
"""
try:
r = await http.get("https://haveibeenpwned.com/api/v2/breachedaccount/" + query, headers=[
('User-Agent', 'Plumeria chat bot (+https://gitlab.com/sk89q/Plumeria)')
])
except http.BadStatusCodeError as e:
if e.http_code == 404:
raise CommandError("Account not found! (That's good.)")
else:
raise e
results = SafeStructure(r.json())
return build_mapping(
[(e.Title, "{} ({} breached) ({})".format(e.BreachDate, e.PwnCount, ", ".join(e.DataClasses))) for e in
results])
| import plumeria.util.http as http
from plumeria import config
from plumeria.command import commands, CommandError
from plumeria.command.parse import Text
from plumeria.message.mappings import build_mapping
from plumeria.util.collections import SafeStructure
from plumeria.util.ratelimit import rate_limit
@commands.register("haveibeenpwned", "pwned", category="Search", params=[Text('query')])
@rate_limit(burst_size=4)
async def have_i_been_pwned(message, query):
"""
Checks where an account (specified by account name or email address) exists
on sites that have experienced data breaches.
Example::
pwned email@example.com
"""
r = await http.get("https://haveibeenpwned.com/api/v2/breachedaccount/" + query, headers=[
('User-Agent', 'Plumeria chat bot (+https://gitlab.com/sk89q/Plumeria)')
])
if not len(r.text().strip()):
raise CommandError("Account not found! (That's good.)")
results = SafeStructure(r.json())
return build_mapping(
[(e.Title, "{} ({} breached) ({})".format(e.BreachDate, e.PwnCount, ", ".join(e.DataClasses))) for e in
results])
| mit | Python |
3f995558f35ec6ce28320c4d1ffda5a4f8507fd6 | Fix broken super call | richo/groundstation,richo/groundstation,richo/groundstation,richo/groundstation,richo/groundstation | groundstation/peer_socket.py | groundstation/peer_socket.py | from sockets.socket_closed_exception import SocketClosedException
from sockets.stream_socket import StreamSocket
from groundstation import settings
import groundstation.logger
log = groundstation.logger.getLogger(__name__)
class PeerSocket(StreamSocket):
"""Wrapper for a peer who just connected, or one we've connected to
Since the communication protocol should be implicitly bidirectional, the
factory methods should be the only instanciation methods"""
def __init__(self, conn, peer):
self._sock = conn
super(PeerSocket, self).__init__()
self.peer = peer
@classmethod
def from_accept(klass, args):
return klass(*args)
@classmethod
def from_connect(klass, args):
return klass(*args)
def __repr__(self):
return "<%s: from %s>" % (self.__class__, self.peer)
class PeerSocketClosedException(SocketClosedException):
"""Raised when a peer closes their socket"""
pass
| from sockets.socket_closed_exception import SocketClosedException
from sockets.stream_socket import StreamSocket
from groundstation import settings
import groundstation.logger
log = groundstation.logger.getLogger(__name__)
class PeerSocket(StreamSocket):
"""Wrapper for a peer who just connected, or one we've connected to
Since the communication protocol should be implicitly bidirectional, the
factory methods should be the only instanciation methods"""
def __init__(self, conn, peer):
self._sock = conn
super(PeerSocket, self).__init__(self)
self.peer = peer
@classmethod
def from_accept(klass, args):
return klass(*args)
@classmethod
def from_connect(klass, args):
return klass(*args)
def __repr__(self):
return "<%s: from %s>" % (self.__class__, self.peer)
class PeerSocketClosedException(SocketClosedException):
"""Raised when a peer closes their socket"""
pass
| mit | Python |
cc51137aedeee8bdcf6b47e98b195ec750183ab4 | Allow plain values, not just methods | carlmjohnson/django-context-variables | context_variables/__init__.py | context_variables/__init__.py | class context_variable(object):
def __init__(self, func):
self.func = func
self.__doc__ = func.__doc__
def __get__(self, obj, objtype=None):
# Handle case of being called from class instead of an instance
if obj is None:
return self
# If we got a plain value, return that
if not callable(self.func):
return self.func
# Evaluate the property
value = self.func(obj)
# Save value into the instance, replacing the descriptor
object.__setattr__(obj, self.func.__name__, value)
return value
def get_context_variables(obj):
context = {}
for attr in dir(obj.__class__):
# Don't bother to check _private/__special attributes
if attr.startswith('_'):
continue
# Get attributes off the class, in case they've already been
# cached as their final values in the instance dictionary and to
# avoid general descriptor weirdness
raw_attr = getattr(obj.__class__, attr)
if isinstance(raw_attr, context_variable):
# Force evaluation of obj.`attr`
context[attr] = getattr(obj, attr)
return context
| class context_variable(object):
def __init__(self, func):
self.func = func
self.__doc__ = func.__doc__
def __get__(self, obj, objtype=None):
# Handle case of being called from class instead of an instance
if obj is None:
return self
# Evaluate the property
value = self.func(obj)
# Save value into the instance, replacing the descriptor
object.__setattr__(obj, self.func.__name__, value)
return value
def get_context_variables(obj):
context = {}
for attr in dir(obj.__class__):
# Don't bother to check _private/__special attributes
if attr.startswith('_'):
continue
# Get attributes off the class, in case they've already been
# cached as their final values in the instance dictionary and to
# avoid general descriptor weirdness
raw_attr = getattr(obj.__class__, attr)
if isinstance(raw_attr, context_variable):
# Force evaluation of obj.`attr`
context[attr] = getattr(obj, attr)
return context
| mit | Python |
11218089bd2739b26e6b22cbb54117e27cba76ac | Fix flake error | django/channels,Coread/channels,linuxlewis/channels,andrewgodwin/django-channels,raiderrobert/channels,andrewgodwin/channels,Coread/channels,raphael-boucher/channels,Krukov/channels,Krukov/channels | channels/apps.py | channels/apps.py | from django.apps import AppConfig
from django.core.exceptions import ImproperlyConfigured
class ChannelsConfig(AppConfig):
name = "channels"
verbose_name = "Channels"
def ready(self):
# Check you're not running 1.10 or above
try:
from django import channels # NOQA isort:skip
except ImportError:
pass
else:
raise ImproperlyConfigured("You have Django 1.10 or above; use the builtin django.channels!")
# Do django monkeypatches
from .hacks import monkeypatch_django
monkeypatch_django()
| from django.apps import AppConfig
from django.core.exceptions import ImproperlyConfigured
class ChannelsConfig(AppConfig):
name = "channels"
verbose_name = "Channels"
def ready(self):
# Check you're not running 1.10 or above
try:
from django import channels
except ImportError:
pass
else:
raise ImproperlyConfigured("You have Django 1.10 or above; use the builtin django.channels!")
# Do django monkeypatches
from .hacks import monkeypatch_django
monkeypatch_django()
| bsd-3-clause | Python |
ca7bc952727e23b742570aab8bcad3be97ac6739 | fix typo | EUMSSI/EUMSSI-platform,EUMSSI/EUMSSI-platform,EUMSSI/EUMSSI-platform | preprocess/video_persons2extracted.py | preprocess/video_persons2extracted.py | #!/usr/bin/env python
from extract_converter import ExtractConverter
import click
def convert(x):
meta = {
#meta['all'] = [w['item'] for w in x['result']['content']]
'amalia': x['result']['Amalia_Json']
}
available_data = ['video_persons']
return meta, available_data
@click.command()
@click.option('--reset', is_flag=True, help="reset data_available")
@click.option('--clean', is_flag=True, help="reset data_available and remove existing meta.source")
def run(reset, clean):
conv = ExtractConverter('video_persons', convert)
conv.run()
if __name__ == '__main__':
run()
| #!/usr/bin/env python
from extract_converter import ExtractConverter
import click
def convert(x):
meta = {
#meta['all'] = [w['item'] for w in x['result']['content']]
'amalia': x['result']['Amalia_Json'])
}
available_data = ['video_persons']
return meta, available_data
@click.command()
@click.option('--reset', is_flag=True, help="reset data_available")
@click.option('--clean', is_flag=True, help="reset data_available and remove existing meta.source")
def run(reset, clean):
conv = ExtractConverter('video_persons', convert)
conv.run()
if __name__ == '__main__':
run()
| apache-2.0 | Python |
992b999e4511ef1445c14e2147147a595e51a94d | Add -usehd to excluded args in check-doc.py | chaincoin/chaincoin,chaincoin/chaincoin,chaincoin/chaincoin,chaincoin/chaincoin,chaincoin/chaincoin,chaincoin/chaincoin | contrib/devtools/check-doc.py | contrib/devtools/check-doc.py | #!/usr/bin/env python
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
This checks if all command line args are documented.
Return value is 0 to indicate no error.
Author: @MarcoFalke
'''
from subprocess import check_output
import re
import sys
FOLDER_GREP = 'src'
FOLDER_TEST = 'src/test/'
CMD_ROOT_DIR = '`git rev-parse --show-toplevel`/%s' % FOLDER_GREP
CMD_GREP_ARGS = r"egrep -r -I '(map(Multi)?Args(\.count\(|\[)|Get(Bool)?Arg\()\"\-[^\"]+?\"' %s | grep -v '%s'" % (CMD_ROOT_DIR, FOLDER_TEST)
CMD_GREP_DOCS = r"egrep -r -I 'HelpMessageOpt\(\"\-[^\"=]+?(=|\")' %s" % (CMD_ROOT_DIR)
REGEX_ARG = re.compile(r'(?:map(?:Multi)?Args(?:\.count\(|\[)|Get(?:Bool)?Arg\()\"(\-[^\"]+?)\"')
REGEX_DOC = re.compile(r'HelpMessageOpt\(\"(\-[^\"=]+?)(?:=|\")')
# list unsupported, deprecated and duplicate args as they need no documentation
SET_DOC_OPTIONAL = set(['-rpcssl', '-benchmark', '-h', '-help', '-socks', '-tor', '-debugnet', '-whitelistalwaysrelay', '-prematurewitness', '-walletprematurewitness', '-promiscuousmempoolflags', '-blockminsize', '-dbcrashratio', '-forcecompactdb', '-usehd'])
def main():
used = check_output(CMD_GREP_ARGS, shell=True)
docd = check_output(CMD_GREP_DOCS, shell=True)
args_used = set(re.findall(REGEX_ARG,used))
args_docd = set(re.findall(REGEX_DOC,docd)).union(SET_DOC_OPTIONAL)
args_need_doc = args_used.difference(args_docd)
args_unknown = args_docd.difference(args_used)
print "Args used : %s" % len(args_used)
print "Args documented : %s" % len(args_docd)
print "Args undocumented: %s" % len(args_need_doc)
print args_need_doc
print "Args unknown : %s" % len(args_unknown)
print args_unknown
sys.exit(len(args_need_doc))
if __name__ == "__main__":
main()
| #!/usr/bin/env python
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
This checks if all command line args are documented.
Return value is 0 to indicate no error.
Author: @MarcoFalke
'''
from subprocess import check_output
import re
import sys
FOLDER_GREP = 'src'
FOLDER_TEST = 'src/test/'
CMD_ROOT_DIR = '`git rev-parse --show-toplevel`/%s' % FOLDER_GREP
CMD_GREP_ARGS = r"egrep -r -I '(map(Multi)?Args(\.count\(|\[)|Get(Bool)?Arg\()\"\-[^\"]+?\"' %s | grep -v '%s'" % (CMD_ROOT_DIR, FOLDER_TEST)
CMD_GREP_DOCS = r"egrep -r -I 'HelpMessageOpt\(\"\-[^\"=]+?(=|\")' %s" % (CMD_ROOT_DIR)
REGEX_ARG = re.compile(r'(?:map(?:Multi)?Args(?:\.count\(|\[)|Get(?:Bool)?Arg\()\"(\-[^\"]+?)\"')
REGEX_DOC = re.compile(r'HelpMessageOpt\(\"(\-[^\"=]+?)(?:=|\")')
# list unsupported, deprecated and duplicate args as they need no documentation
SET_DOC_OPTIONAL = set(['-rpcssl', '-benchmark', '-h', '-help', '-socks', '-tor', '-debugnet', '-whitelistalwaysrelay', '-prematurewitness', '-walletprematurewitness', '-promiscuousmempoolflags', '-blockminsize', '-dbcrashratio', '-forcecompactdb'])
def main():
used = check_output(CMD_GREP_ARGS, shell=True)
docd = check_output(CMD_GREP_DOCS, shell=True)
args_used = set(re.findall(REGEX_ARG,used))
args_docd = set(re.findall(REGEX_DOC,docd)).union(SET_DOC_OPTIONAL)
args_need_doc = args_used.difference(args_docd)
args_unknown = args_docd.difference(args_used)
print "Args used : %s" % len(args_used)
print "Args documented : %s" % len(args_docd)
print "Args undocumented: %s" % len(args_need_doc)
print args_need_doc
print "Args unknown : %s" % len(args_unknown)
print args_unknown
sys.exit(len(args_need_doc))
if __name__ == "__main__":
main()
| mit | Python |
3557e471e158d668b7834cfc1c2e3dae438cb4cd | Add client macs obscure option handling | FreiFunkMuenster/ffmap-backend,ffac/ffmap-backend,FreifunkBremen/ffmap-backend,ff-kbu/ffmap-backend,FreiFunkMuenster/ffmap-backend,mweinelt/ffmap-backend,FreifunkBremen/ffmap-backend,ffnord/ffmap-backend,FreifunkMD/ffmap-backend,freifunkhamburg/ffmap-backend,FreifunkJena/ffmap-backend,freifunk-fulda/ffmap-backend,kpcyrd/ffmap-backend,rubo77/ffmap-backend,freifunk-mwu/ffmap-backend,ff-kbu/ffmap-backend,FreifunkMD/ffmap-backend,freifunkhamburg/ffmap-backend,freifunk-kiel/ffmap-backend,freifunk-mwu/ffmap-backend,ffnord/ffmap-backend | bat2nodes.py | bat2nodes.py | #!/usr/bin/env python3
import json
import fileinput
import argparse
import os
from batman import batman
from rrd import rrd
from nodedb import NodeDB
from d3mapbuilder import D3MapBuilder
# Force encoding to UTF-8
import locale # Ensures that subsequent open()s
locale.getpreferredencoding = lambda _=None: 'UTF-8' # are UTF-8 encoded.
import sys
#sys.stdin = open('/dev/stdin', 'r')
#sys.stdout = open('/dev/stdout', 'w')
#sys.stderr = open('/dev/stderr', 'w')
parser = argparse.ArgumentParser()
parser.add_argument('-a', '--aliases',
help='read aliases from FILE',
action='append',
metavar='FILE')
parser.add_argument('-m', '--mesh', action='append',
help='batman mesh interface')
parser.add_argument('-o', '--obscure', action='store_true',
help='obscure client macs')
parser.add_argument('-d', '--destination-directory', action='store',
help='destination directory for generated files',required=True)
args = parser.parse_args()
options = vars(args)
db = NodeDB()
if options['mesh']:
for mesh_interface in options['mesh']:
bm = batman(mesh_interface)
db.parse_vis_data(bm.vis_data())
for gw in bm.gateway_list():
db.mark_gateways(gw.mac)
else:
bm = batman()
db.parse_vis_data(bm.vis_data())
for gw in bm.gateway_list():
db.mark_gateways([gw['mac']])
if options['aliases']:
for aliases in options['aliases']:
db.import_aliases(json.load(open(aliases)))
if options['obscure']:
db.obscure_clients()
scriptdir = os.path.dirname(os.path.realpath(__file__))
rrd = rrd(scriptdir + "/nodedb/", options['destination_directory'] + "/nodes")
rrd.update_database(db)
rrd.update_images()
m = D3MapBuilder(db)
nodes_json = open(options['destination_directory'] + '/nodes.json','w')
nodes_json.write(m.build())
nodes_json.close()
| #!/usr/bin/env python3
import json
import fileinput
import argparse
import os
from batman import batman
from rrd import rrd
from nodedb import NodeDB
from d3mapbuilder import D3MapBuilder
# Force encoding to UTF-8
import locale # Ensures that subsequent open()s
locale.getpreferredencoding = lambda _=None: 'UTF-8' # are UTF-8 encoded.
import sys
#sys.stdin = open('/dev/stdin', 'r')
#sys.stdout = open('/dev/stdout', 'w')
#sys.stderr = open('/dev/stderr', 'w')
parser = argparse.ArgumentParser()
parser.add_argument('-a', '--aliases',
help='read aliases from FILE',
action='append',
metavar='FILE')
parser.add_argument('-m', '--mesh', action='append',
help='batman mesh interface')
parser.add_argument('-d', '--destination-directory', action='store',
help='destination directory for generated files',required=True)
args = parser.parse_args()
options = vars(args)
db = NodeDB()
if options['mesh']:
for mesh_interface in options['mesh']:
bm = batman(mesh_interface)
db.parse_vis_data(bm.vis_data())
for gw in bm.gateway_list():
db.mark_gateways(gw.mac)
else:
bm = batman()
db.parse_vis_data(bm.vis_data())
for gw in bm.gateway_list():
db.mark_gateways([gw['mac']])
if options['aliases']:
for aliases in options['aliases']:
db.import_aliases(json.load(open(aliases)))
scriptdir = os.path.dirname(os.path.realpath(__file__))
rrd = rrd(scriptdir + "/nodedb/", options['destination_directory'] + "/nodes")
rrd.update_database(db)
rrd.update_images()
m = D3MapBuilder(db)
nodes_json = open(options['destination_directory'] + '/nodes.json','w')
nodes_json.write(m.build())
nodes_json.close()
| bsd-3-clause | Python |
c6e52e781b0739ab63a3f4c5e49741cd690ddf95 | fix problem with un-escaped backslashes in string | StratusLab/client,StratusLab/client,StratusLab/client,StratusLab/client | cli/user/src/main/scripts/create-windows-stubs.py | cli/user/src/main/scripts/create-windows-stubs.py | #!/usr/bin/env python
import os
import stat
import glob
import StringIO
input_path='main/python/'
output_path='target/windows/'
stub="""@echo off
python %~dp0..\\bin\\%~n0 %*
"""
def process_file(file):
output_basename = os.path.basename(file)
output_name = output_basename + ".bat"
output_file = os.path.join(output_path, output_name)
print output_basename + " --> " + output_file
f = open(output_file, "w")
f.write(stub)
f.close()
os.chmod(output_file, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR |
stat.S_IRGRP | stat.S_IXGRP |
stat.S_IROTH | stat.S_IXOTH)
try:
os.makedirs(output_path)
except OSError as e:
print "Ignoring directory create error. Directory may exist."
print e
for file in glob.glob(os.path.join(input_path, 'stratus-*')):
process_file(file)
| #!/usr/bin/env python
import os
import stat
import glob
import StringIO
input_path='main/python/'
output_path='target/windows/'
stub='''@echo off
python %~dp0..\bin\%~n0 %*
'''
def process_file(file):
output_basename = os.path.basename(file)
output_name = output_basename + ".bat"
output_file = os.path.join(output_path, output_name)
print output_basename + " --> " + output_file
f = open(output_file, "w")
f.write(stub)
f.close()
os.chmod(output_file, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR |
stat.S_IRGRP | stat.S_IXGRP |
stat.S_IROTH | stat.S_IXOTH)
try:
os.makedirs(output_path)
except OSError as e:
print "Ignoring directory create error. Directory may exist."
print e
for file in glob.glob(os.path.join(input_path, 'stratus-*')):
process_file(file)
| apache-2.0 | Python |
717ca3e94106ebe00aa8876267d98cddc1c390b5 | fix coveralls | MySmile/sfchat,MySmile/sfchat,MySmile/sfchat,MySmile/sfchat | config/local.py | config/local.py | # -*- coding: utf-8 -*-
# secret settings
import os
from mongoengine import connect, register_connection
from sfchat.settings.base import BASE_DIR
DEBUG = True
COMPRESS_ENABLED = False
DEBUG_TOOLBAR_PATCH_SETTINGS = False
# @NOTE: if True then enable 'debug_toolbar.middleware.DebugToolbarMiddleware' also
TEMPLATE_DEBUG = DEBUG
INTERNAL_IPS = '127.0.0.1'
SECRET_KEY = '&ku!ebrl5h61ztet=c&ydh+sc9tkq=b70^xbx461)l1pp!lgt6'
DEFAULT_CONNECTION_NAME = 'sfchat'
MONGODB_DATABASE_NAME = 'sfchat'
MONGODB_HOST = 'localhost'
MONGODB_PORT = 27017
MONGODB_USERNAME = ''
MONGODB_PASSWORD = ''
connect(MONGODB_DATABASE_NAME,
host=MONGODB_HOST,
port=MONGODB_PORT,
username=MONGODB_USERNAME,
password=MONGODB_PASSWORD)
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'sfchat': {
'ENGINE': 'django.db.backends.dummy',
},
# 'sfchat': {
# 'ENGINE': 'django.db.backends.dummy',
# #~ 'USER': '',
# #~ 'PASSWORD': '',
# #~ 'HOST': '',
# #~ 'PORT': '',
# },
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'sfchat/db/sfchat_admin.sqlite3'),
# 'USER': '',
# 'PASSWORD': '',
# 'HOST': '',
# 'PORT': '',
}
}
ALLOWED_HOSTS = ['127.0.0.1', 'localhost']
ADMINS = (
('admin', 'info@mysmile.com.ua'),
)
# Google Analytics
GOOGLE_ANALYTICS_TRACKING_ID = 'UA-57194449-2'
GOOGLE_ANALYTICS_DEBUG_MODE = True
MEDIA_ROOT = os.path.join(BASE_DIR, '/media/')
STATIC_ROOT = os.path.join(BASE_DIR, 'sfchat/static/')
COMPRESS_ROOT = STATIC_ROOT
DATABASE_ROUTERS = ['apps.chat.router.SFChatRouter', 'apps.chat.router.AdminRouter',]
| # -*- coding: utf-8 -*-
# secret settings
import os
from mongoengine import connect, register_connection
from sfchat.settings.base import BASE_DIR
DEBUG = True
COMPRESS_ENABLED = False
DEBUG_TOOLBAR_PATCH_SETTINGS = False
# @NOTE: if True then enable 'debug_toolbar.middleware.DebugToolbarMiddleware' also
TEMPLATE_DEBUG = DEBUG
INTERNAL_IPS = '127.0.0.1'
SECRET_KEY = '&ku!ebrl5h61ztet=c&ydh+sc9tkq=b70^xbx461)l1pp!lgt6'
DEFAULT_CONNECTION_NAME = 'sfchat'
MONGODB_DATABASE_NAME = 'sfchat'
MONGODB_HOST = 'localhost'
MONGODB_PORT = 27017
MONGODB_USERNAME = ''
MONGODB_PASSWORD = ''
connect(MONGODB_DATABASE_NAME,
host=MONGODB_HOST,
port=MONGODB_PORT,
username=MONGODB_USERNAME,
password=MONGODB_PASSWORD,
alias='default')
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'sfchat': {
'ENGINE': 'django.db.backends.dummy',
},
# 'sfchat': {
# 'ENGINE': 'django.db.backends.dummy',
# #~ 'USER': '',
# #~ 'PASSWORD': '',
# #~ 'HOST': '',
# #~ 'PORT': '',
# },
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'sfchat/db/sfchat_admin.sqlite3'),
# 'USER': '',
# 'PASSWORD': '',
# 'HOST': '',
# 'PORT': '',
}
}
ALLOWED_HOSTS = ['127.0.0.1', 'localhost']
ADMINS = (
('admin', 'info@mysmile.com.ua'),
)
# Google Analytics
GOOGLE_ANALYTICS_TRACKING_ID = 'UA-57194449-2'
GOOGLE_ANALYTICS_DEBUG_MODE = True
MEDIA_ROOT = os.path.join(BASE_DIR, '/media/')
STATIC_ROOT = os.path.join(BASE_DIR, 'sfchat/static/')
COMPRESS_ROOT = STATIC_ROOT
DATABASE_ROUTERS = ['apps.chat.router.SFChatRouter', 'apps.chat.router.AdminRouter',]
| bsd-3-clause | Python |
2ebbacc70940f0c176a9f378571e2e904bb02c44 | decrease image in causes serializer | OpenVolunteeringPlatform/django-ovp-core,OpenVolunteeringPlatform/django-ovp-core | ovp_core/serializers/cause.py | ovp_core/serializers/cause.py | from ovp_core import models
from ovp_core import validators
from rest_framework import serializers
class CauseSerializer(serializers.ModelSerializer):
class Meta:
fields = ['id', 'name']
model = models.Cause
class CauseAssociationSerializer(serializers.ModelSerializer):
id = serializers.IntegerField()
name = serializers.CharField(read_only=True)
class Meta:
fields = ['id', 'name']
model = models.Cause
validators = [validators.cause_exist]
| from ovp_core import models
from ovp_core import validators
from rest_framework import serializers
from ovp_uploads.serializers import UploadedImageSerializer
class CauseSerializer(serializers.ModelSerializer):
image = UploadedImageSerializer()
class Meta:
fields = ['id', 'name', 'image']
model = models.Cause
class CauseAssociationSerializer(serializers.ModelSerializer):
id = serializers.IntegerField()
name = serializers.CharField(read_only=True)
class Meta:
fields = ['id', 'name']
model = models.Cause
validators = [validators.cause_exist]
| agpl-3.0 | Python |
546d16a0b30ca1738390d2e7a9e9d8e104a49140 | Bump version to 0.3.0 (#706) | quantumlib/Cirq,quantumlib/Cirq,balopat/Cirq,balopat/Cirq,quantumlib/Cirq,balopat/Cirq,quantumlib/Cirq,quantumlib/Cirq | cirq/_version.py | cirq/_version.py | # Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define version number here and read it from setup.py automatically"""
import sys
if sys.version_info.major == 2:
__version__ = "0.3.0.27" # coverage: ignore
else:
__version__ = "0.3.0.35"
| # Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define version number here and read it from setup.py automatically"""
import sys
if sys.version_info.major == 2:
__version__ = "0.0.2.dev27" # coverage: ignore
else:
__version__ = "0.0.2.dev35"
| apache-2.0 | Python |
96db4f0f42058ba9a8917fd4e9a3d8174f91cbd3 | Update licensing info on version file | StackStorm/mistral,StackStorm/mistral | version_st2.py | version_st2.py | # Copyright 2016 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = '1.6dev'
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = '1.6dev'
| apache-2.0 | Python |
25b92ff94875afd3ab84e99773c025d8860ff4e9 | Update zero_one_normalization.py | greenelab/adage,greenelab/adage,greenelab/adage | Data_collection_processing/zero_one_normalization.py | Data_collection_processing/zero_one_normalization.py | '''
Linearly scale the expression range of one gene to be between 0 and 1.
If a reference dataset is provided, then the scaling of one gene in the
target dataset in done using the minimun and range of that gene in the
reference dataset.
'''
import sys
import argparse
sys.path.insert(0, 'Data_collection_processing/')
from pcl import PCLfile
parser = argparse.ArgumentParser(description="Linearly scale the expression range\
of one gene to be between 0 and 1. If a reference dataset is provided, then \
the scaling of one gene in the target dataset in done using the minimum and \
range of that gene in the reference dataset.")
parser.add_argument('tar', help='the target file for zero one normalization')
parser.add_argument('out', help='the output file after zero one normalization')
parser.add_argument('ref', help='the reference file. If reference file\
is \'None\', then zero one normalization will be done based on\
target file itself.')
args = parser.parse_args()
def zero_one_normal(tar=None, out=None, ref=None):
'''
tar: the target file for zero one normalization
out: the output file after zero one normalization
ref: the reference file. If reference file is 'None',
then zero one normalization will be done based on
target file itself.
'''
if ref == 'None':
tar_data = PCLfile(tar, skip_col=0)
tar_data.zero_one_normalization()
tar_data.write_pcl(out)
else:
ref_data = PCLfile(ref, skip_col=0)
tar_data = PCLfile(tar, skip_col=0)
for i in xrange(ref_data.data_matrix.shape[0]):
row_minimum = ref_data.data_matrix[i, :].min()
row_maximum = ref_data.data_matrix[i, :].max()
row_range = row_maximum - row_minimum
tar_data.data_matrix[i, :] =\
(tar_data.data_matrix[i, :] - row_minimum)/row_range
# bound the values to be between 0 and 1
tar_data.data_matrix[i, :] =\
[0 if x < 0 else x for x in tar_data.data_matrix[i, :]]
tar_data.data_matrix[i, :] =\
[1 if x > 1 else x for x in tar_data.data_matrix[i, :]]
tar_data.write_pcl(out)
zero_one_normal(tar=args.tar, out=args.out, ref=args.ref)
| '''
Linearly scale the expression range of one gene to be between 0 and 1.
If a reference dataset is provided, then the scaling of one gene in the
target dataset in done using the minimun and range of that gene in the
reference dataset.
'''
import sys
import argparse
sys.path.insert(0, 'Data_collection_processing/')
from pcl import PCLfile
parser = argparse.ArgumentParser(description="Linearly scale the expression range\
of one gene to be between 0 and 1. If a reference dataset is provided, then \
the scaling of one gene in the target dataset in done using the minimum and \
range of that gene in the reference dataset.")
parser.add_argument('tar', help='the target file for zero one normalization')
parser.add_argument('out', help='the output file after zero one normalization')
parser.add_argument('ref', help='the reference file. If reference file\
is \'None\', then zero one normalization will be done based on\
target file itself.')
args = parser.parse_args()
def zero_one_normal(tar=None, out=None, ref=None):
'''
tar: the target file for zero one normalization
out: the output file after zero one normalization
ref: the reference file. If reference file is 'None',
then zero one normalization will be done based on
target file itself.
'''
if ref == 'None':
tar_data = PCLfile(tar, skip_col=0)
tar_data.zero_one_normalization()
tar_data.write_pcl(out)
else:
ref_data = PCLfile(ref, skip_col=0)
tar_data = PCLfile(tar, skip_col=0)
for i in xrange(ref_data.data_matrix.shape[0]):
row_minimum = ref_data.data_matrix[i, :].min()
row_maximum = ref_data.data_matrix[i, :].max()
row_range = row_maximum - row_minimum
tar_data.data_matrix[i, :] =
(tar_data.data_matrix[i, :] - row_minimum)/row_range
# bound the values to be between 0 and 1
tar_data.data_matrix[i, :] =
[0 if x < 0 else x for x in tar_data.data_matrix[i, :]]
tar_data.data_matrix[i, :] =
[1 if x > 1 else x for x in tar_data.data_matrix[i, :]]
tar_data.write_pcl(out)
zero_one_normal(tar=args.tar, out=args.out, ref=args.ref)
| bsd-3-clause | Python |
9653f3d4d3bd859d592542fc011ad7b81a866052 | Make the widget experimental error a real python warning | jupyter-widgets/ipywidgets,cornhundred/ipywidgets,cornhundred/ipywidgets,SylvainCorlay/ipywidgets,cornhundred/ipywidgets,jupyter-widgets/ipywidgets,ipython/ipywidgets,jupyter-widgets/ipywidgets,ipython/ipywidgets,jupyter-widgets/ipywidgets,cornhundred/ipywidgets,cornhundred/ipywidgets,SylvainCorlay/ipywidgets,ipython/ipywidgets,SylvainCorlay/ipywidgets,SylvainCorlay/ipywidgets,ipython/ipywidgets,ipython/ipywidgets | IPython/html/widgets/__init__.py | IPython/html/widgets/__init__.py | from .widget import Widget, DOMWidget, CallbackDispatcher, register
from .widget_bool import Checkbox, ToggleButton
from .widget_button import Button
from .widget_box import Box, Popup, FlexBox, HBox, VBox
from .widget_float import FloatText, BoundedFloatText, FloatSlider, FloatProgress, FloatRangeSlider
from .widget_image import Image
from .widget_int import IntText, BoundedIntText, IntSlider, IntProgress, IntRangeSlider
from .widget_output import Output
from .widget_selection import RadioButtons, ToggleButtons, Dropdown, Select
from .widget_selectioncontainer import Tab, Accordion
from .widget_string import HTML, Latex, Text, Textarea
from .interaction import interact, interactive, fixed, interact_manual
from .widget_link import Link, link, DirectionalLink, dlink
# Deprecated classes
from .widget_bool import CheckboxWidget, ToggleButtonWidget
from .widget_button import ButtonWidget
from .widget_box import ContainerWidget, PopupWidget
from .widget_float import FloatTextWidget, BoundedFloatTextWidget, FloatSliderWidget, FloatProgressWidget
from .widget_image import ImageWidget
from .widget_int import IntTextWidget, BoundedIntTextWidget, IntSliderWidget, IntProgressWidget
from .widget_selection import RadioButtonsWidget, ToggleButtonsWidget, DropdownWidget, SelectWidget
from .widget_selectioncontainer import TabWidget, AccordionWidget
from .widget_string import HTMLWidget, LatexWidget, TextWidget, TextareaWidget
# Warn on import
from warnings import warn
warn("""The widget API is still considered experimental and may change in the future.""", FutureWarning, stacklevel=2)
| from .widget import Widget, DOMWidget, CallbackDispatcher, register
from .widget_bool import Checkbox, ToggleButton
from .widget_button import Button
from .widget_box import Box, Popup, FlexBox, HBox, VBox
from .widget_float import FloatText, BoundedFloatText, FloatSlider, FloatProgress, FloatRangeSlider
from .widget_image import Image
from .widget_int import IntText, BoundedIntText, IntSlider, IntProgress, IntRangeSlider
from .widget_output import Output
from .widget_selection import RadioButtons, ToggleButtons, Dropdown, Select
from .widget_selectioncontainer import Tab, Accordion
from .widget_string import HTML, Latex, Text, Textarea
from .interaction import interact, interactive, fixed, interact_manual
from .widget_link import Link, link, DirectionalLink, dlink
# Deprecated classes
from .widget_bool import CheckboxWidget, ToggleButtonWidget
from .widget_button import ButtonWidget
from .widget_box import ContainerWidget, PopupWidget
from .widget_float import FloatTextWidget, BoundedFloatTextWidget, FloatSliderWidget, FloatProgressWidget
from .widget_image import ImageWidget
from .widget_int import IntTextWidget, BoundedIntTextWidget, IntSliderWidget, IntProgressWidget
from .widget_selection import RadioButtonsWidget, ToggleButtonsWidget, DropdownWidget, SelectWidget
from .widget_selectioncontainer import TabWidget, AccordionWidget
from .widget_string import HTMLWidget, LatexWidget, TextWidget, TextareaWidget
# Warn on import
from IPython.utils.warn import warn
warn("""The widget API is still considered experimental and
may change by the next major release of IPython.""")
| bsd-3-clause | Python |
a9bf46545502afb9991f7ffd65406797b79d1716 | Correct murano-agent reference to message headers | telefonicaid/murano-agent,Bloomie/murano-agent,Bloomie/murano-agent,openstack/murano-agent,Bloomie/murano-agent,telefonicaid/murano-agent,openstack/murano-agent,telefonicaid/murano-agent,openstack/murano-agent,openstack/murano-agent,Bloomie/murano-agent | python-agent/muranoagent/common/messaging/message.py | python-agent/muranoagent/common/messaging/message.py | # Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import anyjson
import logging
LOG = logging.getLogger("murano-common.messaging")
class Message(object):
    """Thin wrapper over a transport message.

    The message id is taken from the transport headers and the payload is
    decoded as JSON; a payload that is not valid JSON leaves body = None.
    """

    def __init__(self, connection=None, message_handle=None):
        self._body = None
        self._connection = connection
        self._message_handle = message_handle
        # The id property setter normalizes a missing id (None) to ''.
        self.id = (message_handle.headers.get('message_id')
                   if message_handle else None)
        try:
            self.body = (anyjson.loads(message_handle.body)
                         if message_handle else None)
        except ValueError as e:
            # Undecodable payload: keep body as None and log the failure.
            self.body = None
            LOG.exception(e)

    @property
    def body(self):
        """Decoded (JSON) payload of the message, or None."""
        return self._body

    @body.setter
    def body(self, value):
        self._body = value

    @property
    def id(self):
        """Message id taken from the transport headers ('' when unset)."""
        return self._id

    @id.setter
    def id(self, value):
        self._id = value or ''

    def ack(self):
        """Acknowledge the underlying transport message."""
        self._message_handle.ack()
| # Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import anyjson
import logging
LOG = logging.getLogger("murano-common.messaging")
class Message(object):
    """Wrapper around an AMQP message delivered by the messaging client.

    Extracts the message id from the transport headers and decodes the
    JSON payload; a payload that is not valid JSON yields body = None.
    """

    def __init__(self, connection=None, message_handle=None):
        self._body = None
        self._connection = connection
        self._message_handle = message_handle
        if message_handle:
            # BUG FIX: the transport message exposes 'headers' (plural);
            # '.header' does not exist and raised AttributeError.
            self.id = message_handle.headers.get('message_id')
        else:
            self.id = None
        try:
            if message_handle:
                self.body = anyjson.loads(message_handle.body)
            else:
                self.body = None
        except ValueError as e:
            # Undecodable payload: keep body as None and log the failure.
            self.body = None
            LOG.exception(e)

    @property
    def body(self):
        """Decoded (JSON) payload of the message, or None."""
        return self._body

    @body.setter
    def body(self, value):
        self._body = value

    @property
    def id(self):
        """Message id taken from the transport headers ('' when unset)."""
        return self._id

    @id.setter
    def id(self, value):
        # Normalize a missing id to the empty string.
        self._id = value or ''

    def ack(self):
        """Acknowledge the underlying transport message."""
        self._message_handle.ack()
| apache-2.0 | Python |
58b34507553a5956b99b0cb2844a4e274eda9cb4 | Resolve merge conflict in output_filters.py | corinnelhh/chatbot,corinnelhh/chatbot | output_filters.py | output_filters.py | import nltk
from nltk import pos_tag
from nltk.tokenize import wordpunct_tokenize
# Registry mapping a filter function's name to the callable itself.
funct_dict = {}


def add_func_to_dict(func):
    """Decorator that registers *func* in ``funct_dict`` keyed by its name.

    The function is returned unchanged so this can be used as a plain
    decorator on the output filters below.
    """
    funct_dict.update({func.__name__: func})
    return func
@add_func_to_dict
def filter_length(sentences, wordcount=13):
    """Drop every sentence with more than ``wordcount`` words.

    Sentences with exactly ``wordcount`` words are kept (the old docstring
    said "less than", which did not match the code).  The caller's list is
    mutated in place and also returned for convenience.
    """
    # Slice assignment keeps the original list object (callers may hold a
    # reference to it) while avoiding the O(n^2) remove-while-iterating
    # pattern of the previous implementation.
    sentences[:] = [s for s in sentences if len(s.split()) <= wordcount]
    return sentences
@add_func_to_dict
def filter_pos(sentences):
    """Keep only sentences that contain at least one content-word POS tag.

    Content parts of speech are verbs (VB*), nouns (NN*) and
    adjectives (JJ*), per the NLTK/Penn Treebank tag prefixes.
    """
    content_pos = ('VB', 'NN', 'JJ')
    kept = []
    for sentence in sentences:
        tagged = pos_tag(wordpunct_tokenize(sentence))
        # A single matching tag is enough to keep the sentence.
        if any(tag[:2] in content_pos for _word, tag in tagged):
            kept.append(sentence)
    return kept
@add_func_to_dict
def filter_NN_VV(sentences):
    """Takes in a list of sentences and returns a reduced list of
    sentences that have at least one noun followed somewhere by at least
    one verb.
    """
    output_sentences = []
    for sentence in sentences:
        words = wordpunct_tokenize(sentence)
        tagged = pos_tag(words)
        # BUG FIX: has_noun must persist across tags of one sentence.  It
        # was previously reset inside the tag loop, so a noun could never
        # precede a verb and no sentence was ever selected.
        has_noun = False
        for word, tag in tagged:
            if tag[:2] == "NN":
                has_noun = True
            if has_noun and tag[:2] == "VB":
                output_sentences.append(sentence)
                break
    return output_sentences
| import nltk
from nltk import pos_tag
from nltk.tokenize import wordpunct_tokenize
def filter_length(sentences, wordcount=13):
"""Takes in a list of sentences and returns a reduced list,
that contains only sentences with less than <wordcount> words."""
for sentence in sentences[:]:
if len(sentence.split()) > wordcount:
sentences.remove(sentence)
return sentences
def filter_pos(sentences):
"""Takes in a list of sentences and returns a reduced list,
that contains only sentences that contain the necessarry pos."""
content_pos = ['VB', 'NN', 'JJ']
output_sentences = []
for sentence in sentences:
words = wordpunct_tokenize(sentence)
tagged = pos_tag(words)
for word, pos in tagged:
if pos[:2] in content_pos:
output_sentences.append(sentence)
break
return output_sentences
def filter_NN_VV(sentences):
    """Takes in a list of sentences and returns a reduced list of
    sentences that have at least one noun followed somewhere by at least one verb
    """
    output_sentences = []
    for sentence in sentences:
        words = wordpunct_tokenize(sentence)
        tagged = pos_tag(words)
        # BUG FIX: has_noun must persist across tags of one sentence.  It
        # was previously reset inside the tag loop, so a noun could never
        # precede a verb and no sentence was ever selected.
        has_noun = False
        for word, tag in tagged:
            if tag[:2] == "NN":
                has_noun = True
            if has_noun and tag[:2] == "VB":
                output_sentences.append(sentence)
                break
    return output_sentences
| mit | Python |
52c01db2efaf3c124695bb182938cf146d02d34c | make tests compaitble with python 2.6 | jumpstarter-io/ceph-deploy,jumpstarter-io/ceph-deploy,Vicente-Cheng/ceph-deploy,branto1/ceph-deploy,rtulke/ceph-deploy,Vicente-Cheng/ceph-deploy,shenhequnying/ceph-deploy,ghxandsky/ceph-deploy,alfredodeza/ceph-deploy,ceph/ceph-deploy,shenhequnying/ceph-deploy,osynge/ceph-deploy,imzhulei/ceph-deploy,isyippee/ceph-deploy,trhoden/ceph-deploy,ktdreyer/ceph-deploy,rtulke/ceph-deploy,ddiss/ceph-deploy,codenrhoden/ceph-deploy,SUSE/ceph-deploy-to-be-deleted,ddiss/ceph-deploy,zhouyuan/ceph-deploy,branto1/ceph-deploy,SUSE/ceph-deploy,osynge/ceph-deploy,trhoden/ceph-deploy,SUSE/ceph-deploy-to-be-deleted,ghxandsky/ceph-deploy,codenrhoden/ceph-deploy,alfredodeza/ceph-deploy,ktdreyer/ceph-deploy,zhouyuan/ceph-deploy,SUSE/ceph-deploy,isyippee/ceph-deploy,ceph/ceph-deploy,imzhulei/ceph-deploy | ceph_deploy/tests/test_cli.py | ceph_deploy/tests/test_cli.py | import pytest
import subprocess
def test_help(tmpdir, cli):
    # Running with --help must succeed and print the usage summary,
    # description and the available argument/command sections.
    with cli(
        args=['ceph-deploy', '--help'],
        stdout=subprocess.PIPE,
    ) as p:
        result = p.stdout.read()
    assert 'usage: ceph-deploy' in result
    assert 'Deploy Ceph' in result
    assert 'optional arguments:' in result
    assert 'commands:' in result
def test_bad_command(tmpdir, cli):
    # An unknown subcommand must fail (the cli fixture raises cli.Failed)
    # with exit status 2 and print the usage text to stderr.
    with pytest.raises(cli.Failed) as err:
        with cli(
            args=['ceph-deploy', 'bork'],
            stderr=subprocess.PIPE,
        ) as p:
            result = p.stderr.read()
    assert 'usage: ceph-deploy' in result
    assert err.value.status == 2
    # The failed run must not create any files as a side effect.
    assert [p.basename for p in tmpdir.listdir()] == []
def test_bad_cluster(tmpdir, cli):
    # A malicious/invalid --cluster path must be rejected with status 2
    # and, crucially, must not create the named path on disk.
    with pytest.raises(cli.Failed) as err:
        with cli(
            args=['ceph-deploy', '--cluster=/evil-this-should-not-be-created', 'new'],
            stderr=subprocess.PIPE,
        ) as p:
            result = p.stderr.read()
    assert 'usage: ceph-deploy' in result
    assert err.value.status == 2
    # Nothing may have been written into the working directory.
    assert [p.basename for p in tmpdir.listdir()] == []
| import pytest
import subprocess
def test_help(tmpdir, cli):
with cli(
args=['ceph-deploy', '--help'],
stdout=subprocess.PIPE,
) as p:
result = p.stdout.read()
assert 'usage: ceph-deploy' in result
assert 'Deploy Ceph' in result
assert 'optional arguments:' in result
assert 'commands:' in result
def test_bad_command(tmpdir, cli):
with pytest.raises(cli.Failed) as err:
with cli(
args=['ceph-deploy', 'bork'],
stderr=subprocess.PIPE,
) as p:
result = p.stderr.read()
assert 'usage: ceph-deploy' in result
assert err.value.status == 2
assert {p.basename for p in tmpdir.listdir()} == set()
def test_bad_cluster(tmpdir, cli):
with pytest.raises(cli.Failed) as err:
with cli(
args=['ceph-deploy', '--cluster=/evil-this-should-not-be-created', 'new'],
stderr=subprocess.PIPE,
) as p:
result = p.stderr.read()
assert 'usage: ceph-deploy' in result
assert err.value.status == 2
assert {p.basename for p in tmpdir.listdir()} == set()
| mit | Python |
9b95c126faa93814011b49228d942e0244d546e6 | Use UTF8 encoding | lonewolf07/coala,Uran198/coala,svsn2117/coala,Nosferatul/coala,sophiavanvalkenburg/coala,jayvdb/coala,CruiseDevice/coala,arafsheikh/coala,lonewolf07/coala,MariosPanag/coala,Balaji2198/coala,yashLadha/coala,kartikeys98/coala,yland/coala,Asalle/coala,rimacone/testing2,tltuan/coala,NalinG/coala,sophiavanvalkenburg/coala,ManjiriBirajdar/coala,aptrishu/coala,RJ722/coala,SanketDG/coala,djkonro/coala,karansingh1559/coala,tushar-rishav/coala,arafsheikh/coala,sils1297/coala,jayvdb/coala,damngamerz/coala,coala/coala,arjunsinghy96/coala,netman92/coala,sagark123/coala,vinc456/coala,AbdealiJK/coala,sagark123/coala,JohnS-01/coala,vinc456/coala,yashtrivedi96/coala,impmihai/coala,dagdaggo/coala,AdeshAtole/coala,netman92/coala,ayushin78/coala,scottbelden/coala,yland/coala,mr-karan/coala,NiklasMM/coala,RJ722/coala,dagdaggo/coala,abhiroyg/coala,ayushin78/coala,dagdaggo/coala,sils1297/coala,tltuan/coala,shreyans800755/coala,tltuan/coala,stevemontana1980/coala,NalinG/coala,sophiavanvalkenburg/coala,ManjiriBirajdar/coala,kartikeys98/coala,Asalle/coala,mr-karan/coala,rimacone/testing2,vinc456/coala,stevemontana1980/coala,meetmangukiya/coala,yashLadha/coala,scottbelden/coala,rresol/coala,refeed/coala,MattAllmendinger/coala,sagark123/coala,AdeshAtole/coala,impmihai/coala,Asnelchristian/coala,Asnelchristian/coala,sudheesh001/coala,karansingh1559/coala,nemaniarjun/coala,MattAllmendinger/coala,meetmangukiya/coala,NalinG/coala,rimacone/testing2,Balaji2198/coala,refeed/coala,Shade5/coala,incorrectusername/coala,CruiseDevice/coala,yashtrivedi96/coala,karansingh1559/coala,CruiseDevice/coala,aptrishu/coala,coala-analyzer/coala,AbdealiJK/coala,refeed/coala,swatilodha/coala,ManjiriBirajdar/coala,saurabhiiit/coala,abhiroyg/coala,SanketDG/coala,JohnS-01/coala,arjunsinghy96/coala,ayushin78/coala,Balaji2198/coala,yashtrivedi96/coala,arush0311/coala,Uran198/coala,mr-karan/coala,MattAllmendinger/coala,coala/coala,scottbelden/coala,jayvdb/coala,
rresol/coala,AbdealiJK/coala,MariosPanag/coala,incorrectusername/coala,arush0311/coala,nemaniarjun/coala,yashLadha/coala,d6e/coala,djkonro/coala,netman92/coala,nemaniarjun/coala,coala-analyzer/coala,NalinG/coala,meetmangukiya/coala,d6e/coala,coala/coala,Nosferatul/coala,impmihai/coala,arush0311/coala,saurabhiiit/coala,arjunsinghy96/coala,damngamerz/coala,NiklasMM/coala,Asnelchristian/coala,Shade5/coala,aptrishu/coala,sudheesh001/coala,NiklasMM/coala,AdeshAtole/coala,tushar-rishav/coala,Asalle/coala,lonewolf07/coala,shreyans800755/coala,swatilodha/coala,JohnS-01/coala,tushar-rishav/coala,saurabhiiit/coala,djkonro/coala,incorrectusername/coala,arafsheikh/coala,MariosPanag/coala,SanketDG/coala,rresol/coala,sils1297/coala,damngamerz/coala,shreyans800755/coala,yland/coala,sudheesh001/coala,NalinG/coala,stevemontana1980/coala,svsn2117/coala,NalinG/coala,svsn2117/coala,Nosferatul/coala,swatilodha/coala,abhiroyg/coala,Shade5/coala,kartikeys98/coala,RJ722/coala,d6e/coala,Uran198/coala,coala-analyzer/coala,NalinG/coala | coalib/results/result_actions/OpenEditorAction.py | coalib/results/result_actions/OpenEditorAction.py | import subprocess
from coalib.results.Diff import Diff
from coalib.results.Result import Result
from coalib.results.result_actions.ApplyPatchAction import ApplyPatchAction
EDITOR_ARGS = {
"subl": "--wait",
"gedit": "-s",
"atom": "--wait"
}
GUI_EDITORS = ["kate", "gedit", "subl", "atom"]
class OpenEditorAction(ApplyPatchAction):
    """Result action that lets the user edit the affected files by hand.

    After the editor exits, the edited files are diffed against the
    originals and the diffs are recorded in ``file_diff_dict`` exactly
    like an applied patch would be.
    """

    success_message = "Editor opened successfully."

    @staticmethod
    def is_applicable(result, original_file_dict, file_diff_dict):
        """The action applies only to results that reference source files."""
        return isinstance(result, Result) and len(result.affected_code) > 0

    def apply(self, result, original_file_dict, file_diff_dict, editor: str):
        '''
        Open the affected file(s) in an editor.

        :param editor: The editor to open the file with.
        '''
        # Use a set to remove duplicate filenames.
        filenames = set(src.file for src in result.affected_code)
        editor_args = [editor] + list(filenames)
        arg = EDITOR_ARGS.get(editor.strip(), None)
        if arg:
            editor_args.append(arg)

        # GUI editors detach from the terminal; swallow their stdout.
        # Terminal editors need the console for themselves.
        if editor in GUI_EDITORS:
            subprocess.call(editor_args, stdout=subprocess.PIPE)
        else:
            subprocess.call(editor_args)

        for filename in filenames:
            with open(filename, encoding='utf-8') as file:
                new_file = file.readlines()

            original_file = original_file_dict[filename]
            # Record the user's edits as a diff against the original file.
            # (Removed dead code: the previous revision also computed an
            # intermediate 'current_file' value that was never used.)
            file_diff_dict[filename] = Diff.from_string_arrays(original_file,
                                                               new_file)
        return file_diff_dict
| import subprocess
from coalib.results.Diff import Diff
from coalib.results.Result import Result
from coalib.results.result_actions.ApplyPatchAction import ApplyPatchAction
EDITOR_ARGS = {
"subl": "--wait",
"gedit": "-s",
"atom": "--wait"
}
GUI_EDITORS = ["kate", "gedit", "subl", "atom"]
class OpenEditorAction(ApplyPatchAction):
success_message = "Editor opened successfully."
@staticmethod
def is_applicable(result, original_file_dict, file_diff_dict):
return isinstance(result, Result) and len(result.affected_code) > 0
def apply(self, result, original_file_dict, file_diff_dict, editor: str):
'''
Open the affected file(s) in an editor.
:param editor: The editor to open the file with.
'''
# Use set to remove duplicates
filenames = set(src.file for src in result.affected_code)
editor_args = [editor] + list(filenames)
arg = EDITOR_ARGS.get(editor.strip(), None)
if arg:
editor_args.append(arg)
# Dear user, you wanted an editor, so you get it. But do you really
# think you can do better than we?
if editor in GUI_EDITORS:
subprocess.call(editor_args, stdout=subprocess.PIPE)
else:
subprocess.call(editor_args)
for filename in filenames:
with open(filename) as file:
new_file = file.readlines()
original_file = original_file_dict[filename]
try:
current_file = file_diff_dict[filename].modified
except KeyError:
current_file = original_file
file_diff_dict[filename] = Diff.from_string_arrays(original_file,
new_file)
return file_diff_dict
| agpl-3.0 | Python |
33bc8b39b1ce307536a63b98629fc2241b0fd29f | add error message when asserting False | freedesktop-unofficial-mirror/telepathy__telepathy-salut,freedesktop-unofficial-mirror/telepathy__telepathy-salut,freedesktop-unofficial-mirror/telepathy__telepathy-salut,freedesktop-unofficial-mirror/telepathy__telepathy-salut | tests/twisted/avahi/test-disabled-1-1-tubes.py | tests/twisted/avahi/test-disabled-1-1-tubes.py | """
Test if 1-1 tubes support is properly disabled.
This test should be removed as soon as we re-enable 1-1 tubes support.
"""
from saluttest import exec_test
from avahitest import AvahiAnnouncer, AvahiListener
from avahitest import get_host_name
import avahi
import dbus
import os
import errno
import string
from xmppstream import setup_stream_listener, connect_to_stream
from servicetest import make_channel_proxy, Event
from twisted.words.xish import xpath, domish
from twisted.internet.protocol import Factory, Protocol, ClientCreator
from twisted.internet import reactor
PUBLISHED_NAME="test-tube"
CHANNEL_TYPE_TUBES = "org.freedesktop.Telepathy.Channel.Type.Tubes"
HT_CONTACT = 1
HT_CONTACT_LIST = 3
TEXT_MESSAGE_TYPE_NORMAL = dbus.UInt32(0)
SOCKET_ADDRESS_TYPE_UNIX = dbus.UInt32(0)
SOCKET_ADDRESS_TYPE_IPV4 = dbus.UInt32(2)
SOCKET_ACCESS_CONTROL_LOCALHOST = dbus.UInt32(0)
sample_parameters = dbus.Dictionary({
's': 'hello',
'ay': dbus.ByteArray('hello'),
'u': dbus.UInt32(123),
'i': dbus.Int32(-123),
}, signature='sv')
print "FIXME: test-disabled-1-1-tubes.py disabled because 1-1 tubes are enabled"
# exiting 77 causes automake to consider the test to have been skipped
raise SystemExit(77)
def test(q, bus, conn):
conn.Connect()
q.expect('dbus-signal', signal='StatusChanged', args=[0L, 0L])
basic_txt = { "txtvers": "1", "status": "avail" }
contact_name = PUBLISHED_NAME + get_host_name()
listener, port = setup_stream_listener(q, contact_name)
announcer = AvahiAnnouncer(contact_name, "_presence._tcp", port, basic_txt)
publish_handle = conn.RequestHandles(HT_CONTACT_LIST, ["publish"])[0]
publish = conn.RequestChannel(
"org.freedesktop.Telepathy.Channel.Type.ContactList",
HT_CONTACT_LIST, publish_handle, False)
handle = 0
# Wait until the record shows up in publish
while handle == 0:
e = q.expect('dbus-signal', signal='MembersChanged', path=publish)
for h in e.args[1]:
name = conn.InspectHandles(HT_CONTACT, [h])[0]
if name == contact_name:
handle = h
# we can't request 1-1 tubes channel
try:
conn.RequestChannel(CHANNEL_TYPE_TUBES, HT_CONTACT, handle,
True)
except dbus.DBusException, e:
assert e.get_dbus_name() == 'org.freedesktop.Telepathy.Errors.NotImplemented'
else:
assert False, "Should raise NotImplemented error"
if __name__ == '__main__':
exec_test(test)
| """
Test if 1-1 tubes support is properly disabled.
This test should be removed as soon as we re-enable 1-1 tubes support.
"""
from saluttest import exec_test
from avahitest import AvahiAnnouncer, AvahiListener
from avahitest import get_host_name
import avahi
import dbus
import os
import errno
import string
from xmppstream import setup_stream_listener, connect_to_stream
from servicetest import make_channel_proxy, Event
from twisted.words.xish import xpath, domish
from twisted.internet.protocol import Factory, Protocol, ClientCreator
from twisted.internet import reactor
PUBLISHED_NAME="test-tube"
CHANNEL_TYPE_TUBES = "org.freedesktop.Telepathy.Channel.Type.Tubes"
HT_CONTACT = 1
HT_CONTACT_LIST = 3
TEXT_MESSAGE_TYPE_NORMAL = dbus.UInt32(0)
SOCKET_ADDRESS_TYPE_UNIX = dbus.UInt32(0)
SOCKET_ADDRESS_TYPE_IPV4 = dbus.UInt32(2)
SOCKET_ACCESS_CONTROL_LOCALHOST = dbus.UInt32(0)
sample_parameters = dbus.Dictionary({
's': 'hello',
'ay': dbus.ByteArray('hello'),
'u': dbus.UInt32(123),
'i': dbus.Int32(-123),
}, signature='sv')
print "FIXME: test-disabled-1-1-tubes.py disabled because 1-1 tubes are enabled"
# exiting 77 causes automake to consider the test to have been skipped
raise SystemExit(77)
def test(q, bus, conn):
conn.Connect()
q.expect('dbus-signal', signal='StatusChanged', args=[0L, 0L])
basic_txt = { "txtvers": "1", "status": "avail" }
contact_name = PUBLISHED_NAME + get_host_name()
listener, port = setup_stream_listener(q, contact_name)
announcer = AvahiAnnouncer(contact_name, "_presence._tcp", port, basic_txt)
publish_handle = conn.RequestHandles(HT_CONTACT_LIST, ["publish"])[0]
publish = conn.RequestChannel(
"org.freedesktop.Telepathy.Channel.Type.ContactList",
HT_CONTACT_LIST, publish_handle, False)
handle = 0
# Wait until the record shows up in publish
while handle == 0:
e = q.expect('dbus-signal', signal='MembersChanged', path=publish)
for h in e.args[1]:
name = conn.InspectHandles(HT_CONTACT, [h])[0]
if name == contact_name:
handle = h
# we can't request 1-1 tubes channel
try:
conn.RequestChannel(CHANNEL_TYPE_TUBES, HT_CONTACT, handle,
True)
except dbus.DBusException, e:
assert e.get_dbus_name() == 'org.freedesktop.Telepathy.Errors.NotImplemented'
else:
assert False
if __name__ == '__main__':
exec_test(test)
| lgpl-2.1 | Python |
8fcb7205dbf36709e27d89734ff33a8cdc9b6968 | Fix docker_events field handling | saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt | salt/engines/docker_events.py | salt/engines/docker_events.py | # -*- coding: utf-8 -*-
'''
Send events from Docker events
:Depends: Docker API >= 1.22
'''
# Import Python Libs
from __future__ import absolute_import
import json
import logging
import traceback
import salt.utils
# pylint: disable=import-error
try:
import docker
import docker.utils
HAS_DOCKER_PY = True
except ImportError:
HAS_DOCKER_PY = False
log = logging.getLogger(__name__) # pylint: disable=invalid-name
# Default timeout as of docker-py 1.0.0
CLIENT_TIMEOUT = 60
# Define the module's virtual name
__virtualname__ = 'docker_events'
def __virtual__():
    '''
    Only load if docker libs are present
    '''
    if not HAS_DOCKER_PY:
        # A (False, reason) tuple tells the Salt loader why loading failed.
        return (False, 'Docker_events engine could not be imported')
    return True
def start(docker_url='unix://var/run/docker.sock',
          timeout=CLIENT_TIMEOUT,
          tag='salt/engines/docker_events'):
    '''
    Scan for Docker events and fire events

    Example Config

    .. code-block:: yaml

        engines:
          docker_events:
            docker_url: unix://var/run/docker.sock

    The config above sets up engines to listen
    for events from the Docker daemon and publish
    them to the Salt event bus.
    '''
    # On the master, publish straight onto the master event bus; on a
    # minion, go through the event.send execution module instead.
    if __opts__.get('__role') == 'master':
        fire_master = salt.utils.event.get_master_event(
            __opts__,
            __opts__['sock_dir']).fire_event
    else:
        fire_master = None

    def fire(tag, msg):
        '''
        Publish *msg* under *tag* via whichever bus is available.
        '''
        if fire_master:
            fire_master(msg, tag)
        else:
            __salt__['event.send'](tag, msg)

    client = docker.Client(base_url=docker_url,
                           timeout=timeout)
    try:
        events = client.events()
        for event in events:
            data = json.loads(event)
            # https://github.com/docker/cli/blob/master/cli/command/system/events.go#L109
            # https://github.com/docker/engine-api/blob/master/types/events/events.go
            # API >= 1.22 events carry an 'Action' field; older event
            # formats provide only 'status'.  Use .get() so a missing
            # 'Action' key does not raise KeyError and kill the engine.
            if data.get('Action'):
                fire('{0}/{1}'.format(tag, data['Action']), data)
            else:
                fire('{0}/{1}'.format(tag, data['status']), data)
    except Exception:
        traceback.print_exc()
| # -*- coding: utf-8 -*-
'''
Send events from Docker events
:Depends: Docker API >= 1.22
'''
# Import Python Libs
from __future__ import absolute_import
import json
import logging
import traceback
import salt.utils
# pylint: disable=import-error
try:
import docker
import docker.utils
HAS_DOCKER_PY = True
except ImportError:
HAS_DOCKER_PY = False
log = logging.getLogger(__name__) # pylint: disable=invalid-name
# Default timeout as of docker-py 1.0.0
CLIENT_TIMEOUT = 60
# Define the module's virtual name
__virtualname__ = 'docker_events'
def __virtual__():
'''
Only load if docker libs are present
'''
if not HAS_DOCKER_PY:
return (False, 'Docker_events engine could not be imported')
return True
def start(docker_url='unix://var/run/docker.sock',
timeout=CLIENT_TIMEOUT,
tag='salt/engines/docker_events'):
'''
Scan for Docker events and fire events
Example Config
.. code-block:: yaml
engines:
docker_events:
docker_url: unix://var/run/docker.sock
The config above sets up engines to listen
for events from the Docker daemon and publish
them to the Salt event bus.
'''
if __opts__.get('__role') == 'master':
fire_master = salt.utils.event.get_master_event(
__opts__,
__opts__['sock_dir']).fire_event
else:
fire_master = None
def fire(tag, msg):
'''
How to fire the event
'''
if fire_master:
fire_master(msg, tag)
else:
__salt__['event.send'](tag, msg)
client = docker.Client(base_url=docker_url,
timeout=timeout)
try:
events = client.events()
for event in events:
data = json.loads(event)
fire('{0}/{1}'.format(tag, data['status']), data)
except Exception:
traceback.print_exc()
| apache-2.0 | Python |
37215ba859e393b11187f6a98cb0fd89f4fbbaf4 | Simplify one more tests. | Neki/searchengine | searchengine/tests/evaluation/test_evaluation.py | searchengine/tests/evaluation/test_evaluation.py | import unittest
from unittest import TestCase
from searchengine.evaluation.evaluation import *
from searchengine.search.vectorial_search import vectorial_search
from searchengine.parser import CacmDocument
from searchengine.index.process import Weighting
class TestEvaluation(TestCase):
    """Unit tests for the precision/recall (rappel) evaluation helpers."""

    def test_number_of_relevant_documents(self):
        # Results 1 and 2 are in the relevant set; 4 and 7 are not.
        request = Request(1, "tata", [1, 2, 5, 12])
        search_result = [(1, 20), (2, 3), (4, 5), (7, 2)]
        self.assertEqual(2, number_of_relevant_documents(request, search_result))

    def test_precision(self):
        # 2 relevant documents out of 4 returned -> precision 0.5.
        request = Request(1, "tata", [1, 2, 5, 12])
        search_result = [(1, 20), (2, 3), (4, 5), (7, 2)]
        self.assertEqual(0.5, precision(request, search_result))

    def test_recall(self):
        # 2 of the 8 relevant documents are returned -> recall 0.25.
        request = Request(1, "tata", [1, 2, 3, 4, 5, 6, 7, 8])
        search_result = [(1, 20), (3, 3)]
        self.assertEqual(0.25, rappel(request, search_result))

    @unittest.skip("Skipping until Travis CI is properly configured (this test blocks otherwise)")
    def test_plot_precision_rappel(self):
        # Smoke test: build a tiny two-document corpus and plot the
        # precision/recall curve for a vectorial search over it.
        request = Request(1,"tata",[1,2])
        document1 = CacmDocument(1, "aujourd \n toto \n tata","il fait \n tyty","toto \n tata tyty")
        document2 = CacmDocument(2,"plouf \n paf","tata \n toto il tata","tyty plouf \n tata paf")
        document_list = [document1, document2]
        common_words=["aujourd","il","fait","ca","mot","ok"]
        search_results = vectorial_search(request.text, document_list, common_words, 2,Weighting.TermFrequency)
        plot_precision_rappel(request, search_results)
| import unittest
from unittest import TestCase
from searchengine.evaluation.evaluation import *
from searchengine.search.vectorial_search import vectorial_search
from searchengine.parser import CacmDocument
from searchengine.index.process import Weighting
class TestEvaluation(TestCase):
def test_number_of_relevant_documents(self):
request = Request(1, "tata", [1, 2, 5, 12])
search_result = [(1, 20), (2, 3), (4, 5), (7, 2)]
self.assertEqual(2, number_of_relevant_documents(request, search_result))
def test_precision(self):
request = Request(1, "tata", [1, 2, 5, 12])
search_result = [(1, 20), (2, 3), (4, 5), (7, 2)]
self.assertEqual(0.5, precision(request, search_result))
def test_rappel(self):
request = Request(1,"tata",[1,2])
document1 = CacmDocument(1, "aujourd \n toto \n tata","il fait \n tyty","toto \n tata tyty")
document2 = CacmDocument(2,"plouf \n paf","tata \n toto il tata","tyty plouf \n tata paf")
document_list = [document1, document2]
common_words=["aujourd","il","fait","ca","mot","ok"]
search_results = vectorial_search(request.text, document_list, common_words, 2,Weighting.TermFrequency)
self.assertEqual(1,rappel(request, search_results))
@unittest.skip("Skipping until Travis CI is properly configured (this test blocks otherwise)")
def test_plot_precision_rappel(self):
request = Request(1,"tata",[1,2])
document1 = CacmDocument(1, "aujourd \n toto \n tata","il fait \n tyty","toto \n tata tyty")
document2 = CacmDocument(2,"plouf \n paf","tata \n toto il tata","tyty plouf \n tata paf")
document_list = [document1, document2]
common_words=["aujourd","il","fait","ca","mot","ok"]
search_results = vectorial_search(request.text, document_list, common_words, 2,Weighting.TermFrequency)
plot_precision_rappel(request, search_results)
| mit | Python |
cdcf6899522f3d768c80684b1f43366513b4a301 | fix version in __init__ | bassio/omicexperiment | omicexperiment/__init__.py | omicexperiment/__init__.py |
__version__ = '0.1.2-dev1'
|
# Resolved leftover merge-conflict markers: keep the newer version string.
__version__ = '0.1.2-dev1'
| bsd-3-clause | Python |
f7b8112764a66dbfb394a2584fef354c248de60a | add iphone sim detection macros | bittorrent/needy,vmrob/needy,ccbrown/needy,bittorrent/needy,ccbrown/needy,vmrob/needy | needy/platforms/iphonesim.py | needy/platforms/iphonesim.py | from ..platform import Platform
import platform
DEFAULT_MIN_IOS_VERSION = '5.0'
class iPhoneSimulatorPlatform(Platform):
    """Needy platform definition for building against the iOS simulator SDK."""

    def __init__(self, parameters):
        Platform.__init__(self, parameters)
        # Fall back to a conservative deployment target when the caller did
        # not provide minimum_ios_version.  (Assumes *parameters* supports
        # both 'in' and attribute access -- TODO confirm against Platform.)
        self.__minimum_version = parameters.minimum_ios_version if 'minimum_ios_version' in parameters else DEFAULT_MIN_IOS_VERSION

    @staticmethod
    def identifier():
        """Name used to select this platform (e.g. on the command line)."""
        return 'iphonesim'

    def default_architecture(self):
        # The simulator runs natively, so build for the host architecture.
        return platform.machine()

    def c_compiler(self, architecture):
        """C compiler command line targeting the iphonesimulator SDK."""
        return 'xcrun -sdk iphonesimulator clang -arch %s -mios-version-min=%s' % (architecture, self.__minimum_version)

    def cxx_compiler(self, architecture):
        """C++ compiler command line targeting the iphonesimulator SDK."""
        return 'xcrun -sdk iphonesimulator clang++ -arch %s -mios-version-min=%s' % (architecture, self.__minimum_version)

    @staticmethod
    def detection_macro(architecture):
        """Preprocessor expression identifying this platform/arch pair.

        Only the two simulator architectures are recognized; any other
        architecture yields None.
        """
        if architecture == 'x86_64':
            return 'TARGET_OS_IOS && TARGET_OS_SIMULATOR && __LP64__'
        elif architecture == 'i386':
            return 'TARGET_OS_IOS && TARGET_OS_SIMULATOR && !__LP64__'
        return None
| from ..platform import Platform
import platform
DEFAULT_MIN_IOS_VERSION = '5.0'
class iPhoneSimulatorPlatform(Platform):
def __init__(self, parameters):
Platform.__init__(self, parameters)
self.__minimum_version = parameters.minimum_ios_version if 'minimum_ios_version' in parameters else DEFAULT_MIN_IOS_VERSION
@staticmethod
def identifier():
return 'iphonesim'
def default_architecture(self):
return platform.machine()
def c_compiler(self, architecture):
return 'xcrun -sdk iphonesimulator clang -arch %s -mios-version-min=%s' % (architecture, self.__minimum_version)
def cxx_compiler(self, architecture):
return 'xcrun -sdk iphonesimulator clang++ -arch %s -mios-version-min=%s' % (architecture, self.__minimum_version)
@staticmethod
def detection_macro(architecture):
return 'TARGET_OS_IOS && TARGET_OS_SIMULATOR'
| mit | Python |
3a3ab4b7410cb9f491b56d785236832b94e6936b | Add tests for Mocking.HistoryItem. | vishwaprakashmishra/xmatrix,vishwaprakashmishra/xmatrix,harrissoerja/vumi,harrissoerja/vumi,vishwaprakashmishra/xmatrix,harrissoerja/vumi,TouK/vumi,TouK/vumi,TouK/vumi | vumi/tests/test_testutils.py | vumi/tests/test_testutils.py | from twisted.trial.unittest import TestCase
from vumi.service import Worker
from vumi.tests.utils import get_stubbed_worker, Mocking
from vumi.tests.fake_amqp import FakeAMQClient
class ToyWorker(Worker):
    """Minimal Worker subclass used as a fixture by the tests below."""

    def poke(self):
        return "poke"
class MockingHistoryItemTestCase(TestCase):
    """Tests for Mocking.HistoryItem, the record of a single mocked call."""

    def test_basic_item(self):
        # args and kwargs should be stored exactly as passed in.
        item = Mocking.HistoryItem(("a", "b"), {"c": 1})
        self.assertEqual(item.args, ("a", "b"))
        self.assertEqual(item.kwargs, {"c": 1})

    def test_repr(self):
        # repr() embeds the object's id(), so build the expectation from it.
        item = Mocking.HistoryItem(("a", "b"), {"c": 1})
        self.assertEqual(repr(item), "<'HistoryItem' object at %s"
                         " [args: ('a', 'b'), kw: {'c': 1}]>" % id(item))
class UtilsTestCase(TestCase):
    """Tests for the get_stubbed_worker() helper."""

    def test_get_stubbed_worker(self):
        worker = get_stubbed_worker(ToyWorker)
        self.assertEqual("poke", worker.poke())
        # The stub wires workers to a fake AMQP client instead of a broker.
        self.assertTrue(isinstance(worker._amqp_client, FakeAMQClient))

    def test_get_stubbed_worker_with_config(self):
        options = {'key': 'value'}
        worker = get_stubbed_worker(ToyWorker, options)
        # Options become the worker's config, not the AMQP client's options.
        self.assertEqual({}, worker._amqp_client.vumi_options)
        self.assertEqual(options, worker.config)
| from twisted.trial.unittest import TestCase
from vumi.service import Worker
from vumi.tests.utils import get_stubbed_worker
from vumi.tests.fake_amqp import FakeAMQClient
class ToyWorker(Worker):
def poke(self):
return "poke"
class UtilsTestCase(TestCase):
def test_get_stubbed_worker(self):
worker = get_stubbed_worker(ToyWorker)
self.assertEqual("poke", worker.poke())
self.assertTrue(isinstance(worker._amqp_client, FakeAMQClient))
def test_get_stubbed_worker_with_config(self):
options = {'key': 'value'}
worker = get_stubbed_worker(ToyWorker, options)
self.assertEqual({}, worker._amqp_client.vumi_options)
self.assertEqual(options, worker.config)
| bsd-3-clause | Python |
f1ee5b9b6f5b173895b19fd16b635a570927605d | Use shell=True for windows npm installation of gauge | kashishm/gauge-python,kashishm/gauge-python | check_and_install_getgauge.py | check_and_install_getgauge.py | import sys
import json
import pkg_resources
from subprocess import check_output
def get_version():
    """Return the installed gauge-python plugin version, or '' when absent.

    The command is passed as a single string: with shell=True a *list*
    argument only executes its first element on POSIX (so ``gauge`` would
    run without ``-v --machine-readable``); a string works on both Windows
    and Unix shells.
    """
    out = check_output("gauge -v --machine-readable", shell=True)
    data = json.loads(str(out.decode()))
    for plugin in data['plugins']:
        if plugin['name'] == 'python':
            return plugin['version']
    return ''
def get_dev_getgauge_version(plugin_nightly_version):
    """Map a nightly plugin version (e.g. '0.3.5.nightly-2019-01-01') to
    the matching getgauge pre-release version ('0.3.5.dev20190101')."""
    stripped = plugin_nightly_version.replace('nightly', '')
    digits = stripped.replace('-', '')
    # Insert the PEP 440 'dev' marker after the 'X.Y.Z.' prefix.
    return digits[:6] + "dev" + digits[6:]
def install_getgauge(getgauge_version):
    """pip-install the given getgauge requirement for the current user."""
    command = [sys.executable, "-m", "pip", "install", getgauge_version, "--user"]
    if "dev" in getgauge_version:
        # pip skips pre-release (dev) versions unless --pre is passed.
        command.append("--pre")
    check_output(command)
def assert_versions():
    """Ensure the installed getgauge package matches the gauge-python plugin.

    Installs the expected getgauge version when it is missing or mismatched;
    exits with status 1 when the gauge-python plugin itself is not installed.
    """
    python_plugin_version = get_version()
    if not python_plugin_version:
        print('The gauge python plugin is not installed!')
        exit(1)

    # Nightly plugin builds correspond to getgauge dev pre-releases.
    if "nightly" in python_plugin_version:
        expected_gauge_version = get_dev_getgauge_version(python_plugin_version)
    else:
        expected_gauge_version = python_plugin_version

    try:
        installed = pkg_resources.get_distribution('getgauge').version
    except pkg_resources.DistributionNotFound:
        install_getgauge("getgauge==" + expected_gauge_version)
    else:
        if installed != expected_gauge_version:
            install_getgauge("getgauge==" + expected_gauge_version)


if __name__ == '__main__':
    assert_versions()
| import sys
import json
import pkg_resources
from subprocess import check_output
def get_version():
out = check_output(["gauge", "-v", "--machine-readable"])
data = json.loads(str(out.decode()))
for plugin in data['plugins']:
if plugin['name'] == 'python':
return plugin['version']
return ''
def get_dev_getgauge_version(plugin_nightly_version):
refined_version = plugin_nightly_version.replace(
'nightly', '').replace('-', '')
return refined_version[:6] + "dev" + refined_version[6:]
def install_getgauge(getgauge_version):
install_cmd = [sys.executable, "-m", "pip", "install", getgauge_version, "--user"]
if "dev" in getgauge_version:
install_cmd.append("--pre")
check_output(install_cmd)
def assert_versions():
python_plugin_version = get_version()
if not python_plugin_version:
print('The gauge python plugin is not installed!')
exit(1)
expected_gauge_version = python_plugin_version
if "nightly" in python_plugin_version:
expected_gauge_version = get_dev_getgauge_version(
python_plugin_version)
try:
getgauge_version = pkg_resources.get_distribution('getgauge').version
if getgauge_version != expected_gauge_version:
install_getgauge("getgauge=="+expected_gauge_version)
except pkg_resources.DistributionNotFound:
install_getgauge("getgauge=="+expected_gauge_version)
if __name__ == '__main__':
assert_versions()
| mit | Python |
8e275405af5c79f80dbb9870fef1349ad20e619f | Fix missing imports. | salopensource/sal,sheagcraig/sal,sheagcraig/sal,salopensource/sal,sheagcraig/sal,salopensource/sal,salopensource/sal,sheagcraig/sal | server/management/commands/server_maintenance.py | server/management/commands/server_maintenance.py | """Cleans up plugin script submissions and update histories that exceed the retention limit"""
import datetime
import gc
from time import sleep
from django.core.management.base import BaseCommand
from django.conf import settings
from django.db.models import Q
import django.utils.timezone
import server.utils
from server.models import PluginScriptSubmission, UpdateHistory, UpdateHistoryItem
class Command(BaseCommand):
    """Management command: purge records older than the retention window."""
    help = (
        'Cleans up plugin script submissions and update histories that exceed the retention limit')

    def add_arguments(self, parser):
        # Optional delay (seconds) before starting, e.g. to stagger cron jobs.
        parser.add_argument('sleep_time', type=int, nargs='?', default=0)

    def handle(self, *args, **options):
        sleep_time = options['sleep_time']
        sleep(sleep_time)
        # Retention window in days, read from the server settings.
        historical_days = server.utils.get_setting('historical_retention')
        datelimit = django.utils.timezone.now() - datetime.timedelta(days=historical_days)
        # Clear out too-old plugin script submissions.
        PluginScriptSubmission.objects.filter(recorded__lt=datelimit).delete()
        # Clean up UpdateHistory and items which are over our retention
        # limit and are no longer managed, or which have no history items.
        for history in UpdateHistory.objects.all():
            try:
                latest = history.updatehistoryitem_set.latest('recorded').recorded
            except UpdateHistoryItem.DoesNotExist:
                # History has no items at all: it is empty, so drop it.
                history.delete()
                continue

            if latest < datelimit:
                history.delete()

        # Encourage reclaiming memory after iterating large querysets.
        gc.collect()
| """Cleans up plugin script submissions and update histories that exceed the retention limit"""
import datetime
import gc
from time import sleep
from django.core.management.base import BaseCommand
from django.conf import settings
from django.db.models import Q
import django.utils.timezone
import server.utils
from server.models import PluginScriptSubmission
class Command(BaseCommand):
help = (
'Cleans up plugin script submissions and update histories that exceed the retention limit')
def add_arguments(self, parser):
parser.add_argument('sleep_time', type=int, nargs='?', default=0)
def handle(self, *args, **options):
sleep_time = options['sleep_time']
sleep(sleep_time)
historical_days = server.utils.get_setting('historical_retention')
datelimit = django.utils.timezone.now() - datetime.timedelta(days=historical_days)
# Clear out too-old plugin script submissions.
PluginScriptSubmission.objects.filter(recorded__lt=datelimit).delete()
# Clean up UpdateHistory and items which are over our retention
# limit and are no longer managed, or which have no history items.
for history in UpdateHistory.objects.all():
try:
latest = history.updatehistoryitem_set.latest('recorded').recorded
except UpdateHistoryItem.DoesNotExist:
history.delete()
continue
if latest < datelimit:
history.delete()
gc.collect()
| apache-2.0 | Python |
15a0293c45d8b799235b690bd9c4570f388bd4a7 | Fix some abiguities | qasim/cobalt,cobalt-uoft/cobalt,cobalt-io/cobalt,ivanzhangio/cobalt,kshvmdn/cobalt,cobalt-io/cobalt | web-scraper/course_finder.py | web-scraper/course_finder.py | import requests
import http.cookiejar
import time
class CourseFinder:
    """A wrapper for utilizing UofT's Course Finder web service."""

    def __init__(self):
        self.host = 'http://coursefinder.utoronto.ca'

    def search(self, query='', requirements=''):
        """Perform a search and return the data as a dict."""
        url = '%s/course-search/search/courseSearch/course/search' % self.host
        params = {
            'queryText': query,
            'requirements': requirements,
            'campusParam': 'St. George,Scarborough,Mississauga'
        }
        jar = http.cookiejar.CookieJar()
        session = requests.Session()

        # The service intermittently answers with non-200 responses; keep
        # retrying until a good response arrives, pausing briefly between
        # attempts.
        while True:
            response = session.get(url, params=params, cookies=jar)
            if response.status_code == 200:
                return response.json()
            sleep(0.5)
| import requests
import http.cookiejar
import time
class CourseFinder:
"""A wrapper for utilizing UofT's Course Finder web service."""
def __init__(self):
self.host = 'http://coursefinder.utoronto.ca'
def search(self, query='', requirements=''):
"""Perform a UofT Course Finder search and return data as a dict."""
url = '%s/course-search/search/courseSearch/course/search' % self.host
data = {
'queryText': query,
'requirements': requirements,
'campusParam': 'St. George,Scarborough,Mississauga'
}
cookies = http.cookiejar.CookieJar()
s = requests.Session()
# Keep trying to get data until a proper response is given
json = None
while json is None:
r = s.get(url, params=data, cookies=cookies)
if r.status_code == 200:
json = r.json()
else:
time.sleep(0.5)
return json
| mit | Python |
267226978395e30ad9e5e30acb25eb569a67b6b1 | Delete print statements from emailer | ViderumGlobal/ckanext-orgportals,ViderumGlobal/ckanext-orgportals,ViderumGlobal/ckanext-orgportals,ViderumGlobal/ckanext-orgportals | ckanext/orgportals/emailer.py | ckanext/orgportals/emailer.py | import logging
import smtplib
from socket import error as socket_error
from email.mime.text import MIMEText
from pylons import config
log = logging.getLogger(__name__)
FROM = config.get('ckanext.orgportals.smtp.mail.from', 'usedata@montroseint.com')
SMTP_SERVER = config.get('ckanext.orgportals.smtp.server', 'localhost')
SMTP_USER = config.get('ckanext.orgportals.smtp.user', 'username')
SMTP_PASSWORD = config.get('ckanext.orgportals.smtp.password', 'password')
def send_email(content, subject, to, from_=FROM):
    """Send a plain-text email and return a human-readable status string.

    ``to`` may be a single address or a list of addresses.
    """
    # Normalise a bare address into a one-element recipient list.
    recipients = [to] if isinstance(to, basestring) else to

    message = MIMEText(content, 'plain', 'UTF-8')
    message['Subject'] = subject
    message['From'] = from_
    message['To'] = ','.join(recipients)

    try:
        connection = smtplib.SMTP(SMTP_SERVER)
        connection.login(SMTP_USER, SMTP_PASSWORD)
        connection.sendmail(from_, recipients, message.as_string())
        connection.quit()
    except socket_error:
        log.critical('Could not connect to email server. Have you configured the SMTP settings?')
        return 'An error occured while sending the email. Try again.'
    return 'Email message was successfully sent.'
| import logging
import smtplib
from socket import error as socket_error
from email.mime.text import MIMEText
from pylons import config
log = logging.getLogger(__name__)
FROM = config.get('ckanext.orgportals.smtp.mail.from', 'usedata@montroseint.com')
SMTP_SERVER = config.get('ckanext.orgportals.smtp.server', 'localhost')
SMTP_USER = config.get('ckanext.orgportals.smtp.user', 'username')
SMTP_PASSWORD = config.get('ckanext.orgportals.smtp.password', 'password')
def send_email(content, subject, to, from_=FROM):
msg = MIMEText(content,'plain','UTF-8')
if isinstance(to, basestring):
to = [to]
msg['Subject'] = subject
msg['From'] = from_
msg['To'] = ','.join(to)
print 'SMTP_SERVER: ', SMTP_SERVER
print 'SMTP_USER: ', SMTP_USER
print 'SMTP_PASSWORD: ', SMTP_PASSWORD
print 'from_: ', from_
print 'to: ', to
print 'msg: ', msg
try:
s = smtplib.SMTP(SMTP_SERVER)
s.login(SMTP_USER, SMTP_PASSWORD)
s.sendmail(from_, to, msg.as_string())
s.quit()
return 'Email message was successfully sent.'
except socket_error:
log.critical('Could not connect to email server. Have you configured the SMTP settings?')
return 'An error occured while sending the email. Try again.'
| agpl-3.0 | Python |
468d964fda4da65f24bc25812987ee0b831ae140 | add include/exclude flags to validate task. Closes #38 | cathydeng/openelections-core,openelections/openelections-core,datamade/openelections-core,cathydeng/openelections-core,openelections/openelections-core,datamade/openelections-core | openelex/tasks/validate.py | openelex/tasks/validate.py | import os
import sys
from collections import OrderedDict
from invoke import task
from .utils import load_module, split_args
@task(help={
    'state':'Two-letter state-abbreviation, e.g. NY',
})
def list(state):
    """
    Show available validations for state.
    """
    # NOTE: this task name shadows the builtin list() within this module.
    state_mod = load_module(state, ['validate'])
    print "\nAvailable validators:\n"
    # Any attribute named validate_* in the state's validate module counts.
    for name in dir(state_mod.validate):
        if name.startswith('validate_'):
            func = getattr(state_mod.validate, name)
            out = "\t%s" % name
            if func.func_doc:
                # Show the validator's docstring, indented, when present.
                out += "\n\t\t %s" % func.func_doc
            print out + "\n"
@task(help={
'state':'Two-letter state-abbreviation, e.g. NY',
'include': 'Validations to run (comma-separated list)',
'exclude': 'Validations to skip (comma-separated list)',
})
def run(state, include=None, exclude=None):
"""
Run data validations for state.
State is required. Optionally provide to limit validations that are performed.
"""
if include and exclude:
sys.exit("ERROR: You can not use both include and exclude flags!")
state_mod = load_module(state, ['validate'])
# Load all validations in order found
validations = OrderedDict()
for name in dir(state_mod.validate):
if name.startswith('validate_'):
func = getattr(state_mod.validate, name)
validations[name] = func
# Filter validations based in include/exclude flags
if include:
to_run = split_args(include)
for val in validations:
if val not in to_run:
validations.pop(val)
if exclude:
to_skip = split_args(exclude)
for val in validations:
if val in to_skip:
validations.pop(val)
# Run remaining validations
passed = []
failed = []
print
for val, func in validations.items():
try:
func()
passed.append(name)
except Exception as e:
failed.append("Error: %s - %s - %s" % (state.upper(), name, e))
print "\n\nVALIDATION RESULTS"
print "Passed: %s" % len(passed)
print "Failed: %s" % len(failed)
for fail in failed:
print "\t%s" % fail
print
| import os
import sys
from invoke import task
from .utils import load_module
@task(help={
'state':'Two-letter state-abbreviation, e.g. NY',
})
def list(state):
state_mod = load_module(state, ['validate'])
print "\nAvailable validators:\n"
for name in dir(state_mod.validate):
if name.startswith('validate_'):
func = getattr(state_mod.validate, name)
out = "\t%s" % name
if func.func_doc:
out += "\n\t\t %s" % func.func_doc
print out + "\n"
@task(help={
'state':'Two-letter state-abbreviation, e.g. NY',
})
def run(state):
"""
Run data validations.
State is required.
"""
state_mod = load_module(state, ['validate'])
passed = []
failed = []
print
for name in dir(state_mod.validate):
if name.startswith('validate_'):
func = getattr(state_mod.validate, name)
try:
func()
passed.append(name)
except Exception as e:
failed.append("Error: %s - %s - %s" % (state.upper(), name, e))
print "\n\nVALIDATION RESULTS"
print "Passed: %s" % len(passed)
print "Failed: %s" % len(failed)
for fail in failed:
print "\t%s" % fail
print
| mit | Python |
84855334bf0a07d8b0d9e9023c980c88a52e2e65 | fix for django<1.6 | allink/allink-frontend-editor,allink/allink-frontend-editor | allink_frontend_editor/page_extensions/snippets.py | allink_frontend_editor/page_extensions/snippets.py | from __future__ import absolute_import, unicode_literals
from functools import update_wrapper
from django.contrib.admin.util import unquote
from django.http import HttpResponse
from django.shortcuts import render
from feincms import extensions
from ..fields import JSONField
from ..forms import SnippetForm
class Extension(extensions.Extension):
    """FeinCMS page extension that stores editable text snippets as JSON
    and injects an admin view for editing a single snippet by identifier."""

    def handle_model(self):
        # One JSON blob per page, keyed by snippet identifier; nullable so
        # existing rows need no backfill.
        self.model.add_to_class('_snippets', JSONField('Snippets', null=True))

    def handle_modeladmin(self, modeladmin):
        # Patch the admin *class*, so the view/urls apply to this ModelAdmin.
        modeladmin_class = type(modeladmin)

        def snippets_view(inner_self, request, object_id, identifier):
            # NOTE(review): permission check is against the 'page' app
            # regardless of which model this admin serves -- confirm intended.
            if not request.user.has_module_perms('page'):
                return HttpResponse('Unauthorized', status=401)
            obj = inner_self.get_object(request, unquote(object_id))
            if request.method == 'POST':
                form = SnippetForm(request.POST, page=obj, identifier=identifier)
                if form.is_valid():
                    form.save()
                    return HttpResponse('OK')
            else:
                form = SnippetForm(page=obj, identifier=identifier)
            return render(request, 'allink_frontend_editor/snippet_form.html', {'form': form})
        modeladmin_class.snippets_view = snippets_view

        # Keep a reference to the original get_urls so it can be extended.
        old_get_urls = modeladmin_class.get_urls

        def get_urls(inner_self):
            from django.conf.urls import patterns, url

            def wrap(view):
                # Route the view through admin_site.admin_view (login check).
                def wrapper(*args, **kwargs):
                    return inner_self.admin_site.admin_view(view)(*args, **kwargs)
                return update_wrapper(wrapper, view)

            # 'model_name' replaced 'module_name' in Django 1.6; getattr
            # keeps compatibility with both.
            info = inner_self.model._meta.app_label, getattr(inner_self.model._meta, 'model_name', inner_self.model._meta.module_name)  # django <1.6 had this as module_name

            # Prepend the snippets URL so it wins over the catch-all admin
            # change-view pattern.
            urlpatterns = patterns('',
                url(r'^(.+)/snippets/([\w_]+)/$', wrap(inner_self.snippets_view), name='%s_%s_snippets' % info),
            ) + old_get_urls(inner_self)
            return urlpatterns
        modeladmin_class.get_urls = get_urls
| from __future__ import absolute_import, unicode_literals
from functools import update_wrapper
from django.contrib.admin.util import unquote
from django.http import HttpResponse
from django.shortcuts import render
from feincms import extensions
from ..fields import JSONField
from ..forms import SnippetForm
class Extension(extensions.Extension):
def handle_model(self):
self.model.add_to_class('_snippets', JSONField('Snippets', null=True))
def handle_modeladmin(self, modeladmin):
modeladmin_class = type(modeladmin)
def snippets_view(inner_self, request, object_id, identifier):
if not request.user.has_module_perms('page'):
return HttpResponse('Unauthorized', status=401)
obj = inner_self.get_object(request, unquote(object_id))
if request.method == 'POST':
form = SnippetForm(request.POST, page=obj, identifier=identifier)
if form.is_valid():
form.save()
return HttpResponse('OK')
else:
form = SnippetForm(page=obj, identifier=identifier)
return render(request, 'allink_frontend_editor/snippet_form.html', {'form': form})
modeladmin_class.snippets_view = snippets_view
old_get_urls = modeladmin_class.get_urls
def get_urls(inner_self):
from django.conf.urls import patterns, url
def wrap(view):
def wrapper(*args, **kwargs):
return inner_self.admin_site.admin_view(view)(*args, **kwargs)
return update_wrapper(wrapper, view)
info = inner_self.model._meta.app_label, inner_self.model._meta.model_name
urlpatterns = patterns('',
url(r'^(.+)/snippets/([\w_]+)/$', wrap(inner_self.snippets_view), name='%s_%s_snippets' % info),
) + old_get_urls(inner_self)
return urlpatterns
modeladmin_class.get_urls = get_urls
| bsd-3-clause | Python |
54a16964d412f4c7d1c64c482e99122406ed1328 | Test colissimo id lengths, and raise some errors | willprice/weboob,nojhan/weboob-devel,willprice/weboob,yannrouillard/weboob,Boussadia/weboob,RouxRC/weboob,frankrousseau/weboob,laurent-george/weboob,yannrouillard/weboob,Boussadia/weboob,Boussadia/weboob,Konubinix/weboob,Boussadia/weboob,Konubinix/weboob,RouxRC/weboob,nojhan/weboob-devel,sputnick-dev/weboob,laurent-george/weboob,sputnick-dev/weboob,willprice/weboob,sputnick-dev/weboob,laurent-george/weboob,frankrousseau/weboob,nojhan/weboob-devel,frankrousseau/weboob,RouxRC/weboob,Konubinix/weboob,yannrouillard/weboob | modules/colissimo/backend.py | modules/colissimo/backend.py | # -*- coding: utf-8 -*-
# Copyright(C) 2013 Florent Fourcot
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.capabilities.parcel import ICapParcel, Parcel, Event
from weboob.capabilities.base import UserError
from weboob.tools.backend import BaseBackend
from .browser import ColissimoBrowser
from datetime import date
__all__ = ['ColissimoBackend']
class ColissimoBackend(BaseBackend, ICapParcel):
    """Weboob backend for the Colissimo parcel-tracking website."""
    NAME = 'colissimo'
    DESCRIPTION = u'Colissimo parcel tracking website'
    MAINTAINER = u'Florent Fourcot'
    EMAIL = 'weboob@flo.fourcot.fr'
    VERSION = '0.h'

    BROWSER = ColissimoBrowser

    def get_parcel_tracking(self, _id):
        """Return a Parcel with its status and a one-event history.

        Raises UserError for malformed ids or when the site reports an error.
        """
        # 13 is the magic length of colissimo tracking ids
        if len(_id) != 13:
            raise UserError(u"Colissimo tracking ids must be 13 characters long")
        data = self.browser.get_tracking_info(_id)
        p = Parcel(_id)
        label = data['message']
        if data['error']:
            # Propagate the site's own error message (the old code raised
            # the literal string u"label" instead of the message itself).
            raise UserError(label)
        p.info = label

        # TODO, need to know the delivery message
        if u"remis au gardien ou" in label or u"Votre colis est livré" in label:
            p.status = p.STATUS_ARRIVED
        elif u"pas encore pris en charge par La Poste" in label:
            p.status = p.STATUS_PLANNED
        else:
            p.status = p.STATUS_IN_TRANSIT

        ev = Event(0)
        ev.activity = label
        # The site reports dates as DD/MM/YYYY; date() wants (YYYY, MM, DD).
        ev.date = date(*reversed([int(x) for x in data['date'].split("/")]))
        p.history = [ev]
        return p
| # -*- coding: utf-8 -*-
# Copyright(C) 2013 Florent Fourcot
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.capabilities.parcel import ICapParcel, Parcel, Event
from weboob.tools.backend import BaseBackend
from .browser import ColissimoBrowser
from datetime import date
__all__ = ['ColissimoBackend']
class ColissimoBackend(BaseBackend, ICapParcel):
NAME = 'colissimo'
DESCRIPTION = u'Colissimo parcel tracking website'
MAINTAINER = u'Florent Fourcot'
EMAIL = 'weboob@flo.fourcot.fr'
VERSION = '0.h'
BROWSER = ColissimoBrowser
def get_parcel_tracking(self, _id):
data = self.browser.get_tracking_info(_id)
p = Parcel(_id)
label = data['message']
if data['error']:
p.info = label
return p
p.info = label
# TODO, need to know the delivery message
if u"remis au gardien ou" in label or u"Votre colis est livré" in label:
p.status = p.STATUS_ARRIVED
elif u"pas encore pris en charge par La Poste" in label:
p.status = p.STATUS_PLANNED
else:
p.status = p.STATUS_IN_TRANSIT
ev = Event(0)
ev.activity = label
ev.date = date(*reversed([int(x) for x in data['date'].split("/")]))
p.history = [ev]
return p
| agpl-3.0 | Python |
99b8dac214f489dc3e429b643851c178d9d63ec9 | update urlpatterns to django 1.11 | cobrateam/django-htmlmin,cobrateam/django-htmlmin | htmlmin/tests/pico_django.py | htmlmin/tests/pico_django.py | # -*- coding: utf-8 -*-
# Copyright 2013 django-htmlmin authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
'''
File: pico_django.py
Description: Code based on snippet available in the link below.
https://github.com/readevalprint/mini-django/blob/master/pico_django.py
'''
from django.conf.urls import url
from django.http import HttpResponse
from htmlmin.decorators import minified_response, not_minified_response
# Fixture HTML served by all three views; indentation/newlines matter for
# asserting what the minifier removes.
CONTENT = '''
<html>
  <body>
    <p>Hello world! :D</p>
    <div>Copyright 3000</div>
  </body>
</html>
'''


# View whose response is explicitly minified by the decorator.
@minified_response
def minified(request):
    return HttpResponse(CONTENT)


# View explicitly excluded from minification.
@not_minified_response
def not_minified(request):
    return HttpResponse(CONTENT)


# Undecorated view: subject to whatever the middleware default is.
def raw(request):
    return HttpResponse(CONTENT)

urlpatterns = [
    url(r'^min$', minified),
    url(r'^raw$', raw),
    url(r'^not_min$', not_minified)
]
| # -*- coding: utf-8 -*-
# Copyright 2013 django-htmlmin authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
'''
File: pico_django.py
Description: Code based on snippet available in the link below.
https://github.com/readevalprint/mini-django/blob/master/pico_django.py
'''
from django.conf.urls import patterns
from django.http import HttpResponse
from htmlmin.decorators import minified_response, not_minified_response
CONTENT = '''
<html>
<body>
<p>Hello world! :D</p>
<div>Copyright 3000</div>
</body>
</html>
'''
@minified_response
def minified(request):
return HttpResponse(CONTENT)
@not_minified_response
def not_minified(request):
return HttpResponse(CONTENT)
def raw(request):
return HttpResponse(CONTENT)
urlpatterns = patterns('',
(r'^min$', minified),
(r'^raw$', raw),
(r'^not_min$', not_minified))
| bsd-2-clause | Python |
238e573440c72581a051b16c15f56fcd25bece74 | Deal with sequences with odd characters | RNAcentral/rnacentral-import-pipeline,RNAcentral/rnacentral-import-pipeline,RNAcentral/rnacentral-import-pipeline,RNAcentral/rnacentral-import-pipeline | rnacentral_pipeline/rnacentral/ftp_export/ensembl.py | rnacentral_pipeline/rnacentral/ftp_export/ensembl.py | # -*- coding: utf-8 -*-
"""
Copyright [2009-2017] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
import json
import operator as op
from jsonschema import validate
from rnacentral_pipeline import psql
# Modomics entries are exported as a URL rather than a bare accession.
MOD_URL = "http://modomics.genesilico.pl/sequences/list/{id}"

# Expert databases whose annotations are always trusted (lower-cased names).
TRUSTED_DB = set(
    [
        "gtrnadb",
        "lncrnadb",
        "mirbase",
        "modomics",
        "pdbe",
        "snopy",  # NOTE: a missing comma previously fused this entry with
        "srpdb",  # the next one into the single bogus name "snopysrpdb"
        "tmrna website",
    ]
)

# Allowed characters for an exported (DNA-alphabet) sequence.
SEQUENCE_PATTERN = re.compile('^[ACGTYRWSKMDVHBNXFI]+$')
def external_id(data):
    """Return the identifier to publish for a single cross-reference."""
    database = data["database"]
    if database == "PDBe":
        # PDB entries need the chain id appended to be unambiguous.
        return "{0}_{1}".format(data["external_id"], data["optional_id"])
    if database == "Modomics":
        return MOD_URL.format(id=data["external_id"])
    return data["external_id"]
def is_high_quality(data):
    """True when the xref comes from a curated/trusted expert database."""
    name = data["database"].lower()
    if name == "rfam":
        # Rfam is trusted only for seed-alignment memberships.
        return data["molecule_type"] == "seed"
    return name in TRUSTED_DB
def as_xref(xref):
    """Reduce a raw xref row to the exported {database, id} mapping."""
    return {
        "database": xref["database"],
        "id": external_id(xref),
    }
def builder(data):
    """Build one export entry: DNA-alphabet sequence plus deduplicated,
    high-quality xrefs only."""
    result = dict(data)
    fingerprint_of = op.itemgetter("database", "id")
    seen = set()
    xrefs = []
    for raw_xref in data["xrefs"]:
        if not is_high_quality(raw_xref):
            continue
        xref = as_xref(raw_xref)
        fingerprint = fingerprint_of(xref)
        if fingerprint in seen:
            continue
        seen.add(fingerprint)
        xrefs.append(xref)
    # Export DNA-style sequences: upper-case, with U transcribed to T.
    result["sequence"] = result["sequence"].upper().replace("U", "T")
    result["xrefs"] = xrefs
    return result
def generate_file(raw, output, schema_file=None):
    """Write export JSON built from raw psql JSON lines to *output*.

    Entries whose sequence contains characters outside SEQUENCE_PATTERN are
    dropped.  If *schema_file* is given, the result is validated against
    that JSON schema before being written.
    """
    built = (builder(entry) for entry in psql.json_handler(raw))
    # builder() stores the string under 'sequence' (singular); the old code
    # looked up r['sequences'] and raised KeyError on every entry.
    results = [entry for entry in built if SEQUENCE_PATTERN.match(entry['sequence'])]
    if schema_file:
        with open(schema_file, "r") as schema_handle:
            validate(results, json.load(schema_handle))
    json.dump(results, output)
| # -*- coding: utf-8 -*-
"""
Copyright [2009-2017] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
import operator as op
from jsonschema import validate
from rnacentral_pipeline import psql
MOD_URL = "http://modomics.genesilico.pl/sequences/list/{id}"
TRUSTED_DB = set(
[
"gtrnadb",
"lncrnadb",
"mirbase",
"modomics",
"pdbe",
"snopy" "srpdb",
"tmrna website",
]
)
def external_id(data):
if data["database"] == "PDBe":
return "%s_%s" % (data["external_id"], data["optional_id"])
if data["database"] == "Modomics":
return MOD_URL.format(id=data["external_id"])
return data["external_id"]
def is_high_quality(data):
name = data["database"].lower()
if name in TRUSTED_DB:
return True
if name == "rfam":
return data["molecule_type"] == "seed"
return False
def as_xref(xref):
return {"database": xref["database"], "id": external_id(xref)}
def builder(data):
result = dict(data)
xrefs = []
seen = set()
key = op.itemgetter("database", "id")
for xref in data["xrefs"]:
if not is_high_quality(xref):
continue
updated = as_xref(xref)
value = key(updated)
if value not in seen:
xrefs.append(updated)
seen.add(value)
result["sequence"] = result["sequence"].upper().replace("U", "T")
result["xrefs"] = xrefs
return result
def generate_file(raw, output, schema_file=None):
results = [builder(b) for b in psql.json_handler(raw)]
if schema_file:
with open(schema_file, "r") as raw:
validate(results, json.load(raw))
json.dump(results, output)
| apache-2.0 | Python |
d10b9ae52a6d1894ff2b6686c502fa398b2753da | fix bug of viz | cyliustack/sofa,cyliustack/sofa,cyliustack/sofa,cyliustack/sofa,cyliustack/sofa | bin/sofa_viz.py | bin/sofa_viz.py | import os
import subprocess
import sys
from functools import partial
from sofa_config import *
from sofa_print import *
def sofa_viz(cfg):
    """Serve cfg.logdir over HTTP so the SOFA timeline can be viewed in a browser."""
    print_warning(cfg,
        'If your rendering timeline is slow, please try \033[4msofa report --plot_ratio=10\033[24m to downsample scatter points,')
    print_warning(cfg, 'and then \033[4msofa viz\033[24m to see the downsampled results.')
    hints = (
        'SOFA Vlization is listening on port \033[4m\033[97mhttp://localhost:%d\033[24m\033[0m\033[24m' % (cfg.viz_port),
        'To change port, please run command: \033[4msofa viz --viz_port=PortNumber\033[24m',
        'Please open your browser to start profiling.',
        'After profiling, please enter Ctrl+C to exit.',
    )
    for hint in hints:
        print_hint(hint)
    # Blocks until interrupted; the HTTP server's output is discarded.
    serve_command = 'cd %s && python3 -m http.server %d 2>&1 1> /dev/null; cd -' % (cfg.logdir, cfg.viz_port)
    os.system(serve_command)
| import os
import subprocess
import sys
from functools import partial
from sofa_config import *
from sofa_print import *
def sofa_viz(cfg):
print_warning(cfg,
'If your rendering timeline is slow, please try \033[4msofa report --plot_ratio=10\033[24m to downsample scatter points,')
print_warning(cfg, 'and then \033[4msofa viz\033[24m to see the downsampled results.')
print_hint('SOFA Vlization is listening on port \033[4m\033[97mhttp://localhost:%d\033[24m\033[0m\033[24m' % (cfg.viz_port) )
print_hint('To change port, please run command: \033[4msofa viz --viz_port=PortNumber\033[24m')
print_hint('Please open your browser to start profiling.')
print_hint('After profiling, please enter Ctrl+C to exit.')
os.system('cd %s && python3.6 -m http.server %d 2>&1 1> /dev/null; cd -' % (cfg.logdir,cfg.viz_port))
| apache-2.0 | Python |
a935cc920624d2b539fb49fabf87a22a5be89789 | Update for Python 3.2; encode postdata as ASCII | clarifyeducation/coursenotify,mutantmonkey/coursenotify | coursenotify.py | coursenotify.py | #!/usr/bin/python
import urllib.parse, urllib.request, urllib.error
import re
import config
from emailgateway import EmailGateway
from time import sleep
nosectex = b"NO SECTIONS FOUND FOR THIS INQUIRY."
coursenrex = b"<TD class=deleft style=background-color:WHITE>\n<FONT SIZE=\"1\">(.+?)<\/FONT>\n<\/TD>"
coursetitex = b"<TD class=deleft style=background-color:WHITE>(.+?)<\/TD>"
postdata = {
'CAMPUS' : config.campus,
'TERMYEAR': config.termyear,
'CORE_CODE' : "AR%",
'SUBJ_CODE' : "%",
'SCHDTYPE' : "%",
'BTN_PRESSED' : "FIND class sections",
'inst_name' : "",
}
gateway = EmailGateway(config.from_addr, config.smtp_host, config.smtp_port, config.smtp_tls, config.smtp_user, config.smtp_pass)
def check_sections():
for crn in config.crns:
postdata['crn'] = crn
postdata['open_only'] = ""
# ensure that section exists
#encoded = urllib.parse.urlencode(postdata)
#page = urllib.request.urlopen(url, data=encoded)
#result = page.read()
#if re.search(nosectex, result) is not None:
#print("CRN %d: Section does not exist" % crn)
# check to see if there are open seats
postdata['open_only'] = "on"
encoded = urllib.parse.urlencode(postdata).encode('ascii')
page = urllib.request.urlopen(config.url, data=encoded)
result = page.read()
if re.search(nosectex, result) is None:
coursenr = re.search(coursenrex, result).group(1).decode('ascii')
coursetitle = re.search(coursetitex, result).group(1).decode('ascii')
#print("CRN %d: Section open" % crn)
gateway.send(config.notify_addr, "coursenotify: %s open" % coursenr, """Hello,
This message is to inform you that at last run, coursenotify
found an open seat in %s %s, CRN %d.
You will continue to receive notifications the next time coursenotify
runs unless you remove this CRN from your configuration.
""" % (coursenr, coursetitle, crn))
#else:
# print("CRN %d: Section full" % crn)
check_sections()
| #!/usr/bin/python
import urllib.parse, urllib.request, urllib.error
import re
import config
from emailgateway import EmailGateway
from time import sleep
nosectex = b"NO SECTIONS FOUND FOR THIS INQUIRY."
coursenrex = b"<TD class=deleft style=background-color:WHITE>\n<FONT SIZE=\"1\">(.+?)<\/FONT>\n<\/TD>"
coursetitex = b"<TD class=deleft style=background-color:WHITE>(.+?)<\/TD>"
postdata = {
'CAMPUS' : config.campus,
'TERMYEAR': config.termyear,
'CORE_CODE' : "AR%",
'SUBJ_CODE' : "%",
'SCHDTYPE' : "%",
'BTN_PRESSED' : "FIND class sections",
'inst_name' : "",
}
gateway = EmailGateway(config.from_addr, config.smtp_host, config.smtp_port, config.smtp_tls, config.smtp_user, config.smtp_pass)
def check_sections():
for crn in config.crns:
postdata['crn'] = crn
postdata['open_only'] = ""
# ensure that section exists
#encoded = urllib.parse.urlencode(postdata)
#page = urllib.request.urlopen(url, data=encoded)
#result = page.read()
#if re.search(nosectex, result) is not None:
#print("CRN %d: Section does not exist" % crn)
# check to see if there are open seats
postdata['open_only'] = "on"
encoded = urllib.parse.urlencode(postdata)
page = urllib.request.urlopen(config.url, data=encoded)
result = page.read()
if re.search(nosectex, result) is None:
coursenr = re.search(coursenrex, result).group(1).decode('ascii')
coursetitle = re.search(coursetitex, result).group(1).decode('ascii')
#print("CRN %d: Section open" % crn)
gateway.send(config.notify_addr, "coursenotify: %s open" % coursenr, """Hello,
This message is to inform you that at last run, coursenotify
found an open seat in %s %s, CRN %d.
You will continue to receive notifications the next time coursenotify
runs unless you remove this CRN from your configuration.
""" % (coursenr, coursetitle, crn))
#else:
# print("CRN %d: Section full" % crn)
check_sections()
| isc | Python |
23f5ff5f7e75df9665e650412bbc42ccbf6aa287 | Add method get_cluster_message | studiawan/pygraphc | pygraphc/anomaly/SentimentAnalysis.py | pygraphc/anomaly/SentimentAnalysis.py | from textblob import TextBlob
from operator import itemgetter
class SentimentAnalysis(object):
"""Get sentiment analysis with only positive and negative considered.
Positive means normal logs and negative sentiment refers to possible attacks.
This class uses sentiment analysis feature from the TextBlob library [Loria2016]_.
References
----------
.. [Loria2016] Steven Loria and the contributors, TextBlob: Simple, Pythonic, text processing--Sentiment analysis,
part-of-speech tagging, noun phrase extraction, translation, and more.
https://github.com/sloria/TextBlob/
"""
def __init__(self, graph, clusters):
self.graph = graph
self.clusters = clusters
self.cluster_message = {}
def get_cluster_message(self):
"""Get most frequent message in a cluster.
"""
word_frequency = {}
for cluster_id, cluster in self.clusters.iteritems():
# get word frequency per cluster
frequency = {}
for node in cluster:
event = self.graph.node[node]['preprocessed_event']
for word in event.split():
frequency[word] = frequency.get(word, 0) + 1
# sorted_frequency = dict(sorted(frequency.items(), key=itemgetter(1), reverse=True))
# word_frequency[cluster_id] = sorted_frequency
# self.cluster_message[cluster_id] = ' '.join(sorted_frequency.keys())
word_frequency[cluster_id] = frequency
self.cluster_message[cluster_id] = ' '.join(frequency.keys())
def get_sentiment(self):
"""Get negative or positive sentiment.
Default score for sentiment score is -1 to 1. The value that close to 1 means more positive and vice versa.
Returns
-------
sentiment_score : dict
A dictionary containing key: cluster id and value: sentiment score.
"""
sentiment_score = {}
for cluster_id, message in self.cluster_message.iteritems():
possible_sentiment = TextBlob(message)
if possible_sentiment.sentiment.polarity >= 0.:
sentiment_score[cluster_id] = possible_sentiment.sentiment.polarity
elif possible_sentiment.sentiment.polarity < 0.:
sentiment_score[cluster_id] = possible_sentiment.sentiment.polarity
return sentiment_score
def get_normalized_sentiment(self):
"""Get normalized sentiment score.
Returns
-------
normalized_score : dict
A dictionary containing key: cluster id and value: normalized sentiment score.
"""
sentiment_score = self.get_sentiment()
normalized_score = {}
min_score = min(sentiment_score.values())
max_score = max(sentiment_score.values())
for cluster_id, score in sentiment_score.iteritems():
normalized_score[cluster_id] = (score - min_score) / (max_score - min_score)
return normalized_score
| from textblob import TextBlob
class SentimentAnalysis(object):
"""Get sentiment analysis with only positive and negative considered.
Positive means normal logs and negative sentiment refers to possible attacks.
This class uses sentiment analysis feature from the TextBlob library [Loria2016]_.
References
----------
.. [Loria2016] Steven Loria and the contributors, TextBlob: Simple, Pythonic, text processing--Sentiment analysis,
part-of-speech tagging, noun phrase extraction, translation, and more.
https://github.com/sloria/TextBlob/
"""
def __init__(self, cluster_message):
self.cluster_message = cluster_message
def get_sentiment(self):
"""Get negative or positive sentiment.
Default score for sentiment score is -1 to 1. The value that close to 1 means more positive and vice versa.
Returns
-------
sentiment_score : dict
A dictionary containing key: cluster id and value: sentiment score.
"""
sentiment_score = {}
for cluster_id, message in self.cluster_message.iteritems():
possible_sentiment = TextBlob(message)
if possible_sentiment.sentiment.polarity >= 0.:
sentiment_score[cluster_id] = possible_sentiment.sentiment.polarity
elif possible_sentiment.sentiment.polarity < 0.:
sentiment_score[cluster_id] = possible_sentiment.sentiment.polarity
return sentiment_score
def get_normalized_sentiment(self):
"""Get normalized sentiment score.
Returns
-------
normalized_score : dict
A dictionary containing key: cluster id and value: normalized sentiment score.
"""
sentiment_score = self.get_sentiment()
normalized_score = {}
min_score = min(sentiment_score.values())
max_score = max(sentiment_score.values())
for cluster_id, score in sentiment_score.iteritems():
normalized_score[cluster_id] = (score - min_score) / (max_score - min_score)
return normalized_score
| mit | Python |
62cc569446f7217fa768f260418e504373f05621 | Use one_hot=True when building MNIST | sandeepkbhat/pylearn2,KennethPierce/pylearnk,kastnerkyle/pylearn2,Refefer/pylearn2,JesseLivezey/plankton,daemonmaker/pylearn2,mclaughlin6464/pylearn2,TNick/pylearn2,abergeron/pylearn2,aalmah/pylearn2,theoryno3/pylearn2,caidongyun/pylearn2,hyqneuron/pylearn2-maxsom,woozzu/pylearn2,Refefer/pylearn2,chrish42/pylearn,hyqneuron/pylearn2-maxsom,matrogers/pylearn2,se4u/pylearn2,sandeepkbhat/pylearn2,ashhher3/pylearn2,matrogers/pylearn2,mkraemer67/pylearn2,jamessergeant/pylearn2,sandeepkbhat/pylearn2,mclaughlin6464/pylearn2,msingh172/pylearn2,kose-y/pylearn2,fyffyt/pylearn2,theoryno3/pylearn2,se4u/pylearn2,CIFASIS/pylearn2,jeremyfix/pylearn2,fulmicoton/pylearn2,kose-y/pylearn2,daemonmaker/pylearn2,nouiz/pylearn2,pkainz/pylearn2,hantek/pylearn2,nouiz/pylearn2,JesseLivezey/pylearn2,fulmicoton/pylearn2,matrogers/pylearn2,woozzu/pylearn2,jeremyfix/pylearn2,ashhher3/pylearn2,chrish42/pylearn,fishcorn/pylearn2,jamessergeant/pylearn2,aalmah/pylearn2,jeremyfix/pylearn2,pkainz/pylearn2,cosmoharrigan/pylearn2,lisa-lab/pylearn2,w1kke/pylearn2,matrogers/pylearn2,skearnes/pylearn2,w1kke/pylearn2,fyffyt/pylearn2,Refefer/pylearn2,JesseLivezey/plankton,caidongyun/pylearn2,cosmoharrigan/pylearn2,daemonmaker/pylearn2,Refefer/pylearn2,fyffyt/pylearn2,lancezlin/pylearn2,chrish42/pylearn,shiquanwang/pylearn2,jeremyfix/pylearn2,nouiz/pylearn2,hyqneuron/pylearn2-maxsom,jamessergeant/pylearn2,KennethPierce/pylearnk,woozzu/pylearn2,lancezlin/pylearn2,lunyang/pylearn2,se4u/pylearn2,alexjc/pylearn2,alexjc/pylearn2,fulmicoton/pylearn2,daemonmaker/pylearn2,CIFASIS/pylearn2,cosmoharrigan/pylearn2,theoryno3/pylearn2,caidongyun/pylearn2,pombredanne/pylearn2,KennethPierce/pylearnk,KennethPierce/pylearnk,CIFASIS/pylearn2,sandeepkbhat/pylearn2,chrish42/pylearn,fulmicoton/pylearn2,junbochen/pylearn2,junbochen/pylearn2,w1kke/pylearn2,JesseLivezey/pylearn2,JesseLivezey/plankton,caidongyun/pylearn2,skearnes/pylearn2,kose-y/pylearn2
,kose-y/pylearn2,fishcorn/pylearn2,CIFASIS/pylearn2,woozzu/pylearn2,fishcorn/pylearn2,cosmoharrigan/pylearn2,bartvm/pylearn2,lamblin/pylearn2,bartvm/pylearn2,alexjc/pylearn2,pombredanne/pylearn2,lisa-lab/pylearn2,TNick/pylearn2,lisa-lab/pylearn2,hantek/pylearn2,ddboline/pylearn2,ashhher3/pylearn2,shiquanwang/pylearn2,lisa-lab/pylearn2,ddboline/pylearn2,nouiz/pylearn2,goodfeli/pylearn2,abergeron/pylearn2,w1kke/pylearn2,ashhher3/pylearn2,TNick/pylearn2,lamblin/pylearn2,lancezlin/pylearn2,msingh172/pylearn2,ddboline/pylearn2,shiquanwang/pylearn2,goodfeli/pylearn2,fyffyt/pylearn2,skearnes/pylearn2,mkraemer67/pylearn2,junbochen/pylearn2,pombredanne/pylearn2,mclaughlin6464/pylearn2,hantek/pylearn2,aalmah/pylearn2,alexjc/pylearn2,jamessergeant/pylearn2,pombredanne/pylearn2,abergeron/pylearn2,JesseLivezey/pylearn2,msingh172/pylearn2,fishcorn/pylearn2,hyqneuron/pylearn2-maxsom,abergeron/pylearn2,hantek/pylearn2,se4u/pylearn2,kastnerkyle/pylearn2,ddboline/pylearn2,TNick/pylearn2,lancezlin/pylearn2,theoryno3/pylearn2,shiquanwang/pylearn2,goodfeli/pylearn2,lunyang/pylearn2,mkraemer67/pylearn2,pkainz/pylearn2,bartvm/pylearn2,junbochen/pylearn2,lamblin/pylearn2,skearnes/pylearn2,goodfeli/pylearn2,bartvm/pylearn2,lamblin/pylearn2,JesseLivezey/pylearn2,mclaughlin6464/pylearn2,aalmah/pylearn2,kastnerkyle/pylearn2,pkainz/pylearn2,mkraemer67/pylearn2,JesseLivezey/plankton,lunyang/pylearn2,msingh172/pylearn2,kastnerkyle/pylearn2,lunyang/pylearn2 | pylearn2/datasets/tests/test_mnist.py | pylearn2/datasets/tests/test_mnist.py | from pylearn2.datasets.mnist import MNIST
import unittest
from pylearn2.testing.skip import skip_if_no_data
import numpy as np
class TestMNIST(unittest.TestCase):
def setUp(self):
skip_if_no_data()
self.train = MNIST(which_set = 'train', one_hot=True)
self.test = MNIST(which_set = 'test', one_hot=True)
def test_range(self):
"""Tests that the data spans [0,1]"""
for X in [self.train.X, self.test.X ]:
assert X.min() == 0.0
assert X.max() == 1.0
def test_topo(self):
"""Tests that a topological batch has 4 dimensions"""
topo = self.train.get_batch_topo(1)
assert topo.ndim == 4
def test_topo_c01b(self):
"""
Tests that a topological batch with axes ('c',0,1,'b')
can be dimshuffled back to match the standard ('b',0,1,'c')
format.
"""
batch_size = 100
c01b_test = MNIST(which_set='test', axes=('c', 0, 1, 'b'))
c01b_X = c01b_test.X[0:batch_size,:]
c01b = c01b_test.get_topological_view(c01b_X)
assert c01b.shape == (1, 28, 28, batch_size)
b01c = c01b.transpose(3,1,2,0)
b01c_X = self.test.X[0:batch_size,:]
assert c01b_X.shape == b01c_X.shape
assert np.all(c01b_X == b01c_X)
b01c_direct = self.test.get_topological_view(b01c_X)
assert b01c_direct.shape == b01c.shape
assert np.all(b01c_direct == b01c)
| from pylearn2.datasets.mnist import MNIST
import unittest
from pylearn2.testing.skip import skip_if_no_data
import numpy as np
class TestMNIST(unittest.TestCase):
def setUp(self):
skip_if_no_data()
self.train = MNIST(which_set = 'train')
self.test = MNIST(which_set = 'test')
def test_range(self):
"""Tests that the data spans [0,1]"""
for X in [self.train.X, self.test.X ]:
assert X.min() == 0.0
assert X.max() == 1.0
def test_topo(self):
"""Tests that a topological batch has 4 dimensions"""
topo = self.train.get_batch_topo(1)
assert topo.ndim == 4
def test_topo_c01b(self):
"""
Tests that a topological batch with axes ('c',0,1,'b')
can be dimshuffled back to match the standard ('b',0,1,'c')
format.
"""
batch_size = 100
c01b_test = MNIST(which_set='test', axes=('c', 0, 1, 'b'))
c01b_X = c01b_test.X[0:batch_size,:]
c01b = c01b_test.get_topological_view(c01b_X)
assert c01b.shape == (1, 28, 28, batch_size)
b01c = c01b.transpose(3,1,2,0)
b01c_X = self.test.X[0:batch_size,:]
assert c01b_X.shape == b01c_X.shape
assert np.all(c01b_X == b01c_X)
b01c_direct = self.test.get_topological_view(b01c_X)
assert b01c_direct.shape == b01c.shape
assert np.all(b01c_direct == b01c)
| bsd-3-clause | Python |
025867a11bbc2bb3f85d44ef44fde059b92557bb | add gcloud-python header to user agent (#9551) | tswast/google-cloud-python,tswast/google-cloud-python,googleapis/google-cloud-python,GoogleCloudPlatform/gcloud-python,tseaver/google-cloud-python,googleapis/google-cloud-python,tseaver/google-cloud-python,tseaver/google-cloud-python,tswast/google-cloud-python,GoogleCloudPlatform/gcloud-python | storage/google/cloud/storage/_http.py | storage/google/cloud/storage/_http.py | # Copyright 2014 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create / interact with Google Cloud Storage connections."""
from google.cloud import _http
from google.cloud.storage import __version__
class Connection(_http.JSONConnection):
"""A connection to Google Cloud Storage via the JSON REST API.
:type client: :class:`~google.cloud.storage.client.Client`
:param client: The client that owns the current connection.
:type client_info: :class:`~google.api_core.client_info.ClientInfo`
:param client_info: (Optional) instance used to generate user agent.
"""
DEFAULT_API_ENDPOINT = "https://storage.googleapis.com"
def __init__(self, client, client_info=None, api_endpoint=DEFAULT_API_ENDPOINT):
super(Connection, self).__init__(client, client_info)
self.API_BASE_URL = api_endpoint
self._client_info.client_library_version = __version__
# TODO: When metrics all use gccl, this should be removed #9552
if self._client_info.user_agent is None: # pragma: no branch
self._client_info.user_agent = ""
self._client_info.user_agent += " gcloud-python/{} ".format(__version__)
API_VERSION = "v1"
"""The version of the API, used in building the API call's URL."""
API_URL_TEMPLATE = "{api_base_url}/storage/{api_version}{path}"
"""A template for the URL of a particular API call."""
| # Copyright 2014 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create / interact with Google Cloud Storage connections."""
from google.cloud import _http
from google.cloud.storage import __version__
class Connection(_http.JSONConnection):
"""A connection to Google Cloud Storage via the JSON REST API.
:type client: :class:`~google.cloud.storage.client.Client`
:param client: The client that owns the current connection.
:type client_info: :class:`~google.api_core.client_info.ClientInfo`
:param client_info: (Optional) instance used to generate user agent.
"""
DEFAULT_API_ENDPOINT = "https://storage.googleapis.com"
def __init__(self, client, client_info=None, api_endpoint=DEFAULT_API_ENDPOINT):
super(Connection, self).__init__(client, client_info)
self.API_BASE_URL = api_endpoint
self._client_info.client_library_version = __version__
API_VERSION = "v1"
"""The version of the API, used in building the API call's URL."""
API_URL_TEMPLATE = "{api_base_url}/storage/{api_version}{path}"
"""A template for the URL of a particular API call."""
| apache-2.0 | Python |
6f6cb4760c76f18b859a3c559914ab795b81222e | Fix migration for tests | onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle | bluebottle/mails/migrations/0002_auto_20171211_1117.py | bluebottle/mails/migrations/0002_auto_20171211_1117.py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2017-12-11 09:38
from __future__ import unicode_literals
import urllib2
from django.core.files import File
from django.core.files.temp import NamedTemporaryFile
from django.db import migrations, connection
def scrape_mail_logo(apps, schema_editor):
Client = apps.get_model('clients', 'Client')
tenant = Client.objects.get(schema_name=connection.tenant.schema_name)
if 'localhost' in tenant.domain_url:
logo_url = 'http://' + tenant.domain_url + ':4200/images/logo-email.gif'
else:
logo_url = 'https://' + tenant.domain_url + '/images/logo-email.gif'
MailPlatformSettings = apps.get_model('mails', 'MailPlatformSettings')
from django.core.files import File
try:
img_temp = NamedTemporaryFile(delete=True)
img_temp.write(urllib2.urlopen(logo_url).read())
img_temp.flush()
mail_settings, _ = MailPlatformSettings.objects.get_or_create()
mail_settings.email_logo.save('logo-email.gif', File(img_temp))
except urllib2.URLError:
pass
def backward(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('mails', '0001_initial'),
]
operations = [
migrations.RunPython(scrape_mail_logo, backward)
]
| # -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2017-12-11 09:38
from __future__ import unicode_literals
import urllib2
from django.core.files import File
from django.core.files.temp import NamedTemporaryFile
from django.db import migrations, connection
def scrape_mail_logo(apps, schema_editor):
Client = apps.get_model('clients', 'Client')
tenant = Client.objects.get(schema_name=connection.tenant.schema_name)
if 'localhost' in tenant.domain_url:
logo_url = 'http://' + tenant.domain_url + ':4200/images/logo-email.gif'
else:
logo_url = 'https://' + tenant.domain_url + '/images/logo-email.gif'
MailPlatformSettings = apps.get_model('mails', 'MailPlatformSettings')
from django.core.files import File
img_temp = NamedTemporaryFile(delete=True)
img_temp.write(urllib2.urlopen(logo_url).read())
img_temp.flush()
mail_settings, _ = MailPlatformSettings.objects.get_or_create()
mail_settings.email_logo.save('logo-email.gif', File(img_temp))
def backward(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('mails', '0001_initial'),
]
operations = [
migrations.RunPython(scrape_mail_logo, backward)
]
| bsd-3-clause | Python |
eba363f161d0d026bbde295efe814fa6351d94e0 | Document the FFT | msullivan/advent-of-code,msullivan/advent-of-code,msullivan/advent-of-code | 2019/16b.alt.py | 2019/16b.alt.py | #!/usr/bin/env python3
"""
This does an honest computation of the Flawed Frequency Transmission
that doesn't rely on the offset being large. It computes all of the
digits of the FFT. On my laptop it ran in two minutes using pypy.
The trick is that we compute a partial sums array,
where partials[0] = 0 and partials[i+1] = l[0]+...+l[i-1].
We can then quickly compute the sum l[i]+...+l[j]
as partials[j+1] - partials[i].
Then, for each output element `i`, for each run of `i` 1s or -1s in the
pattern, we can compute the sum of the corresponding input elements in
constant time, multiply it by 1 or -1, and add it to our running sum.
This means that computing output element `i` takes O(N/i) time.
Since I am told that 1/1 + 1/2 + 1/3 + ... + 1/N is O(lg N)
(https://en.wikipedia.org/wiki/Harmonic_number),
this FFT runs in O(N lg N) time.
"""
from __future__ import print_function
import sys
import time
def go(i, partials):
"""Compute one element value from the FFT.
It is kind of uglified for performance."""
step = 2 * i
end = len(partials) - 1
mode = 1
lo = -1 + i
hi = lo + i
res = 0
# We loop until hi is too big and then do a separate check for lo
# so that we can avoid having to bound hi inside the loop each
# iteration. This made things like 15% faster.
while hi < end:
res += mode * (partials[hi] - partials[lo])
mode = -mode
lo += step
hi = lo + i
if lo < end:
hi = min(lo + i, end)
res += mode * (partials[hi] - partials[lo])
return abs(res) % 10
def fft(l):
"""Fucked Fourier Transform"""
partials = [0]
sum = 0
for v in l:
sum += v
partials.append(sum)
x = []
for i, y in enumerate(l):
x.append(go(i+1, partials))
return x
def display(data, offset):
return ''.join(str(x) for x in data[offset:offset+8])
def main(args):
orig_data = [int(x) for x in [s.strip() for s in sys.stdin][0]]
data = orig_data * 10000
offset = int(''.join(str(x) for x in data[:7]))
for i in range(100):
print(i, display(data, 0), display(data, offset))
data = fft(data)
print(i, display(data, 0), display(data, offset))
print(display(data, offset))
if __name__ == '__main__':
main(sys.argv)
| #!/usr/bin/env python3
# This does an honest computation of the Flawed Frequency Transmission
# that doesn't rely on the offset being large. It computes all of the digits
# of the FFT.
# On my laptop it ran in two minutes using pypy.
from __future__ import print_function
import sys
import time
def go(n, partials):
"""Compute one element value from the FFT.
It is kind of uglified for performance."""
step = 2 * n
end = len(partials) - 1
mode = 1
lo = -1 + n
hi = lo + n
res = 0
# We loop until hi is too big and then do a separate check for lo
# so that we can avoid having to bound hi inside the loop each
# iteration. This made things like 15% faster.
while hi < end:
res += mode * (partials[hi] - partials[lo])
mode = -mode
lo += step
hi = lo + n
if lo < end:
hi = min(lo + n, end)
res += mode * (partials[hi] - partials[lo])
return abs(res) % 10
def fft(l):
"""Fucked Fourier Transform"""
partials = [0]
sum = 0
for v in l:
sum += v
partials.append(sum)
x = []
for i, y in enumerate(l):
x.append(go(i+1, partials))
return x
def display(data, offset):
return ''.join(str(x) for x in data[offset:offset+8])
def main(args):
orig_data = [int(x) for x in [s.strip() for s in sys.stdin][0]]
data = orig_data * 10000
offset = int(''.join(str(x) for x in data[:7]))
for i in range(10):
print(i, display(data, 0), display(data, offset))
data = fft(data)
print(display(data, offset))
if __name__ == '__main__':
main(sys.argv)
| mit | Python |
b0a0cf81ee91226a5ab327d5e2aaa5b7eeb4c984 | set the name of the run to be the title in summary_plots() | joelagnel/trappy,joelagnel/trappy,bjackman/trappy,sinkap/trappy,ARM-software/trappy,joelagnel/trappy,ARM-software/trappy,ARM-software/trappy,sinkap/trappy,JaviMerino/trappy,derkling/trappy,JaviMerino/trappy,bjackman/trappy,sinkap/trappy,joelagnel/trappy,ARM-software/trappy,sinkap/trappy,derkling/trappy,bjackman/trappy,derkling/trappy,JaviMerino/trappy,bjackman/trappy | cr2/__init__.py | cr2/__init__.py | #!/usr/bin/python
from pid_controller import PIDController
from power import OutPower, InPower
from thermal import Thermal, ThermalGovernor
from run import Run
from results import CR2, get_results, combine_results
def summary_plots(actor_order, map_label, **kwords):
"""A summary of plots as similar as possible to what CompareRuns plots
actor_order must be an array showing the order in which the actors
where registered. The array values are the labels that will be
used in the input and output power plots. E.g. actor_order can be
["GPU", "A15", "A7]
map_label has to be a dict that matches cpumasks (as found in the
trace) with their proper name. This "proper name" will be used as
a label for the load and allfreqs plots. It's recommended that
the names of the cpus matches those in actor_order. map_label can
be {"0000000f": "A7", "000000f0": "A15"}
"""
import plot_utils
if type(actor_order) is not list:
raise TypeError("actor_order has to be an array")
if type(map_label) is not dict:
raise TypeError("map_label has to be a dict")
if "path" in kwords:
path = kwords["path"]
del kwords["path"]
else:
path = None
if "width" not in kwords:
kwords["width"] = 20
if "height" not in kwords:
kwords["height"] = 5
if "title" in kwords:
title = kwords["title"]
else:
title = ""
kwords_wout_title = kwords.copy()
if "title" in kwords_wout_title:
del kwords_wout_title["title"]
run_data = Run(path=path, name=title)
basetime = run_data.thermal.data_frame.index[0]
run_data.normalize_time(basetime)
plot_utils.plot_temperature([run_data], **kwords_wout_title)
run_data.in_power.plot_load(map_label, **kwords)
run_data.plot_allfreqs(map_label, **kwords)
run_data.pid_controller.plot_controller(**kwords)
run_data.thermal_governor.plot_input_power(actor_order, **kwords)
run_data.thermal_governor.plot_output_power(actor_order, **kwords)
run_data.plot_power_hists(map_label, title)
run_data.thermal.plot_temperature_hist(title)
| #!/usr/bin/python
from pid_controller import PIDController
from power import OutPower, InPower
from thermal import Thermal, ThermalGovernor
from run import Run
from results import CR2, get_results, combine_results
def summary_plots(actor_order, map_label, **kwords):
"""A summary of plots as similar as possible to what CompareRuns plots
actor_order must be an array showing the order in which the actors
where registered. The array values are the labels that will be
used in the input and output power plots. E.g. actor_order can be
["GPU", "A15", "A7]
map_label has to be a dict that matches cpumasks (as found in the
trace) with their proper name. This "proper name" will be used as
a label for the load and allfreqs plots. It's recommended that
the names of the cpus matches those in actor_order. map_label can
be {"0000000f": "A7", "000000f0": "A15"}
"""
import plot_utils
if type(actor_order) is not list:
raise TypeError("actor_order has to be an array")
if type(map_label) is not dict:
raise TypeError("map_label has to be a dict")
if "path" in kwords:
path = kwords["path"]
del kwords["path"]
else:
path = None
run_data = Run(path=path)
basetime = run_data.thermal.data_frame.index[0]
run_data.normalize_time(basetime)
if "width" not in kwords:
kwords["width"] = 20
if "height" not in kwords:
kwords["height"] = 5
if "title" in kwords:
title = kwords["title"]
else:
title = ""
plot_temp_kwords = kwords.copy()
if "title" in plot_temp_kwords:
del plot_temp_kwords["title"]
plot_utils.plot_temperature([run_data], **plot_temp_kwords)
run_data.in_power.plot_load(map_label, **kwords)
run_data.plot_allfreqs(map_label, **kwords)
run_data.pid_controller.plot_controller(**kwords)
run_data.thermal_governor.plot_input_power(actor_order, **kwords)
run_data.thermal_governor.plot_output_power(actor_order, **kwords)
run_data.plot_power_hists(map_label, title)
run_data.thermal.plot_temperature_hist(title)
| apache-2.0 | Python |
96b53217c52f02cfd696a6694865d4c43fdf0603 | add adaptive protect | MySmile/mysmile,MySmile/mysmile | apps/pages/templatetags/email2img.py | apps/pages/templatetags/email2img.py | import os, PIL
from PIL import Image, ImageDraw, ImageFont
from django import template
register = template.Library()
from mysmile.settings.main import STATIC_ROOT, BASE_DIR, DEBUG
@register.filter(name='email2img')
def email2img(email):
""" protect email via img
"""
try:
os.path.isfile(STATIC_ROOT+'static/images/email2img.png')
if DEBUG:
email_mod_time = round(os.stat(BASE_DIR+'config/local.py').st_mtime)
else:
email_mod_time = round(os.stat(BASE_DIR+'config/production.py').st_mtime)
email2img_mod_time = round(os.stat(STATIC_ROOT+'static/images/email2img.png').st_mtime)
if email_mod_time > email2img_mod_time:
raise IOError
else:
return """<img src="/static/images/email2img.png" alt="email">"""
except (OSError, IOError):
color_mode="RGBA"
background_color=(0,0,0,0) # full transparent
fontfile = STATIC_ROOT+'static/fonts/TimesNewRomanCE.ttf'
fontsize = 16
try:
font = ImageFont.truetype(fontfile, fontsize)
width, height = font.getsize(email)
# add fontsize%10 for fix some visual bug
im = Image.new(color_mode, (width, height+fontsize%10), background_color)
draw = ImageDraw.Draw(im)
draw.text((0,0), email, (0,0,0), font=font)
img_full_path = STATIC_ROOT+'static/images/email2img.png'
im.save(img_full_path)
except Exception:
# return non-protected email. In future: log this error!
return '<a href="mailto:'+email+'">'+email+'</a>'
else:
return """<img src="/static/images/email2img.png" alt="email" />"""
| import os, PIL
from PIL import Image, ImageDraw, ImageFont
from django import template
register = template.Library()
from mysmile.settings.main import STATIC_ROOT
@register.filter(name='email2img')
def email2img(email):
""" protect email via img
"""
if os.path.isfile(STATIC_ROOT+'static/images/email2img.png'):
return """<img src="/static/images/email2img.png" alt="email">"""
else:
color_mode="RGBA"
background_color=(0,0,0,0) # full transparent
fontfile = STATIC_ROOT+'static/fonts/TimesNewRomanCE.ttf'
fontsize = 16
try:
font = ImageFont.truetype(fontfile, fontsize)
width, height = font.getsize(email)
# add fontsize%10 for fix some visual bug
im = Image.new(color_mode, (width, height+fontsize%10), background_color)
draw = ImageDraw.Draw(im)
draw.text((0,0), email, (0,0,0), font=font)
img_full_path = STATIC_ROOT+'static/images/email2img.png'
im.save(img_full_path)
except Exception:
# return non-protected email. In future: log this error!
return '<a href="mailto:'+email+'">'+email+'</a>'
else:
return """<img src="/static/images/email2img.png" alt="email" />"""
| bsd-3-clause | Python |
1b09b30897d92a71e353bd5fd7c7ff2680f74a01 | Fix sending of root posts | fi-ksi/web-backend,fi-ksi/web-backend | endpoint/post.py | endpoint/post.py | import json, falcon
from db import session
import model
import util
from thread import Thread
class Post(object):
def on_put(self, req, resp, id):
self.on_get(req, resp, id)
def on_get(self, req, resp, id):
user_id = req.context['user'].get_id() if req.context['user'].is_logged_in() else None
post = session.query(model.Post).get(id)
req.context['result'] = { 'post': util.post.to_json(post, user_id) }
class Posts(object):
def on_post(self, req, resp):
if not req.context['user'].is_logged_in():
resp.status = falcon.HTTP_400
return
user = req.context['user']
user_id = user.id
data = json.loads(req.stream.read())['post']
thread_id = data['thread']
thread = session.query(model.Thread).get(thread_id)
if thread is None:
resp.status = falcon.HTTP_400
return
if not thread.public:
task_thread = session.query(model.Task).filter(model.Task.thread == thread_id).first()
if task_thread and util.task.status(task_thread, user) == util.TaskStatus.LOCKED:
resp.status = falcon.HTTP_400
return
solution_thread = session.query(model.SolutionComment).filter(model.SolutionComment.thread == thread_id, model.SolutionComment.user == user_id).first()
if not solution_thread:
resp.status = falcon.HTTP_400
return
parent = data['parent']
if parent and not session.query(model.Post).filter(model.Post.id == parent, model.Post.thread == thread_id).first():
resp.status = falcon.HTTP_400
return
post = model.Post(thread=thread_id, author=user_id, body=data['body'], parent=parent)
session.add(post)
session.commit()
req.context['result'] = { 'post': util.post.to_json(post, user_id) }
session.close()
| import json, falcon
from db import session
import model
import util
from thread import Thread
class Post(object):
def on_put(self, req, resp, id):
self.on_get(req, resp, id)
def on_get(self, req, resp, id):
user_id = req.context['user'].get_id() if req.context['user'].is_logged_in() else None
post = session.query(model.Post).get(id)
req.context['result'] = { 'post': util.post.to_json(post, user_id) }
class Posts(object):
def on_post(self, req, resp):
if not req.context['user'].is_logged_in():
resp.status = falcon.HTTP_400
return
user = req.context['user']
user_id = user.id
data = json.loads(req.stream.read())['post']
thread_id = data['thread']
thread = session.query(model.Thread).get(thread_id)
if thread is None:
resp.status = falcon.HTTP_400
return
if not thread.public:
task_thread = session.query(model.Task).filter(model.Task.thread == thread_id).first()
if task_thread and util.task.status(task_thread, user) == util.TaskStatus.LOCKED:
resp.status = falcon.HTTP_400
return
solution_thread = session.query(model.SolutionComment).filter(model.SolutionComment.thread == thread_id, model.SolutionComment.user == user_id).first()
if not solution_thread:
resp.status = falcon.HTTP_400
return
parent = data['parent']
if not session.query(model.Post).filter(model.Post.id == parent, model.Post.thread == thread_id).first():
resp.status = falcon.HTTP_400
return
post = model.Post(thread=thread_id, author=user_id, body=data['body'], parent=parent)
session.add(post)
session.commit()
req.context['result'] = { 'post': util.post.to_json(post, user_id) }
session.close()
| mit | Python |
459d16bea2f67601c6cf538bd70efaa4a01c28d5 | Remove obsolete api-key | tastatur/PredictionIO,takeshineshiro/PredictionIO,akaash-nigam/PredictionIO,PredictionIO/PredictionIO,jasonchaffee/PredictionIO,nvoron23/PredictionIO,b-cuts/PredictionIO,adamharish/PredictionIO,rbo7nik/PredictionIO,rsganesh83/PredictionIO,arudenko/PredictionIO,schon/PredictionIO,elkingtonmcb/PredictionIO,TheDataShed/PredictionIO,cristiancrc/PredictionIO,beni55/PredictionIO,beni55/PredictionIO,nvoron23/PredictionIO,shimamoto/incubator-predictionio,codingang/PredictionIO,jlegendary/PredictionIO,doron123/PredictionIO,Emaasit/PredictionIO,himanshudhami/PredictionIO,TheDataShed/PredictionIO,TheDataShed/PredictionIO,ydanilenko/PredictionIO,mars/incubator-predictionio,elkingtonmcb/PredictionIO,druzbikova/PredictionIO,mars/incubator-predictionio,adamharish/PredictionIO,pferrel/PredictionIO,arudenko/PredictionIO,michaelshing/PredictionIO,indranig/PredictionIO,prmdsharma/PredictionIO,shimamoto/incubator-predictionio,schon/PredictionIO,mars/incubator-predictionio,skmezanul/PredictionIO,jingyidata/PredictionIO,BojianLi/PredictionIO,ydanilenko/PredictionIO,ch33hau/PredictionIO,arudenko/PredictionIO,takezoe/incubator-predictionio,PredictionIO/PredictionIO,djeraseit/PredictionIO,nvoron23/PredictionIO,djeraseit/PredictionIO,net-shell/PredictionIO,nvoron23/PredictionIO,BuildAPE/PredictionIO,Ribeiro/PredictionIO,BojianLi/PredictionIO,tuxdna/PredictionIO,jlegendary/PredictionIO,rsganesh83/PredictionIO,alex9311/PredictionIO,dszeto/incubator-predictionio,net-shell/PredictionIO,Ribeiro/PredictionIO,ydanilenko/PredictionIO,jlegendary/PredictionIO,prmdsharma/PredictionIO,BojianLi/PredictionIO,cristiancrc/PredictionIO,druzbikova/PredictionIO,atyenoria/PredictionIO,wangmiao1981/PredictionIO,ionux/PredictionIO,Emaasit/PredictionIO,dszeto/incubator-predictionio,jasonchaffee/PredictionIO,initChan/PredictionIO,initChan/PredictionIO,takezoe/incubator-predictionio,hsavit1/PredictionIO,ch33hau/PredictionIO,sevenihust/PredictionIO,
sekaiamber/PredictionIO,himanshudhami/PredictionIO,adamharish/PredictionIO,biddyweb/PredictionIO,Emaasit/PredictionIO,wenaz/PredictionIO,marevol/incubator-predictionio,b-cuts/PredictionIO,takeshineshiro/PredictionIO,BuildAPE/PredictionIO,tuxdna/PredictionIO,BojianLi/PredictionIO,net-shell/PredictionIO,jlegendary/PredictionIO,PredictionIO/PredictionIO,tastatur/PredictionIO,TheDataShed/PredictionIO,net-shell/PredictionIO,hsavit1/PredictionIO,doron123/PredictionIO,pferrel/PredictionIO,alex9311/PredictionIO,clemp6r/PredictionIO,jasonchaffee/PredictionIO,ionux/PredictionIO,takeshineshiro/PredictionIO,BojianLi/PredictionIO,jingyidata/PredictionIO,doron123/PredictionIO,marevol/incubator-predictionio,atyenoria/PredictionIO,wangmiao1981/PredictionIO,sevenihust/PredictionIO,EmergentOrder/PredictionIO,thiagoveras/PredictionIO,skmezanul/PredictionIO,djeraseit/PredictionIO,wenaz/PredictionIO,Ribeiro/PredictionIO,Ribeiro/PredictionIO,himanshudhami/PredictionIO,akaash-nigam/PredictionIO,sekaiamber/PredictionIO,marevol/incubator-predictionio,ch33hau/PredictionIO,rbo7nik/PredictionIO,tastatur/PredictionIO,zafarella/PredictionIO,stephen-corgiat/PredictionIO,tuxdna/PredictionIO,druzbikova/PredictionIO,jlegendary/PredictionIO,akaash-nigam/PredictionIO,zafarella/PredictionIO,net-shell/PredictionIO,rbo7nik/PredictionIO,doron123/PredictionIO,akaash-nigam/PredictionIO,EmergentOrder/PredictionIO,indranig/PredictionIO,Ribeiro/PredictionIO,ydanilenko/PredictionIO,nvoron23/PredictionIO,codingang/PredictionIO,mars/incubator-predictionio,codingang/PredictionIO,sekaiamber/PredictionIO,michaelshing/PredictionIO,doron123/PredictionIO,EmergentOrder/PredictionIO,thiagoveras/PredictionIO,clemp6r/PredictionIO,skmezanul/PredictionIO,thiagoveras/PredictionIO,codingang/PredictionIO,stephen-corgiat/PredictionIO,wenaz/PredictionIO,rsganesh83/PredictionIO,clemp6r/PredictionIO,takezoe/incubator-predictionio,michaelshing/PredictionIO,BuildAPE/PredictionIO,ionux/PredictionIO,marevol/incubator-predictionio,jingy
idata/PredictionIO,hsavit1/PredictionIO,sevenihust/PredictionIO,initChan/PredictionIO,atyenoria/PredictionIO,pferrel/PredictionIO,dszeto/incubator-predictionio,zafarella/PredictionIO,shimamoto/incubator-predictionio,schon/PredictionIO,beni55/PredictionIO,prmdsharma/PredictionIO,indranig/PredictionIO,indranig/PredictionIO,prmdsharma/PredictionIO,atyenoria/PredictionIO,takezoe/incubator-predictionio,indranig/PredictionIO,prmdsharma/PredictionIO,biddyweb/PredictionIO,wangmiao1981/PredictionIO,biddyweb/PredictionIO,himanshudhami/PredictionIO,ydanilenko/PredictionIO,shimamoto/incubator-predictionio,atyenoria/PredictionIO,cristiancrc/PredictionIO,TheDataShed/PredictionIO,elkingtonmcb/PredictionIO,alex9311/PredictionIO,codingang/PredictionIO,b-cuts/PredictionIO,akaash-nigam/PredictionIO,stephen-corgiat/PredictionIO,dszeto/incubator-predictionio | sdk/python-sdk/examples/demo-movielens/app_config.py | sdk/python-sdk/examples/demo-movielens/app_config.py | APP_ID = 2
API_URL = 'http://localhost:7070'
THREADS = 25
REQUEST_QSIZE = 500
|
#APP_KEY = 'bfs7QCga121RitVkToTaoSOFTB4i0clnQbu7zTI8hqWMjVKXL9mj225YC7VfpaIR'
APP_ID = 2
API_URL = 'http://localhost:7070'
THREADS = 25
REQUEST_QSIZE = 500
| apache-2.0 | Python |
04cffe48844bf97050d4ae312b2412c492ed5faa | Make heartbeat output less verbose | mozilla/normandy,Osmose/normandy,Osmose/normandy,mozilla/normandy,mozilla/normandy,mozilla/normandy,Osmose/normandy,Osmose/normandy | normandy/health/api/views.py | normandy/health/api/views.py | import os
from django.conf import settings
from django.core.checks.registry import registry as checks_registry
from django.core.checks import messages as checks_messages
from rest_framework import status
from rest_framework.decorators import api_view, authentication_classes
from rest_framework.response import Response
from statsd.defaults.django import statsd
from normandy.base.decorators import short_circuit_middlewares
_commit = None
def get_commit():
global _commit
if _commit is None:
path = os.path.join(settings.BASE_DIR, '__version__', 'commit')
try:
with open(path) as f:
_commit = f.read().strip()
except OSError:
_commit = 'unknown'
return _commit
@api_view(['GET'])
def version(request):
return Response({
'source': 'https://github.com/mozilla/normandy',
'commit': get_commit(),
})
@api_view(['GET'])
def lbheartbeat(request):
# lets the load balancer know the application is running and available
# must return 200 (not 204) for ELB
# http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elb-healthchecks.html
return Response('', status=status.HTTP_200_OK)
@short_circuit_middlewares
@api_view(['GET'])
@authentication_classes([])
def heartbeat(request):
all_checks = checks_registry.get_checks(include_deployment_checks=not settings.DEBUG)
details = {}
statuses = {}
level = 0
for check in all_checks:
detail = heartbeat_check_detail(check)
statuses[check.__name__] = detail['status']
level = max(level, detail['level'])
if detail['level'] > 0:
details[check.__name__] = detail
if level < checks_messages.WARNING:
res_status = status.HTTP_200_OK
statsd.incr('heartbeat.pass')
else:
res_status = status.HTTP_500_INTERNAL_SERVER_ERROR
statsd.incr('heartbeat.fail')
return Response({
'status': heartbeat_level_to_text(level),
'checks': statuses,
'details': details,
}, status=res_status)
def heartbeat_level_to_text(level):
statuses = {
0: 'ok',
checks_messages.DEBUG: 'debug',
checks_messages.INFO: 'info',
checks_messages.WARNING: 'warning',
checks_messages.ERROR: 'error',
checks_messages.CRITICAL: 'critical',
}
return statuses.get(level, 'unknown')
def heartbeat_check_detail(check):
errors = check(app_configs=None)
level = 0
level = max([level] + [e.level for e in errors])
return {
'status': heartbeat_level_to_text(level),
'level': level,
'messages': [e.msg for e in errors],
}
| import os
from django.conf import settings
from django.core.checks.registry import registry as checks_registry
from django.core.checks import messages as checks_messages
from rest_framework import status
from rest_framework.decorators import api_view, authentication_classes
from rest_framework.response import Response
from statsd.defaults.django import statsd
from normandy.base.decorators import short_circuit_middlewares
_commit = None
def get_commit():
global _commit
if _commit is None:
path = os.path.join(settings.BASE_DIR, '__version__', 'commit')
try:
with open(path) as f:
_commit = f.read().strip()
except OSError:
_commit = 'unknown'
return _commit
@api_view(['GET'])
def version(request):
return Response({
'source': 'https://github.com/mozilla/normandy',
'commit': get_commit(),
})
@api_view(['GET'])
def lbheartbeat(request):
# lets the load balancer know the application is running and available
# must return 200 (not 204) for ELB
# http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elb-healthchecks.html
return Response('', status=status.HTTP_200_OK)
@short_circuit_middlewares
@api_view(['GET'])
@authentication_classes([])
def heartbeat(request):
all_checks = checks_registry.get_checks(include_deployment_checks=not settings.DEBUG)
details = {check.__name__: heartbeat_check_detail(check) for check in all_checks}
if all(detail['level'] < checks_messages.WARNING for detail in details.values()):
res_status = status.HTTP_200_OK
statsd.incr('heartbeat.pass')
else:
res_status = status.HTTP_500_INTERNAL_SERVER_ERROR
statsd.incr('heartbeat.fail')
return Response(details, status=res_status)
def heartbeat_level_to_text(level):
statuses = {
0: 'ok',
checks_messages.DEBUG: 'debug',
checks_messages.INFO: 'info',
checks_messages.WARNING: 'warning',
checks_messages.ERROR: 'errors',
checks_messages.CRITICAL: 'critical',
}
return statuses.get(level, 'unknown')
def heartbeat_check_detail(check):
errors = check(app_configs=None)
level = 0
level = max([level] + [e.level for e in errors])
return {
'status': heartbeat_level_to_text(level),
'level': level,
'messages': [e.msg for e in errors],
}
| mpl-2.0 | Python |
0fc9fbdf171816f240d902190205420e57f70247 | support for image, scale and alpha parameters | jffernandez/kivy,jegger/kivy,edubrunaldi/kivy,xpndlabs/kivy,rnixx/kivy,iamutkarshtiwari/kivy,LogicalDash/kivy,jkankiewicz/kivy,autosportlabs/kivy,inclement/kivy,yoelk/kivy,rafalo1333/kivy,matham/kivy,darkopevec/kivy,niavlys/kivy,jkankiewicz/kivy,edubrunaldi/kivy,niavlys/kivy,andnovar/kivy,arcticshores/kivy,Shyam10/kivy,cbenhagen/kivy,adamkh/kivy,denys-duchier/kivy,arlowhite/kivy,rafalo1333/kivy,kivatu/kivy-bak,arcticshores/kivy,Farkal/kivy,Cheaterman/kivy,akshayaurora/kivy,Shyam10/kivy,hansent/kivy,bhargav2408/kivy,jffernandez/kivy,kivatu/kivy-bak,vitorio/kivy,kivy/kivy,angryrancor/kivy,el-ethan/kivy,eHealthAfrica/kivy,manthansharma/kivy,xpndlabs/kivy,mSenyor/kivy,iamutkarshtiwari/kivy,rnixx/kivy,mSenyor/kivy,dirkjot/kivy,Farkal/kivy,yoelk/kivy,VinGarcia/kivy,janssen/kivy,thezawad/kivy,el-ethan/kivy,aron-bordin/kivy,el-ethan/kivy,vipulroxx/kivy,JohnHowland/kivy,Davideddu/kivy-forkedtouch,mSenyor/kivy,CuriousLearner/kivy,Davideddu/kivy-forkedtouch,bhargav2408/kivy,wangjun/kivy,thezawad/kivy,ernstp/kivy,edubrunaldi/kivy,Shyam10/kivy,Ramalus/kivy,bionoid/kivy,dirkjot/kivy,LogicalDash/kivy,MiyamotoAkira/kivy,vitorio/kivy,tony/kivy,xiaoyanit/kivy,Cheaterman/kivy,jkankiewicz/kivy,hansent/kivy,KeyWeeUsr/kivy,janssen/kivy,janssen/kivy,autosportlabs/kivy,manashmndl/kivy,kived/kivy,MiyamotoAkira/kivy,bhargav2408/kivy,rafalo1333/kivy,aron-bordin/kivy,Davideddu/kivy-forkedtouch,Cheaterman/kivy,manthansharma/kivy,akshayaurora/kivy,matham/kivy,hansent/kivy,viralpandey/kivy,bliz937/kivy,arlowhite/kivy,rnixx/kivy,habibmasuro/kivy,aron-bordin/kivy,matham/kivy,Shyam10/kivy,tony/kivy,denys-duchier/kivy,aron-bordin/kivy,vipulroxx/kivy,ernstp/kivy,cbenhagen/kivy,bob-the-hamster/kivy,jegger/kivy,bob-the-hamster/kivy,KeyWeeUsr/kivy,jffernandez/kivy,arcticshores/kivy,darkopevec/kivy,manashmndl/kivy,adamkh/kivy,bionoid/kivy,adamkh/kivy,zennobjects/kivy,LogicalDash/kivy,KeyWeeUsr/kivy,eHealthAfrica/kiv
y,manashmndl/kivy,MiyamotoAkira/kivy,jkankiewicz/kivy,inclement/kivy,hansent/kivy,gonzafirewall/kivy,youprofit/kivy,bob-the-hamster/kivy,ehealthafrica-ci/kivy,tony/kivy,denys-duchier/kivy,xiaoyanit/kivy,kivy/kivy,andnovar/kivy,jehutting/kivy,arlowhite/kivy,darkopevec/kivy,jegger/kivy,kivatu/kivy-bak,kived/kivy,wangjun/kivy,vipulroxx/kivy,ehealthafrica-ci/kivy,gonzafirewall/kivy,jehutting/kivy,adamkh/kivy,jffernandez/kivy,xiaoyanit/kivy,youprofit/kivy,viralpandey/kivy,youprofit/kivy,MiyamotoAkira/kivy,bionoid/kivy,Ramalus/kivy,CuriousLearner/kivy,kivy/kivy,iamutkarshtiwari/kivy,LogicalDash/kivy,yoelk/kivy,wangjun/kivy,kived/kivy,Cheaterman/kivy,viralpandey/kivy,gonzafirewall/kivy,angryrancor/kivy,inclement/kivy,jehutting/kivy,arcticshores/kivy,VinGarcia/kivy,ehealthafrica-ci/kivy,xpndlabs/kivy,bob-the-hamster/kivy,eHealthAfrica/kivy,KeyWeeUsr/kivy,thezawad/kivy,bionoid/kivy,jegger/kivy,dirkjot/kivy,Ramalus/kivy,habibmasuro/kivy,JohnHowland/kivy,ernstp/kivy,wangjun/kivy,manthansharma/kivy,autosportlabs/kivy,VinGarcia/kivy,matham/kivy,dirkjot/kivy,eHealthAfrica/kivy,denys-duchier/kivy,Farkal/kivy,bliz937/kivy,niavlys/kivy,ehealthafrica-ci/kivy,manthansharma/kivy,zennobjects/kivy,vitorio/kivy,janssen/kivy,CuriousLearner/kivy,ernstp/kivy,JohnHowland/kivy,cbenhagen/kivy,angryrancor/kivy,gonzafirewall/kivy,zennobjects/kivy,bliz937/kivy,angryrancor/kivy,akshayaurora/kivy,niavlys/kivy,andnovar/kivy,darkopevec/kivy,kivatu/kivy-bak,JohnHowland/kivy,Davideddu/kivy-forkedtouch,Farkal/kivy,habibmasuro/kivy,zennobjects/kivy,yoelk/kivy,vipulroxx/kivy | kivy/modules/touchring.py | kivy/modules/touchring.py | '''
Touchring module
================
Show ring around every touch on the table. You can use this module for checking
if you don't have any calibration trouble with touches.
'''
import os
from kivy import kivy_data_dir
from kivy.core.image import Image
from kivy.graphics import Color, Rectangle
pointer_image = None
pointer_scale = 1.0
pointer_alpha = 0.7
def _touch_down(win, touch):
ud = touch.ud
touch.scale_for_screen(win.width, win.height)
with win.canvas.after:
ud['tr.color'] = Color(1, 1, 1, pointer_alpha)
iw, ih = pointer_image.size
ud['tr.rect'] = Rectangle(
pos=(
touch.x - (pointer_image.width / 2. * pointer_scale),
touch.y - (pointer_image.height / 2. * pointer_scale)),
size=(iw * pointer_scale, ih * pointer_scale),
texture=pointer_image.texture)
def _touch_move(win, touch):
ud = touch.ud
ud['tr.rect'].pos = (
touch.x - (pointer_image.width / 2. * pointer_scale),
touch.y - (pointer_image.height / 2. * pointer_scale))
def _touch_up(win, touch):
ud = touch.ud
win.canvas.after.remove(ud['tr.color'])
win.canvas.after.remove(ud['tr.rect'])
def start(win, ctx):
global pointer_image, pointer_scale ,pointer_alpha
if not 'KIVY_DOC' in os.environ:
pointer_fn = ctx.config.get('image', os.path.join(kivy_data_dir, 'images', 'ring.png'))
pointer_scale = float(ctx.config.get('scale', 1.0))
pointer_alpha = float(ctx.config.get('alpha', 1.0))
pointer_image = Image(pointer_fn)
win.bind(on_touch_down=_touch_down,
on_touch_move=_touch_move,
on_touch_up=_touch_up)
def stop(win, ctx):
win.unbind(on_touch_down=_touch_down,
on_touch_move=_touch_move,
on_touch_up=_touch_up)
| '''
Touchring module
================
Show ring around every touch on the table. You can use this module for checking
if you don't have any calibration trouble with touches.
'''
import os
from kivy import kivy_data_dir
from kivy.core.image import Image
from kivy.graphics import Color, Rectangle
if not 'KIVY_DOC' in os.environ:
ring_fn = os.path.join(kivy_data_dir, 'images', 'ring.png')
ring_img = Image(ring_fn)
def _touch_down(win, touch):
ud = touch.ud
touch.scale_for_screen(win.width, win.height)
with win.canvas.after:
ud['tr.color'] = Color(1, 1, 1, .7)
iw, ih = ring_img.size
ud['tr.rect'] = Rectangle(
pos=(
touch.x - (ring_img.width / 2. * 0.3),
touch.y - (ring_img.height / 2. * 0.3)),
size=(iw * 0.3, ih * 0.3),
texture=ring_img.texture)
def _touch_move(win, touch):
ud = touch.ud
ud['tr.rect'].pos = (
touch.x - (ring_img.width / 2. * 0.3),
touch.y - (ring_img.height / 2. * 0.3))
def _touch_up(win, touch):
ud = touch.ud
win.canvas.after.remove(ud['tr.color'])
win.canvas.after.remove(ud['tr.rect'])
def start(win, ctx):
win.bind(on_touch_down=_touch_down,
on_touch_move=_touch_move,
on_touch_up=_touch_up)
def stop(win, ctx):
win.unbind(on_touch_down=_touch_down,
on_touch_move=_touch_move,
on_touch_up=_touch_up)
| mit | Python |
b020d1549b99ea85569fcefbeb6582ee186b8ef6 | print HA networks to log, print ports | CiscoSystems/os-sqe,CiscoSystems/os-sqe,CiscoSystems/os-sqe | lab/deployers/__init__.py | lab/deployers/__init__.py | import abc
from lab.with_config import WithConfig
class Deployer(WithConfig):
@abc.abstractmethod
def wait_for_cloud(self, list_of_servers):
"""Make sure that cloud is up and running on the provided list of servers
:param list_of_servers: list of server provided during provisioning phase
"""
pass
def verify_cloud(self, cloud, from_server):
from lab.logger import create_logger
log = create_logger()
net_list = from_server.run(command='neutron net-list {cloud}'.format(cloud=cloud))
ha_networks = {}
for line in net_list.split('\n'):
if 'HA network' in line:
_, net_id, name_tenant_id, subnet_id_cdir, _ = line.split('|')
subnet_id, _ = subnet_id_cdir.split()
_, _, _, tenant_id = name_tenant_id.split()
net_info = from_server.run(command='neutron net-show {subnet_id} {cloud}'.format(subnet_id=net_id, cloud=cloud))
seg_id = filter(lambda x: 'segmentation_id' in x, net_info.split('\r\n'))[0].split('|')[-2].strip()
ha_networks[net_id.strip()] = {'tenant_id': tenant_id, 'subnet_id': subnet_id, 'seg_id': seg_id}
log.info('n_ha_networks={n} seg_ids={seg_ids}'.format(n=len(ha_networks), seg_ids=sorted([x['seg_id'] for x in ha_networks.itervalues()])))
from_server.run(command='neutron port-list {cloud}'.format(cloud=cloud))
from_server.run(command='neutron router-list {cloud}'.format(cloud=cloud))
from_server.run(command='openstack server list {cloud}'.format(cloud=cloud))
for service in cloud.services():
for url in ['publicURL', 'internalURL', 'adminURL']:
end_point = from_server.run(command='openstack catalog show {service} {cloud} | grep {url} | awk \'{{print $4}}\''.format(cloud=cloud, service=service, url=url))
cloud.add_service_end_point(service=service, url=url, end_point=end_point)
return cloud
| import abc
from lab.with_config import WithConfig
class Deployer(WithConfig):
@abc.abstractmethod
def wait_for_cloud(self, list_of_servers):
"""Make sure that cloud is up and running on the provided list of servers
:param list_of_servers: list of server provided during provisioning phase
"""
pass
def verify_cloud(self, cloud, from_server):
from_server.run(command='neutron net-list {cloud}'.format(cloud=cloud))
from_server.run(command='neutron subnet-list {cloud}'.format(cloud=cloud))
from_server.run(command='neutron router-list {cloud}'.format(cloud=cloud))
from_server.run(command='openstack server list {cloud}'.format(cloud=cloud))
for service in cloud.services():
for url in ['publicURL', 'internalURL', 'adminURL']:
end_point = from_server.run(command='openstack catalog show {service} {cloud} | grep {url} | awk \'{{print $4}}\''.format(cloud=cloud, service=service, url=url))
cloud.add_service_end_point(service=service, url=url, end_point=end_point)
return cloud
| apache-2.0 | Python |
9044ae987f059c8dc173f6c21d75a4908a121afe | Update link of dependency | brain-tec/sale-workflow,thomaspaulb/sale-workflow,acsone/sale-workflow,acsone/sale-workflow,jabibi/sale-workflow,Endika/sale-workflow,Antiun/sale-workflow,BT-cserra/sale-workflow,fevxie/sale-workflow,akretion/sale-workflow,open-synergy/sale-workflow,ddico/sale-workflow,diagramsoftware/sale-workflow,factorlibre/sale-workflow,brain-tec/sale-workflow,akretion/sale-workflow,Eficent/sale-workflow | sale_payment_method_transaction_id/__openerp__.py | sale_payment_method_transaction_id/__openerp__.py | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Guewen Baconnier
# Copyright 2014 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{'name' : 'Sale Payment Method - Transaction ID Compatibility',
'version' : '1.0',
'author' : 'Camptocamp',
'maintainer': 'Camptocamp',
'license': 'AGPL-3',
'category': 'Hidden',
'depends' : ['sale_payment_method',
# base_transaction_id is in
# https://github.com/OCA/bank-statement-reconcile/tree/7.0
'base_transaction_id',
],
'description': """
Sale Payment Method - Transaction ID Compatibility
==================================================
Link module between the sale payment method module
and the module adding a transaction ID field (`base_transaction_id` in the
`lp:banking-addons/bank-statement-reconcile-7.0` branch).
When a payment is created from a sales order with a transaction ID, the
move lines are created with the transaction id.
""",
'website': 'http://www.camptocamp.com',
'data': [],
'tests': [],
'installable': True,
'auto_install': True,
}
| # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Guewen Baconnier
# Copyright 2014 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{'name' : 'Sale Payment Method - Transaction ID Compatibility',
'version' : '1.0',
'author' : 'Camptocamp',
'maintainer': 'Camptocamp',
'license': 'AGPL-3',
'category': 'Hidden',
'depends' : ['sale_payment_method',
'base_transaction_id', # in lp:banking-addons/bank-statement-reconcile-7.0
],
'description': """
Sale Payment Method - Transaction ID Compatibility
==================================================
Link module between the sale payment method module
and the module adding a transaction ID field (`base_transaction_id` in the
`lp:banking-addons/bank-statement-reconcile-7.0` branch).
When a payment is created from a sales order with a transaction ID, the
move lines are created with the transaction id.
""",
'website': 'http://www.camptocamp.com',
'data': [],
'tests': [],
'installable': True,
'auto_install': True,
}
| agpl-3.0 | Python |
49bd9ae52e24375b61999fa6a4915866ea9a0377 | add travis ci | kute/eventor | test/test.py | test/test.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# __author__ = 'kute'
# __mtime__ = '2016/12/25 17:47'
"""
"""
# from __future__ import absolute_import
import unittest
from eventor.core import Eventor
from eventor.util import EventorUtil
import os
class SimpleTest(unittest.TestCase):
def test_run_with_tasklist(self):
times = 2
elelist = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
def func(x):
return x + times
e = Eventor(threadcount=3, taskunitcount=3, func=func, interval=2)
result = e.run_with_tasklist(elelist, async=True, timeout=3)
self.assertEqual(sum(result), sum(elelist) + len(elelist) * times)
def test_run_with_file(self):
times = 2
elelist = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
e = EventorUtil()
file = os.path.join(e.get_dir(relative=".."), "data.txt")
print(file)
def func(x):
return int(x) + times
e = Eventor(threadcount=3, taskunitcount=3, func=func, interval=2)
result = e.run_with_file(file, async=True, timeout=3)
self.assertEqual(sum(result), sum(elelist) + len(elelist) * times)
if __name__ == '__main__':
unittest.main()
| #! /usr/bin/env python
# -*- coding: utf-8 -*-
# __author__ = 'kute'
# __mtime__ = '2016/12/25 17:47'
"""
"""
# from __future__ import absolute_import
import unittest
from eventor.core import Eventor
from eventor.util import EventorUtil
import os
class SimpleTest(unittest.TestCase):
def test_run_with_tasklist(self):
times = 2
elelist = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
def func(x):
return x + times
e = Eventor(threadcount=3, taskunitcount=3, func=func, interval=2)
result = e.run_with_tasklist(elelist, async=True, timeout=3)
self.assertEqual(sum(result), sum(elelist) + len(elelist) * times)
def test_run_with_file(self):
times = 2
elelist = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
e = EventorUtil()
file = os.path.join(e.get_dir(".."), "data.txt")
def func(x):
return int(x) + times
e = Eventor(threadcount=3, taskunitcount=3, func=func, interval=2)
result = e.run_with_file(file, async=True, timeout=3)
self.assertEqual(sum(result), sum(elelist) + len(elelist) * times)
if __name__ == '__main__':
unittest.main()
| mit | Python |
c7e55bfd8284c4bb6755abc51dd7c940bca9d81a | Add sound level to influx | ojarva/home-info-display,ojarva/home-info-display,ojarva/home-info-display,ojarva/home-info-display | sensor_consumers/dust_node.py | sensor_consumers/dust_node.py | # coding=utf-8
from local_settings import *
from utils import SensorConsumerBase
import redis
import datetime
import sys
class DustNode(SensorConsumerBase):
def __init__(self):
SensorConsumerBase.__init__(self, "indoor_air_quality")
def run(self):
self.subscribe("dust-node-pubsub", self.pubsub_callback)
def pubsub_callback(self, data):
if "action" in data:
return
influx_data = {
"measurement": "dustnode",
"timestamp": data["utctimestamp"].isoformat() + "Z",
"fields": {
"room_humidity": data["data"]["room_humidity"],
"room_temperature": round(data["data"]["room_temperature"], 1),
"barometer_temperature": round(data["data"]["barometer_temperature"], 1),
"barometer_pressure": round(data["data"]["barometer_reading"], 1),
"dust_density": round(data["data"]["dust_density"], 5),
"sound_level": data["data"]["sound_level"],
}
}
self.insert_into_influx([influx_data])
def main():
item = DustNode()
item.run()
return 0
if __name__ == '__main__':
sys.exit(main())
| # coding=utf-8
from local_settings import *
from utils import SensorConsumerBase
import redis
import datetime
import sys
class DustNode(SensorConsumerBase):
def __init__(self):
SensorConsumerBase.__init__(self, "indoor_air_quality")
def run(self):
self.subscribe("dust-node-pubsub", self.pubsub_callback)
def pubsub_callback(self, data):
if "action" in data:
return
influx_data = {
"measurement": "dustnode",
"timestamp": data["utctimestamp"].isoformat() + "Z",
"fields": {
"room_humidity": data["data"]["room_humidity"],
"room_temperature": round(data["data"]["room_temperature"], 1),
"barometer_temperature": round(data["data"]["barometer_temperature"], 1),
"barometer_pressure": round(data["data"]["barometer_reading"], 1),
"dust_density": round(data["data"]["dust_density"], 5)
}
}
self.insert_into_influx([influx_data])
def main():
item = DustNode()
item.run()
return 0
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause | Python |
6e9b1ad4af56dbde51673176113bec91baa90327 | add iosdriver | 2gis/contesto,rseek/contesto | contesto/basis/driver_mixin.py | contesto/basis/driver_mixin.py | from contesto.exceptions import UnknownBrowserName
class AbstractDriver(object):
_driver_type = None
dc_from_config = None
@classmethod
def _form_desired_capabilities(cls, driver_settings):
try:
cls.dc_from_config = driver_settings["desired_capabilities"]
except KeyError:
pass
class HttpDriver(AbstractDriver):
_driver_type = 'selenium'
@classmethod
def _form_desired_capabilities(cls, driver_settings):
super(HttpDriver, cls)._form_desired_capabilities(driver_settings)
if cls.dc_from_config:
return cls.dc_from_config
try:
desired_capabilities = cls.capabilities_map[driver_settings["browser"].lower()]
except KeyError:
raise UnknownBrowserName(driver_settings.selenium["browser"], cls.capabilities_map.keys())
desired_capabilities["platform"] = driver_settings["platform"]
return desired_capabilities
class QtWebkitDriver(AbstractDriver):
_driver_type = 'qtwebkitdriver'
@classmethod
def _form_desired_capabilities(cls, driver_settings):
super(QtWebkitDriver, cls)._form_desired_capabilities(driver_settings)
if cls.dc_from_config:
return cls.dc_from_config
desired_capabilities = dict()
desired_capabilities['app'] = driver_settings["app"]
return desired_capabilities
class IosDriver(AbstractDriver):
_driver_type = 'iosdriver'
@classmethod
def _form_desired_capabilities(cls, driver_settings):
super(IosDriver, cls)._form_desired_capabilities(driver_settings)
if cls.dc_from_config:
return cls.dc_from_config
desired_capabilities = dict()
desired_capabilities['app'] = driver_settings["app"]
desired_capabilities['device'] = driver_settings["device"]
desired_capabilities['platform'] = driver_settings["platform"]
desired_capabilities['version'] = driver_settings["version"]
return desired_capabilities | from contesto.exceptions import UnknownBrowserName
class AbstractDriver(object):
_driver_type = None
dc_from_config = None
@classmethod
def _form_desired_capabilities(cls, driver_settings):
try:
cls.dc_from_config = driver_settings["desired_capabilities"]
except KeyError:
pass
class HttpDriver(AbstractDriver):
_driver_type = 'selenium'
@classmethod
def _form_desired_capabilities(cls, driver_settings):
super(HttpDriver, cls)._form_desired_capabilities(driver_settings)
if cls.dc_from_config:
return cls.dc_from_config
try:
desired_capabilities = cls.capabilities_map[driver_settings["browser"].lower()]
except KeyError:
raise UnknownBrowserName(driver_settings.selenium["browser"], cls.capabilities_map.keys())
desired_capabilities["platform"] = driver_settings["platform"]
return desired_capabilities
class QtWebkitDriver(AbstractDriver):
_driver_type = 'qtwebkitdriver'
@classmethod
def _form_desired_capabilities(cls, driver_settings):
super(QtWebkitDriver, cls)._form_desired_capabilities(driver_settings)
if cls.dc_from_config:
return cls.dc_from_config
desired_capabilities = dict()
desired_capabilities['app'] = driver_settings["app"]
return desired_capabilities
| mit | Python |
b3fd19f309c883b9f09be964f1647dfb455ccb68 | Fix domain models __slots__ field | ets-labs/domain_models,rmk135/domain_models,ets-labs/python-domain-models | domain_models/model.py | domain_models/model.py | """Domain models model."""
import six
from . import fields
class DomainModelMetaClass(type):
"""Domain model meta class."""
def __new__(mcs, class_name, bases, attributes):
"""Domain model class factory."""
cls = type.__new__(mcs, class_name, bases, attributes)
for field_name, field in six.iteritems(attributes):
if not isinstance(field, fields.Field):
continue
field.name = field_name
field.model = cls
return cls
@six.add_metaclass(DomainModelMetaClass)
class DomainModel(object):
"""Base domain model."""
__view_key__ = tuple()
__unique_key__ = tuple()
def __eq__(self, compared):
"""Make equality comparation based on unique key.
If unique key is not defined, standard object's equality comparation
will be used.
"""
| """Domain models model."""
import six
from . import fields
class DomainModelMetaClass(type):
"""Domain model meta class."""
def __new__(mcs, class_name, bases, attributes):
"""Domain model class factory."""
cls = type.__new__(mcs, class_name, bases, attributes)
for field_name, field in six.iteritems(attributes):
if not isinstance(field, fields.Field):
continue
field.name = field_name
field.model = cls
return cls
@six.add_metaclass(DomainModelMetaClass)
class DomainModel(object):
"""Base domain model."""
__view_key__ = tuple()
__unique_key__ = tuple()
__slots__ = ('__view_key__', '__unique_key__')
def __eq__(self, compared):
"""Make equality comparation based on unique key.
If unique key is not defined, standard object's equality comparation
will be used.
"""
| bsd-3-clause | Python |
cc84a379132c8d500de62a5a111210ca867a71ae | Fix typo registrant->registrar | DomainTools/python_api,DomainTools/python_api | domaintools/results.py | domaintools/results.py | """Defines the used Result object based on the current versions and/or features available to Python runtime
Additionally, defines any custom result objects that may be used to enable more Pythonic interaction with endpoints.
"""
import sys
from itertools import chain
try: # pragma: no cover
from collections import OrderedDict
except ImportError: # pragma: no cover
from ordereddict import OrderedDict
if sys.version_info[0] >= 3: # pragma: no cover
from itertools import zip_longest
else: # pragma: no cover
from itertools import izip_longest as zip_longest
if sys.version_info[0] >= 3 and sys.version_info[1] >= 5: # pragma: no cover
from domaintools_async import AsyncResults as Results
else: # pragma: no cover
from domaintools.base_results import Results
class Reputation(Results):
"""Returns the reputation results in a format that can quickly be converted into floats / ints"""
def __float__(self):
return float(self['risk_score'])
def __int__(self):
return int(self['risk_score'])
class GroupedIterable(Results):
"""Returns a results item in a format that allows for grouped iteration of mulpitle result lists"""
def _items(self):
if self._items_list is None:
self._items_list = chain(*[zip_longest([], value, fillvalue=key) for key, value in self.response().items()
if type(value) in (list, tuple)])
return self._items_list
class ParsedWhois(Results):
"""Returns the parsed whois results in a format that can quickly be flattened"""
def flattened(self):
"""Returns a flattened version of the parsed whois data"""
parsed = self['parsed_whois']
flat = OrderedDict()
for key in ('domain', 'created_date', 'updated_date', 'expired_date', 'statuses', 'name_servers'):
value = parsed[key]
flat[key] = ' | '.join(value) if type(value) in (list, tuple) else value
registrar = parsed.get('registrar', {})
for key in ('name', 'abuse_contact_phone', 'abuse_contact_email', 'iana_id', 'url', 'whois_server'):
flat['registrar_{0}'.format(key)] = registrar[key]
for contact_type in ('registrant', 'admin', 'tech', 'billing'):
contact = parsed.get('contacts', {}).get(contact_type, {})
for key in ('name', 'email', 'org', 'street', 'city', 'state', 'postal', 'country', 'phone', 'fax'):
value = contact[key]
flat['{0}_{1}'.format(contact_type, key)] = ' '.join(value) if type(value) in (list, tuple) else value
return flat
| """Defines the used Result object based on the current versions and/or features available to Python runtime
Additionally, defines any custom result objects that may be used to enable more Pythonic interaction with endpoints.
"""
import sys
from itertools import chain
try: # pragma: no cover
from collections import OrderedDict
except ImportError: # pragma: no cover
from ordereddict import OrderedDict
if sys.version_info[0] >= 3: # pragma: no cover
from itertools import zip_longest
else: # pragma: no cover
from itertools import izip_longest as zip_longest
if sys.version_info[0] >= 3 and sys.version_info[1] >= 5: # pragma: no cover
from domaintools_async import AsyncResults as Results
else: # pragma: no cover
from domaintools.base_results import Results
class Reputation(Results):
"""Returns the reputation results in a format that can quickly be converted into floats / ints"""
def __float__(self):
return float(self['risk_score'])
def __int__(self):
return int(self['risk_score'])
class GroupedIterable(Results):
"""Returns a results item in a format that allows for grouped iteration of mulpitle result lists"""
def _items(self):
if self._items_list is None:
self._items_list = chain(*[zip_longest([], value, fillvalue=key) for key, value in self.response().items()
if type(value) in (list, tuple)])
return self._items_list
class ParsedWhois(Results):
"""Returns the parsed whois results in a format that can quickly be flattened"""
def flattened(self):
"""Returns a flattened version of the parsed whois data"""
parsed = self['parsed_whois']
flat = OrderedDict()
for key in ('domain', 'created_date', 'updated_date', 'expired_date', 'statuses', 'name_servers'):
value = parsed[key]
flat[key] = ' | '.join(value) if type(value) in (list, tuple) else value
registrar = parsed.get('registrar', {})
for key in ('name', 'abuse_contact_phone', 'abuse_contact_email', 'iana_id', 'url', 'whois_server'):
flat['registrant_{0}'.format(key)] = registrar[key]
for contact_type in ('registrant', 'admin', 'tech', 'billing'):
contact = parsed.get('contacts', {}).get(contact_type, {})
for key in ('name', 'email', 'org', 'street', 'city', 'state', 'postal', 'country', 'phone', 'fax'):
value = contact[key]
flat['{0}_{1}'.format(contact_type, key)] = ' '.join(value) if type(value) in (list, tuple) else value
return flat
| mit | Python |
3256d46fe1063f1da65b58942fd4f68b3c0d081f | Append extra args with a space for dox.yml | emonty/dox,emonty/dox,stackforge/dox,coolsvap/dox | dox/config/dox_yaml.py | dox/config/dox_yaml.py | # Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
'get_dox_yaml',
]
import os
import yaml
_dox_yaml = None
def get_dox_yaml():
global _dox_yaml
if _dox_yaml is None:
_dox_yaml = DoxYaml()
return _dox_yaml
class DoxYaml(object):
_yaml = None
def _open_dox_yaml(self):
if self._yaml is None:
self._yaml = yaml.load(open('dox.yml', 'r'))
return self._yaml
def exists(self):
return os.path.exists('dox.yml')
def get_image(self, image):
return self._open_dox_yaml().get('image', image)
def get_commands(self, extra_args):
return " ".join([self._open_dox_yaml().get('commands')] +extra_args)
def get_prep_commands(self):
return self._open_dox_yaml().get('prep')
def get_add_files(self):
return self._open_dox_yaml().get('add')
| # Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
'get_dox_yaml',
]
import os
import yaml
_dox_yaml = None
def get_dox_yaml():
global _dox_yaml
if _dox_yaml is None:
_dox_yaml = DoxYaml()
return _dox_yaml
class DoxYaml(object):
_yaml = None
def _open_dox_yaml(self):
if self._yaml is None:
self._yaml = yaml.load(open('dox.yml', 'r'))
return self._yaml
def exists(self):
return os.path.exists('dox.yml')
def get_image(self, image):
return self._open_dox_yaml().get('image', image)
def get_commands(self, extra_args):
return self._open_dox_yaml().get('commands') + " ".join(extra_args)
def get_prep_commands(self):
return self._open_dox_yaml().get('prep')
def get_add_files(self):
return self._open_dox_yaml().get('add')
| apache-2.0 | Python |
4369de9f0f44860f27d26f6814dc100fefe421be | Fix tests for Django 1.10 | AliLozano/django-messages-extends,AliLozano/django-messages-extends,AliLozano/django-messages-extends | test_urls.py | test_urls.py | import django
if django.VERSION >= (1,10):
from django.conf.urls import include, url
patterns = lambda _ignore, x: list([x,])
else:
from django.conf.urls import patterns, include, url
urlpatterns = patterns('',
url(r'^messages/', include('messages_extends.urls', namespace='messages')),
)
| from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'temp.views.home', name='home'),
# url(r'^temp/', include('temp.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# url(r'^admin/', include(admin.site.urls)),
url(r'^messages/', include('messages_extends.urls', namespace='messages')),
)
| mit | Python |
03b36b5dc594bbe239d0ad66dc43ea7d1832072c | Remove warnings | pypa/setuptools,pypa/setuptools,pypa/setuptools | setuptools/distutils_patch.py | setuptools/distutils_patch.py | """
Ensure that the local copy of distutils is preferred over stdlib.
See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401
for more motivation.
"""
import sys
import re
import os
import importlib
import warnings
def clear_distutils():
if 'distutils' not in sys.modules:
return
mods = [name for name in sys.modules if re.match(r'distutils\b', name)]
for name in mods:
del sys.modules[name]
def enabled():
"""
Allow selection of distutils by environment variable.
"""
which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'stdlib')
return which == 'local'
def ensure_local_distutils():
clear_distutils()
distutils = importlib.import_module('setuptools._distutils')
distutils.__name__ = 'distutils'
sys.modules['distutils'] = distutils
# sanity check that submodules load as expected
core = importlib.import_module('distutils.core')
assert '_distutils' in core.__file__, core.__file__
if enabled():
ensure_local_distutils()
| """
Ensure that the local copy of distutils is preferred over stdlib.
See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401
for more motivation.
"""
import sys
import re
import os
import importlib
import warnings
is_pypy = '__pypy__' in sys.builtin_module_names
def warn_distutils_present():
if 'distutils' not in sys.modules:
return
if is_pypy and sys.version_info < (3, 7):
# PyPy for 3.6 unconditionally imports distutils, so bypass the warning
# https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250
return
warnings.warn(
"Distutils was imported before Setuptools. This usage is discouraged "
"and may exhibit undesirable behaviors or errors. Please use "
"Setuptools' objects directly or at least import Setuptools first.")
def clear_distutils():
if 'distutils' not in sys.modules:
return
warnings.warn("Setuptools is replacing distutils.")
mods = [name for name in sys.modules if re.match(r'distutils\b', name)]
for name in mods:
del sys.modules[name]
def enabled():
"""
Allow selection of distutils by environment variable.
"""
which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'stdlib')
return which == 'local'
def ensure_local_distutils():
clear_distutils()
distutils = importlib.import_module('setuptools._distutils')
distutils.__name__ = 'distutils'
sys.modules['distutils'] = distutils
# sanity check that submodules load as expected
core = importlib.import_module('distutils.core')
assert '_distutils' in core.__file__, core.__file__
warn_distutils_present()
if enabled():
ensure_local_distutils()
| mit | Python |
0534822dc02285060f57cdb27bda5ad3fbce8727 | Add support for altpassword. | AVOXI/b2bua,AVOXI/b2bua,sippy/b2bua,sippy/b2bua | sippy/IVoiSysAuthorisation.py | sippy/IVoiSysAuthorisation.py | # Copyright (c) 2003-2005 Maxim Sobolev. All rights reserved.
# Copyright (c) 2006-2007 Sippy Software, Inc. All rights reserved.
#
# This file is part of SIPPY, a free RFC3261 SIP stack and B2BUA.
#
# SIPPY is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# For a license to use the SIPPY software under conditions
# other than those described here, or to purchase support for this
# software, please contact Sippy Software, Inc. by e-mail at the
# following addresses: sales@sippysoft.com.
#
# SIPPY is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
from SQLTransactionManager import SQLTransactionManager
from time import time
class IVoiSysAuthorisation(object):
def __init__(self, global_config):
dsn = 'mysql://sbcfront:gb19!lDLn2#)F$NFbd2*@sbcdb1.pennytel.com/sbc'
self.sql_tm = SQLTransactionManager(dsn, nworkers = 4, lazy_connect = True)
def do_auth(self, username, res_cb, *cb_args):
self.sql_tm.sendQuery(('SELECT password, outbound_proxy, domain, ' \
'altpassword, use_alt_password FROM SBC_Reg_Config ' \
'WHERE account_number = \'%s\'' % username), self._process_result, 0, False, None,
res_cb, cb_args)
def _process_result(self, results, exceptions, res_cb, cb_args):
print results, exceptions
if exceptions[0] != None or len(results[0]) == 0:
res_cb(None, *cb_args)
else:
password, outbound_proxy, domain, altpassword, use_alt_password = results[0][0]
if use_alt_password == 0:
altpassword = password
res_cb((password, altpassword, (outbound_proxy, 5060), domain), *cb_args)
| # Copyright (c) 2003-2005 Maxim Sobolev. All rights reserved.
# Copyright (c) 2006-2007 Sippy Software, Inc. All rights reserved.
#
# This file is part of SIPPY, a free RFC3261 SIP stack and B2BUA.
#
# SIPPY is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# For a license to use the SIPPY software under conditions
# other than those described here, or to purchase support for this
# software, please contact Sippy Software, Inc. by e-mail at the
# following addresses: sales@sippysoft.com.
#
# SIPPY is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
from SQLTransactionManager import SQLTransactionManager
from time import time
class IVoiSysAuthorisation(object):
def __init__(self, global_config):
dsn = 'mysql://sbcfront:gb19!lDLn2#)F$NFbd2*@sbcdb1.pennytel.com/sbc'
self.sql_tm = SQLTransactionManager(dsn, nworkers = 4, lazy_connect = True)
def do_auth(self, username, res_cb, *cb_args):
self.sql_tm.sendQuery(('SELECT password, outbound_proxy, domain FROM SBC_Reg_Config ' \
'WHERE account_number = \'%s\'' % username), self._process_result, 0, False, None,
res_cb, cb_args)
def _process_result(self, results, exceptions, res_cb, cb_args):
print results, exceptions
if exceptions[0] != None or len(results[0]) == 0:
res_cb(None, *cb_args)
else:
password, outbound_proxy, domain = results[0][0]
res_cb((password, (outbound_proxy, 5060), domain), *cb_args)
| bsd-2-clause | Python |
b2354fdde28bf841bebfc1f5347b2bde3c3cc390 | Fix bill table missed column | eddiedb6/ej,eddiedb6/ej,eddiedb6/ej | db/TableBill.py | db/TableBill.py | {
PDBConst.Name: "bill",
PDBConst.Columns: [
{
PDBConst.Name: "ID",
PDBConst.Attributes: ["int", "not null", "auto_increment", "primary key"]
},
{
PDBConst.Name: "PID",
PDBConst.Attributes: ["int", "not null"]
},
{
PDBConst.Name: "Datetime",
PDBConst.Attributes: ["datetime", "not null"]
},
{
PDBConst.Name: "Amount",
PDBConst.Attributes: ["double(12,2)", "not null"]
},
{
PDBConst.Name: "Currency",
PDBConst.Attributes: ["tinyint", "not null", "default 1"]
},
{
PDBConst.Name: "Category",
PDBConst.Attributes: ["tinyint"]
},
{
PDBConst.Name: "PaymentMode",
PDBConst.Attributes: ["tinyint"]
},
{
PDBConst.Name: "Note",
PDBConst.Attributes: ["varchar(255)"]
}]
}
| {
PDBConst.Name: "bill",
PDBConst.Columns: [
{
PDBConst.Name: "ID",
PDBConst.Attributes: ["int", "not null", "auto_increment", "primary key"]
},
{
PDBConst.Name: "Datetime",
PDBConst.Attributes: ["datetime", "not null"]
},
{
PDBConst.Name: "Amount",
PDBConst.Attributes: ["double(12,2)", "not null"]
},
{
PDBConst.Name: "Currency",
PDBConst.Attributes: ["tinyint", "not null", "default 1"]
},
{
PDBConst.Name: "Category",
PDBConst.Attributes: ["tinyint"]
},
{
PDBConst.Name: "PaymentMode",
PDBConst.Attributes: ["tinyint"]
},
{
PDBConst.Name: "Note",
PDBConst.Attributes: ["varchar(255)"]
}]
}
| mit | Python |
5da6ebbab96dd29a205d82b8fd0996085995cd2b | Update to fix width/height for rotation. | drougge/wellpapp-pyclient | db_oldrotate.py | db_oldrotate.py | #!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
from pyexiv2 import Image as ExivImage
import Image
from db_add import exif2rotation
from dbclient import dbclient
from sys import argv
opts = argv[1] # z for size (fix for rotation), r for rotate (from exif)
client = dbclient()
fields = ["rotate", "width", "height"]
posts = client._search_post("SPF" + " F".join(fields), fields)
print len(posts), "posts"
count = 0
for post in filter(lambda p: p["rotate"] in (-1, 90, 270), posts):
m = post["md5"]
fn = client.image_path(m)
exif = ExivImage(fn)
exif.readMetadata()
rot = exif2rotation(exif)
did = False
if rot in (90, 270) and "z" in opts:
img = Image.open(fn)
w, h = img.size
if post["width"] != h:
client.modify_post(m, width=h, height=w)
did = True
if rot != post["rotate"] and "r" in opts:
client.modify_post(m, rotate=rot)
did = True
if did: count += 1
print "Modified", count, "posts"
| #!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
from pyexiv2 import Image as ExivImage
from db_add import exif2rotation
from dbclient import dbclient
client = dbclient()
posts = client._search_post("SPFrotate", ["rotate"])
print len(posts), "posts"
for post in posts:
if post["rotate"] == -1:
m = post["md5"]
exif = ExivImage(client.image_path(m))
exif.readMetadata()
rot = exif2rotation(exif)
if rot >= 0:
client.modify_post(m, rotate=rot)
| mit | Python |
b047bb4b75df155cb20308612954767c8a5f5ac8 | Fix error message displayed when the initial api call on the dialogue screen fails | praekelt/vumi-go,praekelt/vumi-go,praekelt/vumi-go,praekelt/vumi-go | go/apps/dialogue/view_definition.py | go/apps/dialogue/view_definition.py | from django.http import HttpResponse
from bootstrap.forms import BootstrapForm
from go.api.go_api import client
from go.api.go_api.client import GoApiError
from go.conversation.view_definition import (
ConversationViewDefinitionBase, ConversationTemplateView)
class DialogueEditView(ConversationTemplateView):
"""This app is a unique and special snowflake, so it gets special views.
"""
view_name = 'edit'
path_suffix = 'edit/'
template_base = 'dialogue'
def get(self, request, conversation):
r = client.rpc(
request.session.session_key, 'conversation.dialogue.get_poll',
[request.user_api.user_account_key,
conversation.key])
if r.status_code != 200:
raise GoApiError(
"Failed to load dialogue from Go API:"
" (%r) %r." % (r.status_code, r.text))
model_data = {
'campaign_id': request.user_api.user_account_key,
'conversation_key': conversation.key,
}
model_data.update(r.json['result']['poll'])
return self.render_to_response({
'conversation': conversation,
'session_id': request.session.session_key,
'model_data': model_data,
})
class UserDataView(ConversationTemplateView):
view_name = 'user_data'
path_suffix = 'users.csv'
def get(self, request, conversation):
# TODO: write new CSV data export
csv_data = "TODO: write data export."
return HttpResponse(csv_data, content_type='application/csv')
class SendDialogueForm(BootstrapForm):
# TODO: Something better than this?
pass
class ConversationViewDefinition(ConversationViewDefinitionBase):
edit_view = DialogueEditView
extra_views = (
UserDataView,
)
action_forms = {
'send_dialogue': SendDialogueForm,
}
| from django.http import HttpResponse
from bootstrap.forms import BootstrapForm
from go.api.go_api import client
from go.api.go_api.client import GoApiError
from go.conversation.view_definition import (
ConversationViewDefinitionBase, ConversationTemplateView)
class DialogueEditView(ConversationTemplateView):
"""This app is a unique and special snowflake, so it gets special views.
"""
view_name = 'edit'
path_suffix = 'edit/'
template_base = 'dialogue'
def get(self, request, conversation):
r = client.rpc(
request.session.session_key, 'conversation.dialogue.get_poll',
[request.user_api.user_account_key,
conversation.key])
if r.status_code != 200:
raise GoApiError(
"Failed to load routing table from Go API:"
" (%r) %r." % (r.status_code, r.text))
model_data = {
'campaign_id': request.user_api.user_account_key,
'conversation_key': conversation.key,
}
model_data.update(r.json['result']['poll'])
return self.render_to_response({
'conversation': conversation,
'session_id': request.session.session_key,
'model_data': model_data,
})
class UserDataView(ConversationTemplateView):
view_name = 'user_data'
path_suffix = 'users.csv'
def get(self, request, conversation):
# TODO: write new CSV data export
csv_data = "TODO: write data export."
return HttpResponse(csv_data, content_type='application/csv')
class SendDialogueForm(BootstrapForm):
# TODO: Something better than this?
pass
class ConversationViewDefinition(ConversationViewDefinitionBase):
edit_view = DialogueEditView
extra_views = (
UserDataView,
)
action_forms = {
'send_dialogue': SendDialogueForm,
}
| bsd-3-clause | Python |
a50bb67591ce134c0333176872e3953eaedc6d2b | fix missing archive extention | pavel-paulau/perfrunner,pavel-paulau/perfrunner,thomas-couchbase/perfrunner,pavel-paulau/perfrunner,vmx/perfrunner,couchbase/perfrunner,mikewied/perfrunner,couchbase/perfrunner,dkao-cb/perfrunner,EricACooper/perfrunner,PaintScratcher/perfrunner,vmx/perfrunner,couchbase/perfrunner,PaintScratcher/perfrunner,hsharsha/perfrunner,dkao-cb/perfrunner,couchbase/perfrunner,couchbase/perfrunner,EricACooper/perfrunner,EricACooper/perfrunner,hsharsha/perfrunner,couchbase/perfrunner,pavel-paulau/perfrunner,mikewied/perfrunner,thomas-couchbase/perfrunner,pavel-paulau/perfrunner,EricACooper/perfrunner | perfrunner/helpers/remote.py | perfrunner/helpers/remote.py | from uuid import uuid4
from fabric.api import execute, hide, get, run, parallel, settings
from logger import logger
from perfrunner.helpers import Helper
def all_hosts(task):
def wrapper(*args, **kargs):
self = args[0]
with hide('output', 'running'):
with settings(user=self.ssh_username, password=self.ssh_password):
return execute(parallel(task), *args, hosts=self.hosts, **kargs)
return wrapper
def single_host(task):
def wrapper(*args, **kargs):
self = args[0]
with hide('output', 'running'):
with settings(host_string=self.hosts[0],
user=self.ssh_username, password=self.ssh_password):
return task(*args, **kargs)
return wrapper
class RemoteHelper(Helper):
ARCH = {'i686': 'x86', 'i386': 'x86', 'x86_64': 'x86_64'}
def wget(self, url, outdir='/tmp'):
logger.info('Fetching {0}'.format(url))
run('wget -nc "{0}" -P {1}'.format(url, outdir))
@single_host
def detect_pkg(self):
logger.info('Detecting package manager')
dist = run('python -c "import platform; print platform.dist()[0]"')
if dist in ('Ubuntu', 'Debian'):
return 'deb'
else:
return 'rpm'
@single_host
def detect_arch(self):
logger.info('Detecting platform architecture')
arch = run('arch')
return self.ARCH[arch]
@all_hosts
def reset_swap(self):
logger.info('Resetting swap')
run('swapoff --all && swapon --all')
@all_hosts
def drop_caches(self):
logger.info('Dropping memory cache')
run('sync && echo 3 > /proc/sys/vm/drop_caches')
@all_hosts
def collect_info(self):
logger.info('Running cbcollect_info')
fname = uuid4().hex
run('/opt/couchbase/bin/cbcollect_info /tmp/{0}'.format(fname))
get('/tmp/{0}.zip'.format(fname))
run('rm -f /tmp/{0}.zip'.format(fname))
| from uuid import uuid4
from fabric.api import execute, hide, get, run, parallel, settings
from logger import logger
from perfrunner.helpers import Helper
def all_hosts(task):
def wrapper(*args, **kargs):
self = args[0]
with hide('output', 'running'):
with settings(user=self.ssh_username, password=self.ssh_password):
return execute(parallel(task), *args, hosts=self.hosts, **kargs)
return wrapper
def single_host(task):
def wrapper(*args, **kargs):
self = args[0]
with hide('output', 'running'):
with settings(host_string=self.hosts[0],
user=self.ssh_username, password=self.ssh_password):
return task(*args, **kargs)
return wrapper
class RemoteHelper(Helper):
ARCH = {'i686': 'x86', 'i386': 'x86', 'x86_64': 'x86_64'}
def wget(self, url, outdir='/tmp'):
logger.info('Fetching {0}'.format(url))
run('wget -nc "{0}" -P {1}'.format(url, outdir))
@single_host
def detect_pkg(self):
logger.info('Detecting package manager')
dist = run('python -c "import platform; print platform.dist()[0]"')
if dist in ('Ubuntu', 'Debian'):
return 'deb'
else:
return 'rpm'
@single_host
def detect_arch(self):
logger.info('Detecting platform architecture')
arch = run('arch')
return self.ARCH[arch]
@all_hosts
def reset_swap(self):
logger.info('Resetting swap')
run('swapoff --all && swapon --all')
@all_hosts
def drop_caches(self):
logger.info('Dropping memory cache')
run('sync && echo 3 > /proc/sys/vm/drop_caches')
@all_hosts
def collect_info(self):
logger.info('Running cbcollect_info')
fname = uuid4().hex
run('/opt/couchbase/bin/cbcollect_info /tmp/{0}'.format(fname))
get('/tmp/{0}'.format(fname))
run('rm -f /tmp/{0}'.format(fname))
| apache-2.0 | Python |
e6d539d073bcac1bee247ca66706621b11969fd6 | Support Meta and F* keys in urwid. | 5monkeys/bpython | bpython/keys.py | bpython/keys.py | # The MIT License
#
# Copyright (c) 2008 Simon de Vlieger
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import string
class KeyMap:
def __init__(self):
self.map = {}
def __getitem__(self, key):
if not key:
# Unbound key
return str()
elif key in self.map:
return self.map[key]
else:
raise KeyError('Configured keymap (%s)' % key +
' does not exist in bpython.keys')
def __delitem__(self, key):
del self.map[key]
def __setitem__(self, key, value):
self.map[key] = value
cli_key_dispatch = KeyMap()
urwid_key_dispatch = KeyMap()
# fill dispatch with letters
for c in string.ascii_lowercase:
cli_key_dispatch['C-%s' % c] = (chr(string.ascii_lowercase.index(c) + 1),
'^%s' % c.upper())
for c in string.ascii_lowercase:
urwid_key_dispatch['C-%s' % c] = 'ctrl %s' % c
urwid_key_dispatch['M-%s' % c] = 'meta %s' % c
# fill dispatch with cool characters
cli_key_dispatch['C-['] = (chr(27), '^[')
cli_key_dispatch['C-\\'] = (chr(28), '^\\')
cli_key_dispatch['C-]'] = (chr(29), '^]')
cli_key_dispatch['C-^'] = (chr(30), '^^')
cli_key_dispatch['C-_'] = (chr(31), '^_')
# fill dispatch with function keys
for x in xrange(1, 13):
cli_key_dispatch['F%s' % str(x)] = ('KEY_F(%s)' % str(x),)
for x in xrange(1, 13):
urwid_key_dispatch['F%s' % str(x)] = 'f%s' % str(x)
| # The MIT License
#
# Copyright (c) 2008 Simon de Vlieger
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import string
class KeyMap:
def __init__(self):
self.map = {}
def __getitem__(self, key):
if not key:
# Unbound key
return str()
elif key in self.map:
return self.map[key]
else:
raise KeyError('Configured keymap (%s)' % key +
' does not exist in bpython.keys')
def __delitem__(self, key):
del self.map[key]
def __setitem__(self, key, value):
self.map[key] = value
cli_key_dispatch = KeyMap()
urwid_key_dispatch = KeyMap()
# fill dispatch with letters
for c in string.ascii_lowercase:
cli_key_dispatch['C-%s' % c] = (chr(string.ascii_lowercase.index(c) + 1),
'^%s' % c.upper())
for c in string.ascii_lowercase:
urwid_key_dispatch['C-%s' % c] = 'ctrl %s' % c
# fill dispatch with cool characters
cli_key_dispatch['C-['] = (chr(27), '^[')
cli_key_dispatch['C-\\'] = (chr(28), '^\\')
cli_key_dispatch['C-]'] = (chr(29), '^]')
cli_key_dispatch['C-^'] = (chr(30), '^^')
cli_key_dispatch['C-_'] = (chr(31), '^_')
# fill dispatch with function keys
for x in xrange(1, 13):
cli_key_dispatch['F%s' % str(x)] = ('KEY_F(%s)' % str(x),)
for x in xrange(1, 13):
urwid_key_dispatch['F%s' % str(x)] = 'F%s' % str(x)
| mit | Python |
6c866caa0b5d6a251c694e13a3f1d5e32c45ae3a | Add missing NameFilter.to_es_filter() implementation | dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq | corehq/apps/export/filters.py | corehq/apps/export/filters.py | from corehq.apps.es import filters as esfilters
from corehq.apps.es.cases import (
owner,
is_closed,
opened_range,
modified_range,
user,
closed_range,
)
class ExportFilter(object):
"""
Abstract base class for an export filter on a single case or form property
"""
def to_es_filter(self):
"""
Return an ES filter representing this filter
"""
raise NotImplementedError
# TODO: Add another function here to be used for couch filtering
class RangeExportFilter(ExportFilter):
def __init__(self, gt=None, gte=None, lt=None, lte=None):
self.gt = gt
self.gte = gte
self.lt = lt
self.lte = lte
class OwnerFilter(ExportFilter):
"""
Filter on owner_id
"""
def __init__(self, owner_id):
self.owner_id = owner_id
def to_es_filter(self):
return owner(self.owner_id)
class IsClosedFilter(ExportFilter):
"""
Filter on case closed property
"""
def __init__(self, is_closed):
self.is_closed = is_closed
def to_es_filter(self):
return is_closed(self.is_closed)
class NameFilter(ExportFilter):
def __init__(self, case_name):
self.case_name = case_name
def to_es_filter(self):
return esfilters.term('name', self.case_name)
class OpenedOnRangeFilter(RangeExportFilter):
def to_es_filter(self):
return opened_range(self.gt, self.gte, self.lt, self.lte)
class OpenedByFilter(ExportFilter):
def __init__(self, opened_by):
self.opened_by = opened_by
def to_es_filter(self):
# TODO: Add this to default case filters?
return esfilters.term('opened_by', self.opened_by)
class ModifiedOnRangeFilter(RangeExportFilter):
def to_es_filter(self):
return modified_range(self.gt, self.gte, self.lt, self.lte)
class LastModifiedByFilter(ExportFilter):
def __init__(self, last_modified_by):
self.last_modified_by = last_modified_by
def to_es_filter(self):
return user(self.last_modified_by)
class ClosedOnRangeFilter(RangeExportFilter):
def to_es_filter(self):
return closed_range(self.gt, self.gte, self.lt, self.lte)
class ClosedByFilter(ExportFilter):
def __init__(self, closed_by):
self.closed_by = closed_by
def to_es_filter(self):
return esfilters.term("closed_by", self.closed_by)
# TODO: owner/modifier/closer in location/group filters
# TODO: Add form filters
| from corehq.apps.es import filters as esfilters
from corehq.apps.es.cases import (
owner,
is_closed,
opened_range,
modified_range,
user,
closed_range,
)
class ExportFilter(object):
"""
Abstract base class for an export filter on a single case or form property
"""
def to_es_filter(self):
"""
Return an ES filter representing this filter
"""
raise NotImplementedError
# TODO: Add another function here to be used for couch filtering
class RangeExportFilter(ExportFilter):
def __init__(self, gt=None, gte=None, lt=None, lte=None):
self.gt = gt
self.gte = gte
self.lt = lt
self.lte = lte
class OwnerFilter(ExportFilter):
"""
Filter on owner_id
"""
def __init__(self, owner_id):
self.owner_id = owner_id
def to_es_filter(self):
return owner(self.owner_id)
class IsClosedFilter(ExportFilter):
"""
Filter on case closed property
"""
def __init__(self, is_closed):
self.is_closed = is_closed
def to_es_filter(self):
return is_closed(self.is_closed)
class NameFilter(ExportFilter):
def __init__(self, case_name):
self.case_name = case_name
def to_es_filter(self):
raise NotImplementedError
class OpenedOnRangeFilter(RangeExportFilter):
def to_es_filter(self):
return opened_range(self.gt, self.gte, self.lt, self.lte)
class OpenedByFilter(ExportFilter):
def __init__(self, opened_by):
self.opened_by = opened_by
def to_es_filter(self):
# TODO: Add this to default case filters?
return esfilters.term('opened_by', self.opened_by)
class ModifiedOnRangeFilter(RangeExportFilter):
def to_es_filter(self):
return modified_range(self.gt, self.gte, self.lt, self.lte)
class LastModifiedByFilter(ExportFilter):
def __init__(self, last_modified_by):
self.last_modified_by = last_modified_by
def to_es_filter(self):
return user(self.last_modified_by)
class ClosedOnRangeFilter(RangeExportFilter):
def to_es_filter(self):
return closed_range(self.gt, self.gte, self.lt, self.lte)
class ClosedByFilter(ExportFilter):
def __init__(self, closed_by):
self.closed_by = closed_by
def to_es_filter(self):
return esfilters.term("closed_by", self.closed_by)
# TODO: owner/modifier/closer in location/group filters
# TODO: Add form filters
| bsd-3-clause | Python |
b4250c4f8e700906a6d82915f98b8a0133892396 | Make subclass testing more boring | kelle/astropy,DougBurke/astropy,lpsinger/astropy,lpsinger/astropy,pllim/astropy,joergdietrich/astropy,funbaker/astropy,saimn/astropy,joergdietrich/astropy,dhomeier/astropy,astropy/astropy,pllim/astropy,dhomeier/astropy,funbaker/astropy,stargaser/astropy,aleksandr-bakanov/astropy,StuartLittlefair/astropy,tbabej/astropy,mhvk/astropy,kelle/astropy,dhomeier/astropy,larrybradley/astropy,saimn/astropy,mhvk/astropy,tbabej/astropy,mhvk/astropy,MSeifert04/astropy,tbabej/astropy,kelle/astropy,saimn/astropy,saimn/astropy,joergdietrich/astropy,AustereCuriosity/astropy,DougBurke/astropy,mhvk/astropy,pllim/astropy,lpsinger/astropy,tbabej/astropy,aleksandr-bakanov/astropy,dhomeier/astropy,tbabej/astropy,larrybradley/astropy,saimn/astropy,AustereCuriosity/astropy,bsipocz/astropy,astropy/astropy,bsipocz/astropy,joergdietrich/astropy,mhvk/astropy,lpsinger/astropy,MSeifert04/astropy,aleksandr-bakanov/astropy,DougBurke/astropy,astropy/astropy,aleksandr-bakanov/astropy,StuartLittlefair/astropy,joergdietrich/astropy,kelle/astropy,pllim/astropy,bsipocz/astropy,AustereCuriosity/astropy,DougBurke/astropy,AustereCuriosity/astropy,astropy/astropy,astropy/astropy,larrybradley/astropy,stargaser/astropy,MSeifert04/astropy,dhomeier/astropy,larrybradley/astropy,StuartLittlefair/astropy,lpsinger/astropy,pllim/astropy,funbaker/astropy,funbaker/astropy,StuartLittlefair/astropy,kelle/astropy,StuartLittlefair/astropy,stargaser/astropy,MSeifert04/astropy,bsipocz/astropy,AustereCuriosity/astropy,larrybradley/astropy,stargaser/astropy | astropy/table/tests/test_subclass.py | astropy/table/tests/test_subclass.py | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# TEST_UNICODE_LITERALS
from ... import table
from .. import pprint
class MyRow(table.Row):
def __str__(self):
return str(self.data)
class MyColumn(table.Column):
pass
class MyMaskedColumn(table.Column):
pass
class MyTableColumns(table.TableColumns):
pass
class MyTableFormatter(pprint.TableFormatter):
pass
class MyTable(table.Table):
Row = MyRow
Column = MyColumn
MaskedColumn = MyMaskedColumn
TableColumns = MyTableColumns
TableFormatter = MyTableFormatter
def test_simple_subclass():
t = MyTable([[1, 2], [3, 4]])
row = t[0]
assert isinstance(row, MyRow)
assert isinstance(t['col0'], MyColumn)
assert isinstance(t.columns, MyTableColumns)
assert isinstance(t.formatter, MyTableFormatter)
t2 = MyTable(t)
row = t2[0]
assert isinstance(row, MyRow)
assert str(row) == '(1, 3)'
t3 = table.Table(t)
row = t3[0]
assert not isinstance(row, MyRow)
assert str(row) != '(1, 3)'
t = MyTable([[1, 2], [3, 4]], masked=True)
row = t[0]
assert isinstance(row, MyRow)
assert str(row) == '(1, 3)'
assert isinstance(t['col0'], MyMaskedColumn)
assert isinstance(t.formatter, MyTableFormatter)
| # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# TEST_UNICODE_LITERALS
from ... import table
from .. import pprint
class MyRow(table.Row):
def __str__(self):
return str(self.data)
class MyColumn(table.Column):
def cool(self):
return 'Cool!'
class MyMaskedColumn(table.Column):
def cool(self):
return 'MaskedCool!'
class MyTableColumns(table.TableColumns):
def cool(self):
return 'CoolTableColumns!'
class MyTableFormatter(pprint.TableFormatter):
def cool(self):
return 'CoolTableFormatter!'
class MyTable(table.Table):
Row = MyRow
Column = MyColumn
MaskedColumn = MyMaskedColumn
TableColumns = MyTableColumns
TableFormatter = MyTableFormatter
def test_simple_subclass():
t = MyTable([[1, 2], [3, 4]])
row = t[0]
assert isinstance(row, MyRow)
assert str(row) == '(1, 3)'
assert t['col0'].cool() == 'Cool!'
assert t.columns.cool() == 'CoolTableColumns!'
assert t.formatter.cool() == 'CoolTableFormatter!'
t2 = MyTable(t)
row = t2[0]
assert isinstance(row, MyRow)
assert str(row) == '(1, 3)'
t3 = table.Table(t)
row = t3[0]
assert not isinstance(row, MyRow)
assert str(row) != '(1, 3)'
t = MyTable([[1, 2], [3, 4]], masked=True)
row = t[0]
assert isinstance(row, MyRow)
assert str(row) == '(1, 3)'
assert t['col0'].cool() == 'MaskedCool!'
assert t.formatter.cool() == 'CoolTableFormatter!'
| bsd-3-clause | Python |
5db183c681af4b078fa8addc8cf4268b7315678a | allow to give modifiers to grep values | msztolcman/jsontool,mysz/jsontool | jsontool.py | jsontool.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" jsontool - Perform some actions with json using CLI
http://github.com/mysz/jsontool
Author: Marcin Sztolcman (marcin@urzenia.net)
Get help with: jsontool.py --help
Information about version: jsontool.py --version
"""
from __future__ import print_function, unicode_literals
__version__ = '0.1.0'
import argparse
import json
import os.path
import sys
try:
from pygments import highlight
from pygments.lexers import get_lexer_by_name
from pygments.formatters import get_formatter_by_name
lexer = get_lexer_by_name('json')
formatter = get_formatter_by_name('terminal256')
def print_colorized(data, dst=sys.stdout):
if hasattr(dst, 'isatty') and dst.isatty():
print(highlight(data, lexer, formatter), end='', file=dst)
else:
print(data, file=dst)
except ImportError as e:
def print_colorized(data, dst=sys.stdout):
print(data, file=dst)
def show_version():
""" Show version info and exit.
"""
print('{0}: version {1}'.format(os.path.basename(sys.argv[0]), __version__))
sys.exit(0)
def build_filters(filter_definitions):
filters = []
if not filter_definitions:
return lambda item: bool(item)
modifiers = {
'i': int,
'f': float,
's': str,
'b': bool,
}
for definition in filter_definitions:
try:
key, value, modifier = definition.split(':', 2)
modifier = modifiers.get(modifier, None)
except ValueError:
key, value = definition.split(':', 1)
modifier = str
if not modifier:
modifier = lambda item: item
filters.append(lambda data: key in data and data[key] == modifier(value))
def _filter(item):
return item and all(flt(item) for flt in filters)
return _filter
def json_loads(data):
try:
return json.loads(data)
except ValueError:
pass
def main():
p = argparse.ArgumentParser()
p.add_argument('-f', '--field', type=str)
p.add_argument('-g', '--grep', action='append')
p.add_argument('-v', '--version', action='store_true')
# p.add_argument('-l', '--highlight', type=str)
args = p.parse_args()
if args.version:
show_version()
filters = build_filters(args.grep)
data = map(json_loads, sys.stdin)
data = filter(filters, data)
data.sort(key=lambda item: item[args.field])
for line in data:
line = json.dumps(line)
print_colorized(line)
if __name__ == '__main__':
main()
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" jsontool - Perform some actions with json using CLI
http://github.com/mysz/jsontool
Author: Marcin Sztolcman (marcin@urzenia.net)
Get help with: jsontool.py --help
Information about version: jsontool.py --version
"""
from __future__ import print_function, unicode_literals
__version__ = '0.1.0'
import argparse
import json
import os.path
import sys
try:
from pygments import highlight
from pygments.lexers import get_lexer_by_name
from pygments.formatters import get_formatter_by_name
lexer = get_lexer_by_name('json')
formatter = get_formatter_by_name('terminal256')
def print_colorized(data, dst=sys.stdout):
if hasattr(dst, 'isatty') and dst.isatty():
print(highlight(data, lexer, formatter), end='', file=dst)
else:
print(data, file=dst)
except ImportError as e:
def print_colorized(data, dst=sys.stdout):
print(data, file=dst)
def show_version():
""" Show version info and exit.
"""
print('{0}: version {1}'.format(os.path.basename(sys.argv[0]), __version__))
sys.exit(0)
def build_filters(filter_definitions):
filters = []
if not filter_definitions:
return lambda item: bool(item)
for definition in filter_definitions:
key, value = definition.split(':', 1)
filters.append(lambda data: key in data and data[key] == value)
def _filter(item):
return item and all(flt(item) for flt in filters)
return _filter
def json_loads(data):
try:
return json.loads(data)
except ValueError:
pass
def main():
p = argparse.ArgumentParser()
p.add_argument('-f', '--field', type=str)
p.add_argument('-g', '--grep', action='append')
p.add_argument('-v', '--version', action='store_true')
# p.add_argument('-l', '--highlight', type=str)
args = p.parse_args()
if args.version:
show_version()
filters = build_filters(args.grep)
data = map(json_loads, sys.stdin)
data = filter(filters, data)
data.sort(key=lambda item: item[args.field])
for line in data:
line = json.dumps(line)
print_colorized(line)
if __name__ == '__main__':
main()
| mit | Python |
9109458ca157907145d5ae2ce5f709bd72bacc6b | Update Error Message | rtfd/readthedocs.org,rtfd/readthedocs.org,rtfd/readthedocs.org,rtfd/readthedocs.org | readthedocs/doc_builder/exceptions.py | readthedocs/doc_builder/exceptions.py | # -*- coding: utf-8 -*-
"""Exceptions raised when building documentation."""
from django.utils.translation import ugettext_noop
class BuildEnvironmentException(Exception):
message = None
status_code = None
def __init__(self, message=None, **kwargs):
self.status_code = kwargs.pop(
'status_code',
None,
) or self.status_code or 1
message = message or self.get_default_message()
super().__init__(message, **kwargs)
def get_default_message(self):
return self.message
class BuildEnvironmentError(BuildEnvironmentException):
GENERIC_WITH_BUILD_ID = ugettext_noop(
'There was a problem with Read the Docs while building your documentation. '
'Please try again later. '
'However, if this problem persists, '
'please report this to us with your build id ({build_id}).',
)
class BuildEnvironmentCreationFailed(BuildEnvironmentError):
message = ugettext_noop('Build environment creation failed')
class VersionLockedError(BuildEnvironmentError):
message = ugettext_noop('Version locked, retrying in 5 minutes.')
status_code = 423
class ProjectBuildsSkippedError(BuildEnvironmentError):
message = ugettext_noop('Builds for this project are temporarily disabled')
class YAMLParseError(BuildEnvironmentError):
GENERIC_WITH_PARSE_EXCEPTION = ugettext_noop(
'Problem in your project\'s configuration. {exception}',
)
class BuildTimeoutError(BuildEnvironmentError):
message = ugettext_noop('Build exited due to time out')
class BuildEnvironmentWarning(BuildEnvironmentException):
pass
class MkDocsYAMLParseError(BuildEnvironmentError):
GENERIC_WITH_PARSE_EXCEPTION = ugettext_noop(
'Problem parsing MkDocs YAML configuration. {exception}',
)
INVALID_DOCS_DIR_CONFIG = ugettext_noop(
'The "docs_dir" config from your MkDocs YAML config file has to be a '
'string with relative or absolute path.',
)
INVALID_DOCS_DIR_PATH = ugettext_noop(
'The "docs_dir" config from your MkDocs YAML config file does not '
'contain a valid path.',
)
INVALID_EXTRA_CONFIG = ugettext_noop(
'The "{config}" config from your MkDocs YAML config file has to be a '
'a list of relative paths.',
)
EMPTY_CONFIG = 'Please make sure the MkDocs YAML configuration file is not empty.'
CONFIG_NOT_DICT = ugettext_noop(
'Your MkDocs YAML config file is incorrect. '
'Please follow the user guide https://www.mkdocs.org/user-guide/configuration/ '
'to configure the file properly.',
)
| # -*- coding: utf-8 -*-
"""Exceptions raised when building documentation."""
from django.utils.translation import ugettext_noop
class BuildEnvironmentException(Exception):
message = None
status_code = None
def __init__(self, message=None, **kwargs):
self.status_code = kwargs.pop(
'status_code',
None,
) or self.status_code or 1
message = message or self.get_default_message()
super().__init__(message, **kwargs)
def get_default_message(self):
return self.message
class BuildEnvironmentError(BuildEnvironmentException):
GENERIC_WITH_BUILD_ID = ugettext_noop(
'There was a problem with Read the Docs while building your documentation. '
'Please try again later. '
'However, if this problem persists, '
'please report this to us with your build id ({build_id}).',
)
class BuildEnvironmentCreationFailed(BuildEnvironmentError):
message = ugettext_noop('Build environment creation failed')
class VersionLockedError(BuildEnvironmentError):
message = ugettext_noop('Version locked, retrying in 5 minutes.')
status_code = 423
class ProjectBuildsSkippedError(BuildEnvironmentError):
message = ugettext_noop('Builds for this project are temporarily disabled')
class YAMLParseError(BuildEnvironmentError):
GENERIC_WITH_PARSE_EXCEPTION = ugettext_noop(
'Problem in your project\'s configuration. {exception}',
)
class BuildTimeoutError(BuildEnvironmentError):
message = ugettext_noop('Build exited due to time out')
class BuildEnvironmentWarning(BuildEnvironmentException):
pass
class MkDocsYAMLParseError(BuildEnvironmentError):
GENERIC_WITH_PARSE_EXCEPTION = ugettext_noop(
'Problem parsing MkDocs YAML configuration. {exception}',
)
INVALID_DOCS_DIR_CONFIG = ugettext_noop(
'The "docs_dir" config from your MkDocs YAML config file has to be a '
'string with relative or absolute path.',
)
INVALID_DOCS_DIR_PATH = ugettext_noop(
'The "docs_dir" config from your MkDocs YAML config file does not '
'contain a valid path.',
)
INVALID_EXTRA_CONFIG = ugettext_noop(
'The "{config}" config from your MkDocs YAML config file has to be a '
'a list of relative paths.',
)
EMPTY_CONFIG = 'Please make sure the MkDocs YAML configuration file is not empty.'
CONFIG_NOT_DICT = ugettext_noop(
'Your MkDocs YAML configuration was incorrect. '
'Please follow the user guide https://www.mkdocs.org/user-guide/configuration/ '
'to configure the file properly.',
)
| mit | Python |
c1f31f69ca7ba75185100cf7a8eabf58ed41ccdf | Connect signal callback using the model class as sender. | mozilla/telemetry-analysis-service,mozilla/telemetry-analysis-service,mozilla/telemetry-analysis-service,mozilla/telemetry-analysis-service | atmo/apps.py | atmo/apps.py | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, you can obtain one at http://mozilla.org/MPL/2.0/.
import logging
import session_csrf
from django.apps import AppConfig
from django.db.models.signals import post_save, pre_delete
DEFAULT_JOB_TIMEOUT = 15
logger = logging.getLogger("django")
class AtmoAppConfig(AppConfig):
name = 'atmo'
def ready(self):
# The app is now ready. Include any monkey patches here.
# Monkey patch CSRF to switch to session based CSRF. Session
# based CSRF will prevent attacks from apps under the same
# domain. If you're planning to host your app under it's own
# domain you can remove session_csrf and use Django's CSRF
# library. See also
# https://github.com/mozilla/sugardough/issues/38
session_csrf.monkeypatch()
# Connect signals.
from atmo.jobs.models import SparkJob
from atmo.jobs.signals import assign_group_perm, remove_group_perm
post_save.connect(
assign_group_perm,
sender=SparkJob,
dispatch_uid='sparkjob_post_save_assign_perm',
)
pre_delete.connect(
remove_group_perm,
sender=SparkJob,
dispatch_uid='sparkjob_pre_delete_remove_perm',
)
class KeysAppConfig(AppConfig):
name = 'atmo.keys'
label = 'keys'
verbose_name = 'Keys'
| # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, you can obtain one at http://mozilla.org/MPL/2.0/.
import logging
import session_csrf
from django.apps import AppConfig
from django.db.models.signals import post_save, pre_delete
DEFAULT_JOB_TIMEOUT = 15
logger = logging.getLogger("django")
class AtmoAppConfig(AppConfig):
name = 'atmo'
def ready(self):
# The app is now ready. Include any monkey patches here.
# Monkey patch CSRF to switch to session based CSRF. Session
# based CSRF will prevent attacks from apps under the same
# domain. If you're planning to host your app under it's own
# domain you can remove session_csrf and use Django's CSRF
# library. See also
# https://github.com/mozilla/sugardough/issues/38
session_csrf.monkeypatch()
# Connect signals.
from atmo.jobs.signals import assign_group_perm, remove_group_perm
post_save.connect(assign_group_perm, sender='jobs.SparkJob',
dispatch_uid='sparkjob_post_save_assign_perm')
pre_delete.connect(remove_group_perm, sender='jobs.SparkJob',
dispatch_uid='sparkjob_pre_delete_remove_perm')
class KeysAppConfig(AppConfig):
name = 'atmo.keys'
label = 'keys'
verbose_name = 'Keys'
| mpl-2.0 | Python |
477e200d36de1ec83146fd0117745997ae3d7357 | Update clean documentation | J535D165/recordlinkage,J535D165/recordlinkage | recordlinkage/standardise/cleaning.py | recordlinkage/standardise/cleaning.py | from __future__ import division
from __future__ import absolute_import
import logging
import numpy as np
import pandas as pd
import itertools
def clean(s, lower=True, replace_by_none='[^ \-\_A-Za-z0-9]+', replace_by_whitespace='[\-\_]', remove_brackets=True):
"""
clean(lower=True, replace_by_none='[^ \-\_A-Za-z0-9]+', replace_by_whitespace='[\-\_]', remove_brackets=True)
Clean a pandas Series with strings. Remove unwanted tokens and additional
whitespace.
:param s: A pandas.Series to clean.
:param lower: Convert the strings in lowercase characters.
:param replace_by_none: A regular expression that is replaced by ''.
:param replace_by_whitespace: A regular expression that is replaced by a whitespace.
:param remove_brackets: Remove all content between brackets and the brackets themselves.
:type s: pandas.Series
:type lower: boolean
:type replace_by_none: string
:type replace_by_whitespace: string
:type remove_brackets: boolean
:return: A cleaned Series of strings.
:rtype: pandas.Series
For example:
>>> s = pandas.Series(['Mary-ann', 'Bob :)', 'Angel', 'Bob (alias Billy)'])
>>> print(recordlinkage.clean(s))
mary ann
bob
angel
bob
"""
# Lower s if lower is True
s = s.str.lower() if lower else s
# Remove all content between brackets
if remove_brackets:
s = s.str.replace(r'(\[.*?\]|\(.*?\)|\{.*?\})', '')
# Remove the special characters
s = s.str.replace(replace_by_none, '')
s = s.str.replace(replace_by_whitespace, ' ')
# Remove multiple whitespaces
s = s.str.replace(r'\s\s+', ' ')
# Strip s
s = s.str.lstrip().str.rstrip()
return s
# def clean_phonenumbers(s, country='USA'):
# """ Clean string formatted phonenumbers into string of intergers.
# :return: A Series with cleaned phonenumbers.
# :rtype: pandas.Series
# """
# s = s.astype(str)
# # Remove all special tokens
# s = s.str.replace('[^0-9]+', '')
# return s
def value_occurence(s):
"""
Count the number of times a value occurs. The difference with pandas.value_counts is that this function returns the values for each row.
:return: A Series with value counts.
:rtype: pandas.Series
"""
value_count = s.fillna('NAN')
return value_count.groupby(by=value_count).transform('count')
| from __future__ import division
from __future__ import absolute_import
import logging
import numpy as np
import pandas as pd
import itertools
def clean(s, lower=True, replace_by_none='[^ \-\_A-Za-z0-9]+', replace_by_whitespace='[\-\_]', remove_brackets=True):
"""
clean(lower=True, replace_by_none='[^ \-\_A-Za-z0-9]+', replace_by_whitespace='[\-\_]', remove_brackets=True)
Remove special tokens from a series of strings.
:param lower: Convert the strings in lowercase characters.
:param replace_by_none: A regular expression that is replaced by ''.
:param replace_by_whitespace: A regular expression that is replaced by a whitespace.
:param remove_brackets: Remove all content between brackets and the brackets themselves.
:return: A cleaned Series of strings.
:rtype: pandas.Series
For example:
>>> s = pandas.Series(['Mary-ann', 'Bob :)', 'Angel', 'Bob (alias Billy)'])
>>> print(recordlinkage.clean(s))
mary ann
bob
angel
bob
"""
# Lower s if lower is True
s = s.str.lower() if lower else s
# Remove all content between brackets
if remove_brackets:
s = s.str.replace(r'(\[.*?\]|\(.*?\)|\{.*?\})', '')
# Remove the special characters
s = s.str.replace(replace_by_none, '')
s = s.str.replace(replace_by_whitespace, ' ')
# Remove multiple whitespaces
s = s.str.replace(r'\s\s+', ' ')
# Strip s
s = s.str.lstrip().str.rstrip()
return s
# def clean_phonenumbers(s, country='USA'):
# """ Clean string formatted phonenumbers into string of intergers.
# :return: A Series with cleaned phonenumbers.
# :rtype: pandas.Series
# """
# s = s.astype(str)
# # Remove all special tokens
# s = s.str.replace('[^0-9]+', '')
# return s
def value_occurence(s):
"""
Count the number of times a value occurs. The difference with pandas.value_counts is that this function returns the values for each row.
:return: A Series with value counts.
:rtype: pandas.Series
"""
value_count = s.fillna('NAN')
return value_count.groupby(by=value_count).transform('count')
| bsd-3-clause | Python |
314014f0c4064398eb550f745085ff83bea3adf0 | Update webhost.py | anvanza/invenavi,anvanza/invenavi,anvanza/invenavi | web/webhost.py | web/webhost.py | import sys
import logging
from twisted.python import log
from twisted.internet import reactor, defer
from twisted.web.server import Site
from twisted.web.static import File
from autobahn.websocket import listenWS
from autobahn.wamp import exportRpc, \
WampServerFactory, \
WampServerProtocol
class RPCProtos:
def __init__(self):
logging.info("RPC:\tprotos init.")
@exportRpc
def sayhello(self, msg):
return ("hello " + msg)
@exportRpc
def set_drive(self, throttle, steering):
""" Direct drive. """
# throttle
self._kernel.set_throttle(throttle)
# steering
self._kernel.set_steering(steering)
return {'status':True}
class RPCProtocol(WampServerProtocol):
def onClose(self, wasClean, code, reason):
logging.info("RPC:\t"+reason)
def onSessionOpen(self):
self.protos = RPCProtos()
self.registerForRpc(self.protos, "http://10.0.0.141/ws/protos#")
logging.info("RPC:\tnew connection.")
def run_main_host(kernel, rpc_port):
def __init__(self, kernel):
self._kernel = kernel
log.startLogging(sys.stdout)
factory = WampServerFactory("ws://localhost:9000", debugWamp = True)
factory.protocol = RPCProtocol
factory.setProtocolOptions(allowHixie76 = True)
listenWS(factory)
webdir = File(".")
web = Site(webdir)
reactor.listenTCP(8080, web)
reactor.run()
| import sys
import logging
from twisted.python import log
from twisted.internet import reactor, defer
from twisted.web.server import Site
from twisted.web.static import File
from autobahn.websocket import listenWS
from autobahn.wamp import exportRpc, \
WampServerFactory, \
WampServerProtocol
class RPCProtos:
def __init__(self):
logging.info("RPC:\tprotos init.")
@exportRpc
def sayhello(self, msg):
return ("hello " + msg)
@exportRpc
def set_drive(self, throttle, steering):
""" Direct drive. """
# throttle
self._kernel.set_throttle(throttle)
# steering
self._kernel.set_steering(steering)
return {'status':True}
class RPCProtocol(WampServerProtocol):
def __init__(self, kernel):
self._kernel = kernel
def onClose(self, wasClean, code, reason):
logging.info("RPC:\t"+reason)
def onSessionOpen(self):
self.protos = RPCProtos()
self.registerForRpc(self.protos, "http://10.0.0.141/ws/protos#")
logging.info("RPC:\tnew connection.")
def run_main_host(kernel, rpc_port):
log.startLogging(sys.stdout)
factory = WampServerFactory("ws://localhost:9000", debugWamp = True)
factory.protocol = RPCProtocol(kernel)
factory.setProtocolOptions(allowHixie76 = True)
listenWS(factory)
webdir = File(".")
web = Site(webdir)
reactor.listenTCP(8080, web)
reactor.run()
| mit | Python |
b972634c03a00527c7fadefa873376737ecbcb61 | Update webhost.py | anvanza/invenavi,anvanza/invenavi,anvanza/invenavi | web/webhost.py | web/webhost.py | import sys
import logging
from twisted.python import log
from twisted.internet import reactor, defer
from twisted.web.server import Site
from twisted.web.static import File
from autobahn.websocket import listenWS
from autobahn.wamp import exportRpc, \
WampServerFactory, \
WampServerProtocol
class RPCProtos:
def __init__(self, kernel):
logging.info("RPC:\tprotos init.")
self._kernel = kernel
@exportRpc
def set_drive(self, throttle, steering):
""" Direct drive. """
# throttle
self._kernel.set_throttle(throttle)
# steering
self._kernel.set_steering(steering)
return {'status':True}
@exportRpc
def data(self):
self._kernel.update()
return {'lat': self._kernel.data.lat, 'lon': self._kernel.data.lon}
class RPCProtocol(WampServerProtocol):
def onClose(self, wasClean, code, reason):
logging.info("RPC:\t"+reason)
def onSessionOpen(self):
self.registerForRpc(self.protos, "http://10.0.0.142/ws/protos#")
logging.info("RPC:\tnew connection.")
def run_main_host(kernel, rpc_port):
log.startLogging(sys.stdout)
factory = WampServerFactory("ws://localhost:9000", debugWamp = True)
factory.protocol = RPCProtocol
factory.protocol.protos = RPCProtos(kernel)
factory.setProtocolOptions(allowHixie76 = True)
listenWS(factory)
webdir = File(".")
web = Site(webdir)
reactor.listenTCP(8080, web)
reactor.run()
| import sys
import logging
from twisted.python import log
from twisted.internet import reactor, defer
from twisted.web.server import Site
from twisted.web.static import File
from autobahn.websocket import listenWS
from autobahn.wamp import exportRpc, \
WampServerFactory, \
WampServerProtocol
class RPCProtos:
def __init__(self, kernel):
logging.info("RPC:\tprotos init.")
self._kernel = kernel
@exportRpc
def set_drive(self, throttle, steering):
""" Direct drive. """
# throttle
self._kernel.set_throttle(throttle)
# steering
self._kernel.set_steering(steering)
return {'status':True}
@exportRpc
def data(self):
self._kernel.update()
return {'gpsfix' : self._kernel.data.fix, 'lat': self._kernel.data.lat, 'lon': self._kernel.data.lon}
class RPCProtocol(WampServerProtocol):
def onClose(self, wasClean, code, reason):
logging.info("RPC:\t"+reason)
def onSessionOpen(self):
self.registerForRpc(self.protos, "http://10.0.0.142/ws/protos#")
logging.info("RPC:\tnew connection.")
def run_main_host(kernel, rpc_port):
log.startLogging(sys.stdout)
factory = WampServerFactory("ws://localhost:9000", debugWamp = True)
factory.protocol = RPCProtocol
factory.protocol.protos = RPCProtos(kernel)
factory.setProtocolOptions(allowHixie76 = True)
listenWS(factory)
webdir = File(".")
web = Site(webdir)
reactor.listenTCP(8080, web)
reactor.run() | mit | Python |
6ec4c21cf7af09401aabadff79898fe783efe9bd | Fix import of rotate function | SamHames/scikit-image,michaelpacer/scikit-image,ofgulban/scikit-image,paalge/scikit-image,jwiggins/scikit-image,SamHames/scikit-image,almarklein/scikit-image,rjeli/scikit-image,blink1073/scikit-image,ofgulban/scikit-image,almarklein/scikit-image,SamHames/scikit-image,almarklein/scikit-image,GaZ3ll3/scikit-image,chriscrosscutler/scikit-image,blink1073/scikit-image,oew1v07/scikit-image,ClinicalGraphics/scikit-image,chriscrosscutler/scikit-image,dpshelio/scikit-image,chintak/scikit-image,emon10005/scikit-image,youprofit/scikit-image,michaelpacer/scikit-image,juliusbierk/scikit-image,warmspringwinds/scikit-image,paalge/scikit-image,SamHames/scikit-image,robintw/scikit-image,GaZ3ll3/scikit-image,juliusbierk/scikit-image,Hiyorimi/scikit-image,dpshelio/scikit-image,michaelaye/scikit-image,vighneshbirodkar/scikit-image,Britefury/scikit-image,pratapvardhan/scikit-image,ClinicalGraphics/scikit-image,keflavich/scikit-image,almarklein/scikit-image,oew1v07/scikit-image,ajaybhat/scikit-image,vighneshbirodkar/scikit-image,keflavich/scikit-image,michaelaye/scikit-image,vighneshbirodkar/scikit-image,chintak/scikit-image,WarrenWeckesser/scikits-image,Hiyorimi/scikit-image,rjeli/scikit-image,ajaybhat/scikit-image,warmspringwinds/scikit-image,bsipocz/scikit-image,ofgulban/scikit-image,emon10005/scikit-image,paalge/scikit-image,Midafi/scikit-image,rjeli/scikit-image,Britefury/scikit-image,jwiggins/scikit-image,robintw/scikit-image,bsipocz/scikit-image,pratapvardhan/scikit-image,WarrenWeckesser/scikits-image,bennlich/scikit-image,Midafi/scikit-image,newville/scikit-image,bennlich/scikit-image,chintak/scikit-image,newville/scikit-image,chintak/scikit-image,youprofit/scikit-image | skimage/transform/__init__.py | skimage/transform/__init__.py | from .hough_transform import *
from .radon_transform import *
from .finite_radon_transform import *
from .integral import *
from ._geometric import (estimate_transform,
SimilarityTransform, AffineTransform,
ProjectiveTransform, PolynomialTransform)
from ._warps import warp, warp_coords, rotate, swirl, homography
| from .hough_transform import *
from .radon_transform import *
from .finite_radon_transform import *
from .integral import *
from ._geometric import (estimate_transform,
SimilarityTransform, AffineTransform,
ProjectiveTransform, PolynomialTransform)
from ._warps import warp, warp_coords, swirl, homography
| bsd-3-clause | Python |
308924cd852f13212e0ca758c2eccc588af38b24 | Fix ResourcesProxy::getPath | onitake/Uranium,onitake/Uranium | UM/Qt/Bindings/ResourcesProxy.py | UM/Qt/Bindings/ResourcesProxy.py | from PyQt5.QtCore import QObject, pyqtSlot, QUrl, Q_ENUMS
from UM.Resources import Resources
class ResourcesProxy(QObject):
class Location:
ResourcesLocation = Resources.ResourcesLocation
SettingsLocation = Resources.SettingsLocation
PreferencesLocation = Resources.PreferencesLocation
ThemesLocation = Resources.ThemesLocation
Q_ENUMS(Location)
def __init__(self, parent = None):
super().__init__(parent)
@pyqtSlot(int, str, result=QUrl)
def getPath(self, type, name):
return QUrl.fromLocalFile(Resources.getPath(type, name))
@pyqtSlot(str, result=QUrl)
def getIcon(self, name):
return QUrl.fromLocalFile(Resources.getIcon(name))
| from PyQt5.QtCore import QObject, pyqtSlot, QUrl
from UM.Resources import Resources
class ResourcesProxy(QObject):
ResourcesLocation = Resources.ResourcesLocation
SettingsLocation = Resources.SettingsLocation
PreferencesLocation = Resources.PreferencesLocation
def __init__(self, parent = None):
super().__init__(parent)
@pyqtSlot(int, result=QUrl)
def getPath(self, type):
return QUrl.fromLocalFile(Resources.getPath(type))
@pyqtSlot(str, result=QUrl)
def getIcon(self, name):
return QUrl.fromLocalFile(Resources.getIcon(name))
| agpl-3.0 | Python |
c9f990ff4095b7fb361b2d59c0c5b2c9555643ff | Remove left over merge conflict text | uccser/cs-unplugged,uccser/cs-unplugged,uccser/cs-unplugged,uccser/cs-unplugged | csunplugged/tests/BaseTest.py | csunplugged/tests/BaseTest.py | """Base test class with methods implemented for Django testing."""
from django.test import TestCase
from django.contrib.auth.models import User
from django.test.client import Client
from django.utils.translation import activate
class BaseTest(SimpleTestCase):
"""Base test class with methods implemented for Django testing."""
def __init__(self, *args, **kwargs):
"""Create the BaseTest object by calling the parent's constructor."""
super().__init__(*args, **kwargs)
self.language = None
@classmethod
def setUpClass(cls):
"""Automatically called before tests in class."""
super(BaseTest, cls).setUpClass()
@classmethod
def tearDownClass(cls):
"""Automatically called after each test."""
super(BaseTest, cls).tearDownClass()
def setUp(self):
"""Automatically called before each test.
Sets the language if specified and creates a new client.
"""
if self.language is not None:
activate(self.language)
self.client = Client()
def tearDown(self):
"""Automatically called after each test.
Deletes test user.
"""
pass
| """Base test class with methods implemented for Django testing."""
from django.test import TestCase
from django.contrib.auth.models import User
from django.test.client import Client
from django.utils.translation import activate
<<<<<<< HEAD
class BaseTest(SimpleTestCase):
"""Base test class with methods implemented for Django testing."""
def __init__(self, *args, **kwargs):
"""Create the BaseTest object by calling the parent's constructor."""
super().__init__(*args, **kwargs)
self.language = None
@classmethod
def setUpClass(cls):
"""Automatically called before tests in class."""
super(BaseTest, cls).setUpClass()
@classmethod
def tearDownClass(cls):
"""Automatically called after each test."""
super(BaseTest, cls).tearDownClass()
def setUp(self):
"""Automatically called before each test.
Sets the language if specified and creates a new client.
"""
if self.language is not None:
activate(self.language)
self.client = Client()
def tearDown(self):
"""Automatically called after each test.
Deletes test user.
"""
pass
| mit | Python |
3d56133237b8ca79eb6f96c5cd94dc8bb0f71476 | Update __openerp__.py | ingadhoc/partner | partner_school/__openerp__.py | partner_school/__openerp__.py | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 ADHOC SA (http://www.adhoc.com.ar)
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'author': u'ADHOC SA',
'category': 'base.module_category_hidden',
'demo': [],
'depends': ['base'],
'installable': True,
'license': 'AGPL-3',
'name': u'Partner School Data',
'test': [],
'data': [
'view/course_view.xml',
'view/partner_view.xml',
'security/ir.model.access.csv',
],
'version': '9.0.1.0.0',
'website': 'www.adhoc.com.ar'
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 ADHOC SA (http://www.adhoc.com.ar)
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'author': u'ADHOC SA',
'category': 'base.module_category_hidden',
'demo': [],
'depends': ['base'],
'installable': True,
'license': 'AGPL-3',
'name': u'Partner School Data',
'test': [],
'data': [
'view/course_view.xml',
'view/partner_view.xml',
'security/ir.model.access.csv',
],
'version': '9.0.1.0.0',
'website': 'www.adhoc.com.ar',
'license': 'AGPL-3'
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | Python |
305da4001219a014360f4529a2cfa8428a53a9a0 | convert language to locale | invalid-access/django-phonenumber-field,thenewguy/django-phonenumber-field,bramd/django-phonenumber-field,thenewguy/django-phonenumber-field,ellmetha/django-phonenumber-field,hovel/django-phonenumber-field,hwkns/django-phonenumber-field,bramd/django-phonenumber-field,invalid-access/django-phonenumber-field,hwkns/django-phonenumber-field,thenewguy/django-phonenumber-field,ellmetha/django-phonenumber-field,hovel/django-phonenumber-field,stefanfoulis/django-phonenumber-field | phonenumber_field/widgets.py | phonenumber_field/widgets.py | # -*- coding: utf-8 -*-
from babel import Locale
from phonenumbers.data import _COUNTRY_CODE_TO_REGION_CODE
from django.utils import translation
from django.forms import Select, TextInput
from django.forms.widgets import MultiWidget
from phonenumber_field.phonenumber import PhoneNumber
class PhonePrefixSelect(Select):
initial = None
def __init__(self, initial=None):
choices = [('', '---------')]
locale = Locale(translation.to_locale(translation.get_language()))
for prefix, values in _COUNTRY_CODE_TO_REGION_CODE.iteritems():
prefix = '+%d' % prefix
if initial and initial in values:
self.initial = prefix
for country_code in values:
country_name = locale.territories.get(country_code)
if country_name:
choices.append((prefix, u'%s %s' % (country_name, prefix)))
return super(PhonePrefixSelect, self).__init__(
choices=sorted(choices, key=lambda item: item[1]))
def render(self, name, value, *args, **kwargs):
return super(PhonePrefixSelect, self).render(
name, value or self.initial, *args, **kwargs)
class PhoneNumberPrefixWidget(MultiWidget):
"""
A Widget that splits phone number input into:
- a country select box for phone prefix
- an input for local phone number
"""
def __init__(self, attrs=None, initial=None):
widgets = (PhonePrefixSelect(initial), TextInput(),)
super(PhoneNumberPrefixWidget, self).__init__(widgets, attrs)
def decompress(self, value):
if value:
if type(value) == PhoneNumber:
if value.country_code and value.national_number:
return ["+%d" % value.country_code, value.national_number]
else:
return value.split('.')
return [None, None]
def value_from_datadict(self, data, files, name):
values = super(PhoneNumberPrefixWidget, self).value_from_datadict(
data, files, name)
return '%s.%s' % tuple(values)
| # -*- coding: utf-8 -*-
from babel import Locale
from phonenumbers.data import _COUNTRY_CODE_TO_REGION_CODE
from django.utils import translation
from django.forms import Select, TextInput
from django.forms.widgets import MultiWidget
from phonenumber_field.phonenumber import PhoneNumber
class PhonePrefixSelect(Select):
initial = None
def __init__(self, initial=None):
choices = [('', '---------')]
locale = Locale(translation.get_language())
for prefix, values in _COUNTRY_CODE_TO_REGION_CODE.iteritems():
prefix = '+%d' % prefix
if initial and initial in values:
self.initial = prefix
for country_code in values:
country_name = locale.territories.get(country_code)
if country_name:
choices.append((prefix, u'%s %s' % (country_name, prefix)))
return super(PhonePrefixSelect, self).__init__(
choices=sorted(choices, key=lambda item: item[1]))
def render(self, name, value, *args, **kwargs):
return super(PhonePrefixSelect, self).render(
name, value or self.initial, *args, **kwargs)
class PhoneNumberPrefixWidget(MultiWidget):
"""
A Widget that splits phone number input into:
- a country select box for phone prefix
- an input for local phone number
"""
def __init__(self, attrs=None, initial=None):
widgets = (PhonePrefixSelect(initial), TextInput(),)
super(PhoneNumberPrefixWidget, self).__init__(widgets, attrs)
def decompress(self, value):
if value:
if type(value) == PhoneNumber:
if value.country_code and value.national_number:
return ["+%d" % value.country_code, value.national_number]
else:
return value.split('.')
return [None, None]
def value_from_datadict(self, data, files, name):
values = super(PhoneNumberPrefixWidget, self).value_from_datadict(
data, files, name)
return '%s.%s' % tuple(values)
| mit | Python |
4173a544ce18dafe46be822921763e37d4a44a18 | add cvc4 | ultimate-pa/benchexec,ultimate-pa/benchexec,IljaZakharov/benchexec,ultimate-pa/benchexec,dbeyer/benchexec,ultimate-pa/benchexec,sosy-lab/benchexec,dbeyer/benchexec,IljaZakharov/benchexec,sosy-lab/benchexec,sosy-lab/benchexec,martin-neuhaeusser/benchexec,sosy-lab/benchexec,sosy-lab/benchexec,dbeyer/benchexec,martin-neuhaeusser/benchexec,ultimate-pa/benchexec,martin-neuhaeusser/benchexec,dbeyer/benchexec,sosy-lab/benchexec,martin-neuhaeusser/benchexec,ultimate-pa/benchexec,IljaZakharov/benchexec,IljaZakharov/benchexec | benchexec/tools/ultimateautomizer.py | benchexec/tools/ultimateautomizer.py | """
BenchExec is a framework for reliable benchmarking.
This file is part of BenchExec.
Copyright (C) 2015 Daniel Dietsch
All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from . import ultimate
class Tool(ultimate.UltimateTool):
REQUIRED_PATHS = [
"artifacts.xml",
"AutomizerTermination.xml",
"Automizer.xml",
"AutomizerWitnessValidation.xml",
"configuration",
"cvc4"
"features",
"Kojak.xml",
"p2",
"plugins",
"svcomp-Deref-32bit-Automizer_Bitvector.epf",
"svcomp-Deref-32bit-Automizer_Default.epf",
"svcomp-DerefFreeMemtrack-32bit-Automizer_Bitvector.epf",
"svcomp-DerefFreeMemtrack-32bit-Automizer_Default.epf",
"svcomp-Overflow-64bit-Automizer_Default.epf",
"svcomp-Reach-32bit-Automizer_Bitvector.epf",
"svcomp-Reach-32bit-Automizer_Default.epf",
"svcomp-Reach-64bit-Automizer_Bitvector.epf",
"svcomp-Reach-64bit-Automizer_Default.epf",
"svcomp-Termination-64bit-Automizer_Default.epf",
"Ultimate",
"Ultimate.ini",
"Ultimate.py",
"z3"
]
def name(self):
return 'ULTIMATE Automizer'
| """
BenchExec is a framework for reliable benchmarking.
This file is part of BenchExec.
Copyright (C) 2015 Daniel Dietsch
All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from . import ultimate
class Tool(ultimate.UltimateTool):
REQUIRED_PATHS = [
"artifacts.xml",
"AutomizerTermination.xml",
"Automizer.xml",
"AutomizerWitnessValidation.xml",
"configuration",
"features",
"Kojak.xml",
"p2",
"plugins",
"svcomp-Deref-32bit-Automizer_Bitvector.epf",
"svcomp-Deref-32bit-Automizer_Default.epf",
"svcomp-DerefFreeMemtrack-32bit-Automizer_Bitvector.epf",
"svcomp-DerefFreeMemtrack-32bit-Automizer_Default.epf",
"svcomp-Overflow-64bit-Automizer_Default.epf",
"svcomp-Reach-32bit-Automizer_Bitvector.epf",
"svcomp-Reach-32bit-Automizer_Default.epf",
"svcomp-Reach-64bit-Automizer_Bitvector.epf",
"svcomp-Reach-64bit-Automizer_Default.epf",
"svcomp-Termination-64bit-Automizer_Default.epf",
"Ultimate",
"Ultimate.ini",
"Ultimate.py",
"z3"
]
def name(self):
return 'ULTIMATE Automizer'
| apache-2.0 | Python |
8d47940ddf80d49102f72f78d485d884b14694ff | implement static rebalance test with index | hsharsha/perfrunner,mikewied/perfrunner,PaintScratcher/perfrunner,couchbase/perfrunner,EricACooper/perfrunner,thomas-couchbase/perfrunner,couchbase/perfrunner,vmx/perfrunner,couchbase/perfrunner,pavel-paulau/perfrunner,couchbase/perfrunner,mikewied/perfrunner,EricACooper/perfrunner,couchbase/perfrunner,EricACooper/perfrunner,thomas-couchbase/perfrunner,vmx/perfrunner,pavel-paulau/perfrunner,pavel-paulau/perfrunner,pavel-paulau/perfrunner,couchbase/perfrunner,pavel-paulau/perfrunner,PaintScratcher/perfrunner,dkao-cb/perfrunner,EricACooper/perfrunner,dkao-cb/perfrunner,hsharsha/perfrunner | perfrunner/tests/rebalance.py | perfrunner/tests/rebalance.py | import time
from multiprocessing import Event
from perfrunner.tests import PerfTest
from perfrunner.tests.index import IndexTest
def with_delay(method):
def wrapper(self, *args, **kwargs):
time.sleep(self.rebalance_settings.start_after)
method(self, *args, **kwargs)
time.sleep(self.rebalance_settings.stop_after)
self.shutdown_event.set()
return wrapper
class RebalanceTest(PerfTest):
def __init__(self, *args, **kwargs):
super(RebalanceTest, self).__init__(*args, **kwargs)
self.shutdown_event = Event()
self.rebalance_settings = self.test_config.get_rebalance_settings()
@with_delay
def rebalance_in(self):
for cluster in self.cluster_spec.get_clusters():
master = cluster[0]
known_nodes = cluster[:self.rebalance_settings.nodes_after]
ejected_nodes = []
self.rest.rebalance(master, known_nodes, ejected_nodes)
self.monitor.monitor_rebalance(master)
class StaticRebalanceTest(RebalanceTest):
def run(self):
self._run_load_phase()
self._compact_bucket()
self.reporter.start()
self.rebalance_in()
value = self.reporter.finish('Rebalance')
self.reporter.post_to_sf(self, value)
self._debug()
class StaticRebalanceWithIndexTest(RebalanceTest, IndexTest):
def run(self):
self._run_load_phase()
self._compact_bucket()
self._define_ddocs()
self._build_index()
self.reporter.start()
self.rebalance_in()
value = self.reporter.finish('Rebalance')
self.reporter.post_to_sf(self, value)
self._debug()
| import time
from perfrunner.tests import PerfTest
from multiprocessing import Event
def with_delay(method):
def wrapper(self, *args, **kwargs):
time.sleep(self.rebalance_settings.start_after)
method(self, *args, **kwargs)
time.sleep(self.rebalance_settings.stop_after)
self.shutdown_event.set()
return wrapper
class RebalanceTest(PerfTest):
def __init__(self, *args, **kwargs):
super(RebalanceTest, self).__init__(*args, **kwargs)
self.shutdown_event = Event()
self.rebalance_settings = self.test_config.get_rebalance_settings()
@with_delay
def rebalance_in(self):
for cluster in self.cluster_spec.get_clusters():
master = cluster[0]
known_nodes = cluster[:self.rebalance_settings.nodes_after]
ejected_nodes = []
self.rest.rebalance(master, known_nodes, ejected_nodes)
self.monitor.monitor_rebalance(master)
class StaticRebalanceTest(RebalanceTest):
def run(self):
self._run_load_phase()
self._compact_bucket()
self.reporter.start()
self.rebalance_in()
value = self.reporter.finish('Rebalance')
self.reporter.post_to_sf(self, value)
self._debug()
| apache-2.0 | Python |
a02fd405c1fd5a86a4a3fb2e374d0c23ea204834 | use less stupid words in tests | martinsbalodis/warc-tools,martinsbalodis/warc-tools | hanzo/httptools/tests/parse_test.py | hanzo/httptools/tests/parse_test.py | import unittest2
from StringIO import StringIO
from hanzo.httptools.messaging import RequestParser, ResponseParser
get_request = "\r\n".join( [
"GET / HTTP/1.1",
"Host: example.org",
"",
"",
])
get_response = "\r\n".join( [
"HTTP/1.1 200 OK",
"Host: example.org",
"Content-Length:5",
"",
"tests",
])
class ParseRequestTest(unittest2.TestCase):
def runTest(self):
print repr(get_request)
buffer = StringIO()
p = RequestParser(buffer)
text = p.feed(get_request)
self.assertEqual(text, '')
self.assertEqual(get_request, buffer.getvalue())
self.assertTrue(p.complete)
print repr(get_response)
buffer = StringIO()
p = ResponseParser(buffer, p.header)
text = p.feed(get_response)
self.assertEqual(text, '')
self.assertEqual(get_response, buffer.getvalue())
self.assertTrue(p.complete)
if __name__ == '__main__':
unittest2.main()
| import unittest2
from StringIO import StringIO
from hanzo.httptools.messaging import RequestParser, ResponseParser
get_request = "\r\n".join( [
"GET / HTTP/1.1",
"Host: example.org",
"",
"",
])
get_response = "\r\n".join( [
"HTTP/1.1 200 OK",
"Host: example.org",
"Content-Length:5",
"",
"butts",
])
class ParseRequestTest(unittest2.TestCase):
def runTest(self):
print repr(get_request)
buffer = StringIO()
p = RequestParser(buffer)
text = p.feed(get_request)
self.assertEqual(text, '')
self.assertEqual(get_request, buffer.getvalue())
self.assertTrue(p.complete)
print repr(get_response)
buffer = StringIO()
p = ResponseParser(buffer, p.header)
text = p.feed(get_response)
self.assertEqual(text, '')
self.assertEqual(get_response, buffer.getvalue())
self.assertTrue(p.complete)
if __name__ == '__main__':
unittest2.main()
| mit | Python |
cebe264462553bca38be7938edfecd323a5413cc | add unit tests for bivariate k sample | kellieotto/permute,statlab/permute,kellieotto/permute,jarrodmillman/permute | permute/tests/test_ksample.py | permute/tests/test_ksample.py | from __future__ import (absolute_import, division,
print_function, unicode_literals)
from nose.plugins.attrib import attr
from nose.tools import assert_raises, raises
import numpy as np
from numpy.random import RandomState
from numpy.testing import assert_equal, assert_almost_equal, assert_array_less
from scipy.stats import hypergeom, binom
from cryptorandom.cryptorandom import SHA256
from ..ksample import (k_sample,
one_way_anova,
bivariate_k_sample,
two_way_anova)
import permute.data as data
from permute.utils import get_prng
def test_worms_ksample():
worms = data.worms()
res = k_sample(worms.x, worms.y, stat='one-way anova', reps=1000)
assert_array_less(0.005, res[0])
assert_array_less(res[0], 0.02)
def test_one_way_anova():
group = np.ones(5)
x = np.array(range(5))
xbar = np.mean(x)
assert_equal(one_way_anova(x, group, xbar), 0)
group = np.array([1]*3 + [2]*2)
expected = 3*1**2 + 2*1.5**2
assert_equal(one_way_anova(x, group, xbar), expected)
def test_two_way_anova():
prng = get_prng(100)
group1 = np.array([1]*5 + [2]*5)
group2 = np.array(list(range(5))*2)
x = prng.randint(1, 10, 10)
xbar = np.mean(x)
val = two_way_anova(x, group1, group2, xbar)
assert_almost_equal(val, 0.296, 3)
x = group2 + 1
xbar = 3
assert_equal(two_way_anova(x, group1, group2, xbar), 1)
def test_testosterone_ksample():
testosterone = data.testosterone()
x = np.hstack(testosterone.tolist())
group1 = np.hstack([[i]*5 for i in range(len(testosterone))])
group2 = np.array(list(range(5))*len(testosterone))
assert_equal(len(group1), 55)
assert_equal(len(group2), 55)
assert_equal(len(x), 55)
res = bivariate_k_sample(x, group1, group2, reps=5000, seed=5)
assert_array_less(res[0], 0.0002)
| from __future__ import (absolute_import, division,
print_function, unicode_literals)
from nose.plugins.attrib import attr
from nose.tools import assert_raises, raises
import numpy as np
from numpy.random import RandomState
from numpy.testing import assert_equal, assert_almost_equal, assert_array_less
from scipy.stats import hypergeom, binom
from cryptorandom.cryptorandom import SHA256
from ..ksample import (k_sample, one_way_anova)
import permute.data as data
def test_worms_ksample():
worms = data.worms()
res = k_sample(worms.x, worms.y, stat='one-way anova')
assert_array_less(0.005, res[0])
assert_array_less(res[0], 0.02)
def test_one_way_anova():
group = np.ones(5)
x = np.array(range(5))
xbar = np.mean(x)
assert_equal(one_way_anova(x, group, xbar), 0)
group = np.array([1]*3 + [2]*2)
expected = 3*1**2 + 2*1.5**2
assert_equal(one_way_anova(x, group, xbar), expected) | bsd-2-clause | Python |
e1c453c3a1c7e63c48e86bb4b60517d8c52bbe37 | normalise kwarg in HOG unnecessary (should be set to default False, not None). Also changed the image numpy arrays to be logged and normalized between -1 and 1 | cavestruz/StrongCNN,cavestruz/StrongCNN | pipeline/image_processing.py | pipeline/image_processing.py | import numpy as np
import copy
from sklearn.base import BaseEstimator
def load_images(filenames):
'''Expects filenames to be a list of .fits file locations'''
from astropy.io.fits import getdata
return [np.log(getdata(filename))/np.max(np.abs(np.log(getdata(filename)))) for filename in filenames]
class MedianSmooth(BaseEstimator):
def __init__(self, radius = 3):
self.radius = radius
def fit(self, images, y = None):
return self
def transform(self, images):
from skimage.filters.rank import median
from skimage.morphology import disk
return np.array([median(image, disk(self.radius))
for image in images])
def fit_transform(self, images, y = None):
return self.transform(images)
class HOG(BaseEstimator):
def __init__(self, orientations = 9, pixels_per_cell = (8, 8),
cells_per_block = (3, 3) ):
self.orientations = orientations
self.pixels_per_cell = pixels_per_cell
self.cells_per_block = cells_per_block
def fit(self, images, y = None):
return self
def transform(self, images):
from skimage.feature import hog
return np.array([hog(image,
orientations = self.orientations,
pixels_per_cell = self.pixels_per_cell,
cells_per_block = self.cells_per_block,
)
for image in images])
def fit_transform(self, images, y = None):
return self.transform(images)
| import numpy as np
import copy
from sklearn.base import BaseEstimator
def load_images(filenames):
'''Expects filenames to be a list of .fits file locations'''
from astropy.io.fits import getdata
return [getdata(filename).copy()/np.max(getdata(filename).copy()) for filename in filenames]
class MedianSmooth(BaseEstimator):
def __init__(self, radius = 3):
self.radius = radius
def fit(self, images, y = None):
return self
def transform(self, images):
from skimage.filters.rank import median
from skimage.morphology import disk
return np.array([median(image, disk(self.radius))
for image in images])
def fit_transform(self, images, y = None):
return self.transform(images)
class HOG(BaseEstimator):
def __init__(self, orientations = 9, pixels_per_cell = (8, 8),
cells_per_block = (3, 3), normalise = False):
self.orientations = orientations
self.pixels_per_cell = pixels_per_cell
self.cells_per_block = cells_per_block
self.normalise = normalise
def fit(self, images, y = None):
return self
def transform(self, images):
from skimage.feature import hog
return np.array([hog(image,
orientations = self.orientations,
pixels_per_cell = self.pixels_per_cell,
cells_per_block = self.cells_per_block,
normalise = self.normalise)
for image in images])
def fit_transform(self, images, y = None):
return self.transform(images)
| mit | Python |
0ab2da918cbf0e58cf850f6868f5b896ea5c3893 | Make NickServIdentify play nice with service specific configs | Heufneutje/PyHeufyBot,Heufneutje/PyHeufyBot | heufybot/modules/util/nickservid.py | heufybot/modules/util/nickservid.py | from twisted.plugin import IPlugin
from heufybot.moduleinterface import BotModule, IBotModule
from zope.interface import implements
import logging
class NickServIdentify(BotModule):
implements(IPlugin, IBotModule)
name = "NickServIdentify"
def hookBot(self, bot):
self.bot = bot
def actions(self):
return [ ("welcome", 1, self.identify) ]
def identify(self, serverName):
if not self.bot.moduleHandler.useModuleOnServer(self.name, serverName):
return
if "nickserv_nick" not in self.bot.config and "nickserv_nick" not in self.bot.config["servers"][serverName]:
nick = "NickServ"
self.bot.servers[serverName].log("No valid NickServ nickname was found; defaulting to NickServ...",
level=logging.WARNING)
else:
nick = self.bot.config.serverItemWithDefault(serverName, "nickserv_nick", "NickServ")
if "nickserv_pass" not in self.bot.config and "nickserv_pass" not in self.bot.config["servers"][serverName]:
self.bot.servers[serverName].log("No NickServ password found. Aborting authentication...",
level=logging.ERROR)
return
password = self.bot.config.serverItemWithDefault(serverName, "nickserv_pass", None)
self.bot.servers[serverName].outputHandler.cmdPRIVMSG(nick, "IDENTIFY {}".format(password))
nickServID = NickServIdentify()
| from twisted.plugin import IPlugin
from heufybot.moduleinterface import BotModule, IBotModule
from zope.interface import implements
import logging
class NickServIdentify(BotModule):
implements(IPlugin, IBotModule)
name = "NickServIdentify"
def hookBot(self, bot):
self.bot = bot
def actions(self):
return [ ("welcome", 1, self.identify) ]
def identify(self, serverName):
if not self.bot.moduleHandler.useModuleOnServer(self.name, serverName):
return
if "nickserv_nick" not in self.bot.config:
nick = "NickServ"
self.bot.servers[serverName].log("No valid NickServ nickname was found; defaulting to NickServ...",
level=logging.WARNING)
else:
nick = self.bot.config["nickserv_nick"]
if "nickserv_pass" not in self.bot.config:
self.bot.servers[serverName].log("No NickServ password found. Aborting authentication...",
level=logging.ERROR)
return
password = self.bot.config["nickserv_pass"]
self.bot.servers[serverName].outputHandler.cmdPRIVMSG(nick, "IDENTIFY {}".format(password))
nickServID = NickServIdentify()
| mit | Python |
c1ea1e71155bea6e0fa6fe4dcb67bec364ae79ec | remove stray print statements | opencivicdata/python-legistar-scraper,datamade/python-legistar-scraper | legistar/people.py | legistar/people.py | from .base import LegistarScraper
class LegistarPersonScraper(LegistarScraper):
MEMBERLIST = None
def councilMembers(self, follow_links=True) :
for page in self.pages(self.MEMBERLIST) :
table = page.xpath(
"//table[@id='ctl00_ContentPlaceHolder1_gridPeople_ctl00']")[0]
for councilman, headers, row in self.parseDataTable(table):
if follow_links and type(councilman['Person Name']) == dict:
detail_url = councilman['Person Name']['url']
councilman_details = self.lxmlize(detail_url)
detail_div = councilman_details.xpath(".//div[@id='ctl00_ContentPlaceHolder1_pageDetails']")[0]
councilman.update(self.parseDetails(detail_div))
img = councilman_details.xpath(
"//img[@id='ctl00_ContentPlaceHolder1_imgPhoto']")
if img :
councilman['Photo'] = img[0].get('src')
committee_table = councilman_details.xpath(
"//table[@id='ctl00_ContentPlaceHolder1_gridDepartments_ctl00']")[0]
committees = self.parseDataTable(committee_table)
yield councilman, committees
else :
yield councilman
| from .base import LegistarScraper
class LegistarPersonScraper(LegistarScraper):
MEMBERLIST = None
def councilMembers(self, follow_links=True) :
for page in self.pages(self.MEMBERLIST) :
table = page.xpath(
"//table[@id='ctl00_ContentPlaceHolder1_gridPeople_ctl00']")[0]
for councilman, headers, row in self.parseDataTable(table):
if follow_links and type(councilman['Person Name']) == dict:
print(councilman)
detail_url = councilman['Person Name']['url']
councilman_details = self.lxmlize(detail_url)
detail_div = councilman_details.xpath(".//div[@id='ctl00_ContentPlaceHolder1_pageDetails']")[0]
councilman.update(self.parseDetails(detail_div))
print(councilman)
img = councilman_details.xpath(
"//img[@id='ctl00_ContentPlaceHolder1_imgPhoto']")
if img :
councilman['Photo'] = img[0].get('src')
committee_table = councilman_details.xpath(
"//table[@id='ctl00_ContentPlaceHolder1_gridDepartments_ctl00']")[0]
committees = self.parseDataTable(committee_table)
yield councilman, committees
else :
yield councilman
| bsd-3-clause | Python |
70737f757c67446859b604425d28dca233b99769 | fix xml order in terp | Som-Energia/somenergia-generationkwh,Som-Energia/somenergia-generationkwh | som_generationkwh/__terp__.py | som_generationkwh/__terp__.py | # -*- coding: utf-8 -*-
{
"name": "Generation kWh",
"description": """Support for SomEnergia's Generation kWh in GisceERP""",
"version": "2.0",
"author": "GISCE-TI & Som Energia",
"category": "Master",
"depends": [
'base',
"poweremail",
"poweremail_references",
'som_polissa_soci',
'som_inversions',
'som_plantmeter',
],
"init_xml": [],
"demo_xml": [],
"update_xml": [
"som_generationkwh_data.xml",
"giscedata_facturacio_view.xml",
"som_generationkwh_view.xml",
"wizard/wizard_investment_activation.xml",
"wizard/wizard_investment_amortization.xml",
"wizard/wizard_investment_payment.xml",
"wizard/wizard_investment_creation.xml",
"wizard/wizard_investment_cancel_or_resign.xml",
"investment_view.xml",
"assignment_view.xml",
"somenergia_soci_view.xml",
"somenergia_soci_data.xml",
"security/som_generationkwh_security.xml",
"security/ir.model.access.csv",
"amortization_report.xml"
],
"active": False,
"installable": True
}
| # -*- coding: utf-8 -*-
{
"name": "Generation kWh",
"description": """Support for SomEnergia's Generation kWh in GisceERP""",
"version": "2.0",
"author": "GISCE-TI & Som Energia",
"category": "Master",
"depends": [
'base',
"poweremail",
"poweremail_references",
'som_polissa_soci',
'som_inversions',
'som_plantmeter',
],
"init_xml": [],
"demo_xml": [],
"update_xml": [
"som_generationkwh_data.xml",
"giscedata_facturacio_view.xml",
"som_generationkwh_view.xml",
"wizard/wizard_investment_activation.xml",
"wizard/wizard_investment_amortization.xml",
"wizard/wizard_investment_payment.xml",
"investment_view.xml",
"assignment_view.xml",
"somenergia_soci_view.xml",
"somenergia_soci_data.xml",
"security/som_generationkwh_security.xml",
"security/ir.model.access.csv",
"amortization_report.xml",
"wizard/wizard_investment_creation.xml",
"wizard/wizard_investment_cancel_or_resign.xml"
],
"active": False,
"installable": True
}
| agpl-3.0 | Python |
35d14348ce419421bba2b043ea2818c185526301 | Comment out fix_fee_product_index from migration | cfpb/owning-a-home-api | ratechecker/migrations/0002_remove_fee_loader.py | ratechecker/migrations/0002_remove_fee_loader.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.22 on 2019-10-31 16:33
from __future__ import unicode_literals
from django.db import migrations, OperationalError, ProgrammingError
def fix_fee_product_index(apps, schema_editor):
try:
schema_editor.execute(
'DROP INDEX idx_16977_product_id;'
'ALTER TABLE cfpb.ratechecker_fee '
'DROP CONSTRAINT IF EXISTS idx_16977_product_id;'
'ALTER TABLE cfpb.ratechecker_fee '
'ADD CONSTRAINT idx_16977_product_id '
'UNIQUE (product_id, state_id, lender, single_family, condo, coop);'
)
except (ProgrammingError, OperationalError):
pass
class Migration(migrations.Migration):
dependencies = [
('ratechecker', '0001_initial'),
]
operations = [
#migrations.RunPython(fix_fee_product_index),
migrations.AlterUniqueTogether(
name='fee',
unique_together=set([]),
),
migrations.RemoveField(
model_name='fee',
name='plan',
),
migrations.DeleteModel(
name='Fee',
),
]
| # -*- coding: utf-8 -*-
# Generated by Django 1.11.22 on 2019-10-31 16:33
from __future__ import unicode_literals
from django.db import migrations, OperationalError, ProgrammingError
def fix_fee_product_index(apps, schema_editor):
table_name = 'cfpb.ratechecker_fee'
index_name = 'idx_16977_product_id'
try:
schema_editor.execute(
'DROP INDEX idx_16977_product_id;'
'ALTER TABLE cfpb.ratechecker_fee '
'DROP CONSTRAINT IF EXISTS idx_16977_product_id;'
'ALTER TABLE cfpb.ratechecker_fee '
'ADD CONSTRAINT idx_16977_product_id '
'UNIQUE (product_id, state_id, lender, single_family, condo, coop);'
)
except (ProgrammingError, OperationalError):
pass
class Migration(migrations.Migration):
dependencies = [
('ratechecker', '0001_initial'),
]
operations = [
migrations.RunPython(fix_fee_product_index),
migrations.AlterUniqueTogether(
name='fee',
unique_together=set([]),
),
migrations.RemoveField(
model_name='fee',
name='plan',
),
migrations.DeleteModel(
name='Fee',
),
]
| cc0-1.0 | Python |
d3b7c78bb2505603b7d827f388cf9b68de70f442 | Añade filtros para busqueda de event | abertal/alpha,migonzalvar/alpha,migonzalvar/alpha,abertal/alpha,migonzalvar/alpha,abertal/alpha,migonzalvar/alpha,abertal/alpha | webapp/filters.py | webapp/filters.py | from django.db import models
from django.db.models import Value as V
from django.db.models import Q
from django.db.models.functions import Concat
from django.utils.translation import ugettext_lazy as _
import django_filters
from core.models import Group, Event
class PersonFilter(django_filters.FilterSet):
q = django_filters.CharFilter(
label=_('Nombre'), name='name', method='custom_filter')
def custom_filter(self, queryset, name, value):
return queryset.annotate(
full_name=Concat('name', V(' '), 'surname',
output_field=models.TextField())).filter(Q(full_name__icontains=value))
class FromPersonFilter(django_filters.FilterSet):
q = django_filters.CharFilter(
label=_('Nombre'), name='person__name', method='custom_filter')
def custom_filter(self, queryset, name, value):
return queryset.annotate(
full_name=Concat('person__name', V(' '), 'person__surname',
output_field=models.TextField())).filter(Q(full_name__icontains=value))
class GroupFilter(django_filters.FilterSet):
q = django_filters.CharFilter(name='group_name', method='custom_filter')
class Meta:
model = Group
fields = []
def custom_filter(self, queryset, name, value):
return queryset.filter(Q(group_name__icontains=value))
class EventFilter(django_filters.FilterSet):
q = django_filters.CharFilter(name='event_name', method='custom_filter')
class Meta:
model = Event
fields = []
def custom_filter(self, queryset, name, value):
return queryset.filter(Q(event_name__icontains=value))
| from django.db import models
from django.db.models import Value as V
from django.db.models import Q
from django.db.models.functions import Concat
from django.utils.translation import ugettext_lazy as _
import django_filters
from core.models import Group
class PersonFilter(django_filters.FilterSet):
q = django_filters.CharFilter(
label=_('Nombre'), name='name', method='custom_filter')
def custom_filter(self, queryset, name, value):
return queryset.annotate(
full_name=Concat('name', V(' '), 'surname',
output_field=models.TextField())).filter(Q(full_name__icontains=value))
class FromPersonFilter(django_filters.FilterSet):
q = django_filters.CharFilter(
label=_('Nombre'), name='person__name', method='custom_filter')
def custom_filter(self, queryset, name, value):
return queryset.annotate(
full_name=Concat('person__name', V(' '), 'person__surname',
output_field=models.TextField())).filter(Q(full_name__icontains=value))
class GroupFilter(django_filters.FilterSet):
q = django_filters.CharFilter(name='group_name', method='custom_filter')
class Meta:
model = Group
fields = []
def custom_filter(self, queryset, name, value):
return queryset.filter(Q(group_name__icontains=value))
| bsd-3-clause | Python |
2a83e5eba8a92d7391fb914e335859a38dd19451 | Add lint test and format generated code (#4114) | googleapis/google-cloud-java,googleapis/google-cloud-java,googleapis/google-cloud-java | java-monitoring/google-cloud-monitoring/synth.py | java-monitoring/google-cloud-monitoring/synth.py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import synthtool as s
import synthtool.gcp as gcp
import synthtool.languages.java as java
gapic = gcp.GAPICGenerator()
service = 'monitoring'
versions = ['v3']
config_pattern = '/google/monitoring/artman_monitoring.yaml'
for version in versions:
library = gapic.java_library(
service=service,
version=version,
config_path=config_pattern.format(version=version),
artman_output_name='')
s.copy(library / f'gapic-google-cloud-{service}-{version}/src', 'src')
s.copy(library / f'grpc-google-cloud-{service}-{version}/src', f'../../google-api-grpc/grpc-google-cloud-{service}-{version}/src')
s.copy(library / f'proto-google-cloud-{service}-{version}/src', f'../../google-api-grpc/proto-google-cloud-{service}-{version}/src')
java.format_code('./src')
java.format_code(f'../../google-api-grpc/grpc-google-cloud-{service}-{version}/src')
java.format_code(f'../../google-api-grpc/proto-google-cloud-{service}-{version}/src')
| # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import synthtool as s
import synthtool.gcp as gcp
gapic = gcp.GAPICGenerator()
common_templates = gcp.CommonTemplates()
library = gapic.java_library(
service='monitoring',
version='v3',
config_path='/google/monitoring/artman_monitoring.yaml',
artman_output_name='')
s.copy(library / 'gapic-google-cloud-monitoring-v3/src', 'src')
s.copy(library / 'grpc-google-cloud-monitoring-v3/src', '../../google-api-grpc/grpc-google-cloud-monitoring-v3/src')
s.copy(library / 'proto-google-cloud-monitoring-v3/src', '../../google-api-grpc/proto-google-cloud-monitoring-v3/src')
| apache-2.0 | Python |
98c30fc87b326466a76b574696056f154e4b4139 | add example for sensory stimulus | florisvb/multi_tracker | examples/sample_data/config_20160412_134708_N1.py | examples/sample_data/config_20160412_134708_N1.py | import imp
import os
import numpy as np
import multi_tracker_analysis as mta
def load_sensory_stimulus_on():
# write some code that reads a file and reformats stimulus on/off times to have the form of a list of epoch timestamps e.g. [[t1,t2],[t3,4]]
return [[1444888597, 1444888599], [1444888602, 1444888603]]
class Config(object):
def __init__(self, path, identifiercode=''):
if '.py' in path:
self.path = os.path.dirname(path)
else:
self.path = path
self.identifiercode = identifiercode
# pre-processing data parameters
self.preprocess_data_function = self.preprocess_data
self.minlength = 5 # in frames
self.maxspeed = 10 # pixels / frame
self.minspeed = 0
self.minimal_cumulative_distance_travelled = 4
# other parameters
if self.identifiercode == '20160412_134708_N1': # only do this for this particular example
self.sensory_stimulus_on = load_sensory_stimulus_on()
else:
self.sensory_stimulus_on = []
def preprocess_data(self, pandas_dataframe):
print 'Preprocessing data - see config file for details!'
pandas_dataframe = mta.read_hdf5_file_to_pandas.cull_short_trajectories(pandas_dataframe, self.minlength)
pandas_dataframe = mta.read_hdf5_file_to_pandas.remove_rows_above_speed_threshold(pandas_dataframe, speed_threshold=self.maxspeed)
pandas_dataframe = mta.read_hdf5_file_to_pandas.remove_objects_that_never_exceed_minimum_speed(pandas_dataframe, speed_threshold=self.minspeed)
pandas_dataframe = mta.read_hdf5_file_to_pandas.cull_trajectories_that_do_not_cover_much_x_or_y_distance(pandas_dataframe, min_distance_travelled=self.minimal_cumulative_distance_travelled)
# Delete cut and join trajectories
instructions_filename = mta.read_hdf5_file_to_pandas.get_filename(self.path, 'delete_cut_join_instructions.pickle')
if instructions_filename is not None:
pandas_dataframe = mta.read_hdf5_file_to_pandas.delete_cut_join_trajectories_according_to_instructions(pandas_dataframe, instructions_filename)
else:
print 'No delete cut join instructions found in path!'
return pandas_dataframe
| import imp
import os
import numpy as np
import multi_tracker_analysis as mta
class Config(object):
def __init__(self, path, identifiercode=''):
if '.py' in path:
self.path = os.path.dirname(path)
else:
self.path = path
self.identifiercode = identifiercode
# pre-processing data parameters
self.preprocess_data_function = self.preprocess_data
self.minlength = 5 # in frames
self.maxspeed = 10 # pixels / frame
self.minspeed = 0
self.minimal_cumulative_distance_travelled = 4
# other parameters
self.sensory_stimulus_on = []
def preprocess_data(self, pandas_dataframe):
print 'Preprocessing data - see config file for details!'
pandas_dataframe = mta.read_hdf5_file_to_pandas.cull_short_trajectories(pandas_dataframe, self.minlength)
pandas_dataframe = mta.read_hdf5_file_to_pandas.remove_rows_above_speed_threshold(pandas_dataframe, speed_threshold=self.maxspeed)
pandas_dataframe = mta.read_hdf5_file_to_pandas.remove_objects_that_never_exceed_minimum_speed(pandas_dataframe, speed_threshold=self.minspeed)
pandas_dataframe = mta.read_hdf5_file_to_pandas.cull_trajectories_that_do_not_cover_much_x_or_y_distance(pandas_dataframe, min_distance_travelled=self.minimal_cumulative_distance_travelled)
# Delete cut and join trajectories
instructions_filename = mta.read_hdf5_file_to_pandas.get_filename(self.path, 'delete_cut_join_instructions.pickle')
if instructions_filename is not None:
pandas_dataframe = mta.read_hdf5_file_to_pandas.delete_cut_join_trajectories_according_to_instructions(pandas_dataframe, instructions_filename)
else:
print 'No delete cut join instructions found in path!'
return pandas_dataframe
| mit | Python |
43e1914d9c74ddd9ab366596c85e3c8e4b83ce92 | Change negating. | phac-nml/bio_hansel | bio_hansel/quality_check/qc_utils.py | bio_hansel/quality_check/qc_utils.py | from typing import Tuple
from bio_hansel.subtype import Subtype
from pandas import DataFrame
'''
[get_conflicting_tiles]
Input: Subtype, DataFrame
Output: List
Desc: The purpose of this method is to find positive and negative tiles for the same refposition in the dataframe.
The method will return a list with the conflicting tiles.
'''
def get_conflicting_tiles(st: Subtype, df: DataFrame) -> list:
if st.subtype:
if 'is_kmer_freq_okay' in df:
dfst = df[(df['subtype'] == str(st.subtype)) & (df['is_kmer_freq_okay'])]
else: # fasta files
dfst = df[(df['subtype'] == str(st.subtype))]
pos_tiles = dfst[dfst['is_pos_tile']]
neg_tiles = dfst[~dfst['is_pos_tile']]
pos_tile_values = '|'.join(pos_tiles['refposition'].values.tolist())
conflicting_tiles = neg_tiles[neg_tiles['refposition'].str.contains(pos_tile_values)][
'refposition'].values.tolist()
return conflicting_tiles
'''
[get_num_pos_neg_tiles]
Input: Subtype, DataFrame
Output: Tuple[int,int]
Desc: The purpose of this method is to find the number of positive and negative tiles that exist for a subtype.
'''
def get_num_pos_neg_tiles(st: Subtype, df: DataFrame) -> Tuple[int, int]:
num_pos_tiles = 0
num_neg_tiles = 0
if st.subtype:
dfst = df[(df['subtype'] == str(st.subtype))]
num_pos_tiles = dfst[dfst['is_pos_tile']].shape[0]
num_neg_tiles = dfst[~dfst['is_pos_tile']].shape[0]
return num_pos_tiles, num_neg_tiles
'''
[possible_subtypes_exist_in_df]
Input: Subtype, Dataframe
Output: bool
True: Possible subtypes exist within the df, and their frequencies are okay.
False: Possible subtypes do not exist within the df, this is the pre-req of an inconsistent result.
Desc: The purpose of this method is to find if the possible subtypes exist within the df.
'''
def possible_subtypes_exist_in_df(st: Subtype, df: DataFrame) -> list:
non_present_subtypes = []
possible_subtypes = st.possible_downstream_subtypes
if possible_subtypes:
for subtype in possible_subtypes:
if subtype not in df['subtype']:
non_present_subtypes.append(subtype)
return non_present_subtypes
| from typing import Tuple
from bio_hansel.subtype import Subtype
from pandas import DataFrame
'''
[get_conflicting_tiles]
Input: Subtype, DataFrame
Output: List
Desc: The purpose of this method is to find positive and negative tiles for the same refposition in the dataframe.
The method will return a list with the conflicting tiles.
'''
def get_conflicting_tiles(st: Subtype, df: DataFrame) -> list:
if st.subtype:
if 'is_kmer_freq_okay' in df:
dfst = df[(df['subtype'] == str(st.subtype)) & (df['is_kmer_freq_okay'])]
else: # fasta files
dfst = df[(df['subtype'] == str(st.subtype))]
pos_tiles = dfst[dfst['is_pos_tile']]
neg_tiles = dfst[dfst['is_pos_tile'] == False]
pos_tile_values = '|'.join(pos_tiles['refposition'].values.tolist())
conflicting_tiles = neg_tiles[neg_tiles['refposition'].str.contains(pos_tile_values)][
'refposition'].values.tolist()
return conflicting_tiles
'''
[get_num_pos_neg_tiles]
Input: Subtype, DataFrame
Output: Tuple[int,int]
Desc: The purpose of this method is to find the number of positive and negative tiles that exist for a subtype.
'''
def get_num_pos_neg_tiles(st: Subtype, df: DataFrame) -> Tuple[int, int]:
num_pos_tiles = 0
num_neg_tiles = 0
if st.subtype:
dfst = df[(df['subtype'] == str(st.subtype))]
num_pos_tiles = dfst[dfst['is_pos_tile']].shape[0]
num_neg_tiles = dfst[dfst['is_pos_tile'] == False].shape[0]
return num_pos_tiles, num_neg_tiles
'''
[possible_subtypes_exist_in_df]
Input: Subtype, Dataframe
Output: bool
True: Possible subtypes exist within the df, and their frequencies are okay.
False: Possible subtypes do not exist within the df, this is the pre-req of an inconsistent result.
Desc: The purpose of this method is to find if the possible subtypes exist within the df.
'''
def possible_subtypes_exist_in_df(st: Subtype, df: DataFrame) -> list:
non_present_subtypes = []
possible_subtypes = st.possible_downstream_subtypes
if possible_subtypes:
for subtype in possible_subtypes:
if subtype not in df['subtype']:
non_present_subtypes.append(subtype)
return non_present_subtypes
| apache-2.0 | Python |
e0c311e04be19bd42f9e830200b975f6b7135434 | Fix a lint issue | arthur-wsw/pinax-announcements,arthur-wsw/pinax-announcements,pinax/django-announcements,pinax/django-announcements,pinax/pinax-announcements | pinax/announcements/models.py | pinax/announcements/models.py | from django.conf import settings
from django.db import models
from django.core.urlresolvers import reverse
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
class Announcement(models.Model):
"""
A single announcement.
"""
DISMISSAL_NO = 1
DISMISSAL_SESSION = 2
DISMISSAL_PERMANENT = 3
DISMISSAL_CHOICES = [
(DISMISSAL_NO, _("No Dismissals Allowed")),
(DISMISSAL_SESSION, _("Session Only Dismissal")),
(DISMISSAL_PERMANENT, _("Permanent Dismissal Allowed"))
]
title = models.CharField(_("title"), max_length=50)
content = models.TextField(_("content"))
creator = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_("creator"))
creation_date = models.DateTimeField(_("creation_date"), default=timezone.now)
site_wide = models.BooleanField(_("site wide"), default=False)
members_only = models.BooleanField(_("members only"), default=False)
dismissal_type = models.IntegerField(choices=DISMISSAL_CHOICES, default=DISMISSAL_SESSION)
publish_start = models.DateTimeField(_("publish_start"), default=timezone.now)
publish_end = models.DateTimeField(_("publish_end"), blank=True, null=True)
def get_absolute_url(self):
return reverse("announcements_detail", args=[self.pk])
def dismiss_url(self):
if self.dismissal_type != Announcement.DISMISSAL_NO:
return reverse("announcements_dismiss", args=[self.pk])
def __unicode__(self):
return self.title
class Meta:
verbose_name = _("announcement")
verbose_name_plural = _("announcements")
class Dismissal(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name="announcement_dismissals")
announcement = models.ForeignKey(Announcement, related_name="dismissals")
dismissed_at = models.DateTimeField(default=timezone.now)
| from django.conf import settings
from django.db import models
from django.core.urlresolvers import reverse
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
class Announcement(models.Model):
"""
A single announcement.
"""
DISMISSAL_NO = 1
DISMISSAL_SESSION = 2
DISMISSAL_PERMANENT = 3
DISMISSAL_CHOICES = [
(DISMISSAL_NO, _("No Dismissals Allowed")),
(DISMISSAL_SESSION, _("Session Only Dismissal")),
(DISMISSAL_PERMANENT, _("Permanent Dismissal Allowed"))
]
title = models.CharField(_("title"), max_length=50)
content = models.TextField(_("content"))
creator = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_("creator"))
creation_date = models.DateTimeField(_("creation_date"), default=timezone.now)
site_wide = models.BooleanField(_("site wide"), default=False)
members_only = models.BooleanField(_("members only"), default=False)
dismissal_type = models.IntegerField(choices=DISMISSAL_CHOICES, default=DISMISSAL_SESSION)
publish_start = models.DateTimeField(_("publish_start"), default=timezone.now)
publish_end = models.DateTimeField(_("publish_end"), blank=True, null=True)
def get_absolute_url(self):
return reverse("announcements_detail", args=[self.pk])
def dismiss_url(self):
if self.dismissal_type != Announcement.DISMISSAL_NO:
return reverse("announcements_dismiss", args=[self.pk])
def __unicode__(self):
return self.title
class Meta:
verbose_name = _("announcement")
verbose_name_plural = _("announcements")
class Dismissal(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name="announcement_dismissals")
announcement = models.ForeignKey(Announcement, related_name="dismissals")
dismissed_at = models.DateTimeField(default=timezone.now)
| mit | Python |
4f9b5700ae6247c282ff4bcc40ec0f2cf3a2ff35 | Bump version to 1.18.0 | platformio/platformio-api | platformio_api/__init__.py | platformio_api/__init__.py | # Copyright 2014-present Ivan Kravets <me@ikravets.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging.config
import os
from time import tzset
VERSION = (1, 18, 0)
__version__ = ".".join([str(s) for s in VERSION])
__title__ = "platformio-api"
__description__ = ("An API for PlatformIO")
__url__ = "https://github.com/ivankravets/platformio-api"
__author__ = "Ivan Kravets"
__email__ = "me@ikravets.com"
__license__ = "MIT License"
__copyright__ = "Copyright (C) 2014-2017 Ivan Kravets"
config = dict(
SQLALCHEMY_DATABASE_URI=None,
GITHUB_LOGIN=None,
GITHUB_PASSWORD=None,
DL_PIO_DIR=None,
DL_PIO_URL=None,
MAX_DLFILE_SIZE=1024 * 1024 * 150, # 150 Mb
# Fuzzy search will not be applied to words shorter than the value below
SOLR_FUZZY_MIN_WORD_LENGTH=3,
LOGGING=dict(version=1)
)
assert "PIOAPI_CONFIG_PATH" in os.environ
with open(os.environ.get("PIOAPI_CONFIG_PATH")) as f:
config.update(json.load(f))
# configure logging for packages
logging.basicConfig()
logging.config.dictConfig(config['LOGGING'])
# setup time zone to UTC globally
os.environ['TZ'] = "+00:00"
tzset()
| # Copyright 2014-present Ivan Kravets <me@ikravets.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging.config
import os
from time import tzset
VERSION = (1, 17, 6)
__version__ = ".".join([str(s) for s in VERSION])
__title__ = "platformio-api"
__description__ = ("An API for PlatformIO")
__url__ = "https://github.com/ivankravets/platformio-api"
__author__ = "Ivan Kravets"
__email__ = "me@ikravets.com"
__license__ = "MIT License"
__copyright__ = "Copyright (C) 2014-2017 Ivan Kravets"
config = dict(
SQLALCHEMY_DATABASE_URI=None,
GITHUB_LOGIN=None,
GITHUB_PASSWORD=None,
DL_PIO_DIR=None,
DL_PIO_URL=None,
MAX_DLFILE_SIZE=1024 * 1024 * 150, # 150 Mb
# Fuzzy search will not be applied to words shorter than the value below
SOLR_FUZZY_MIN_WORD_LENGTH=3,
LOGGING=dict(version=1)
)
assert "PIOAPI_CONFIG_PATH" in os.environ
with open(os.environ.get("PIOAPI_CONFIG_PATH")) as f:
config.update(json.load(f))
# configure logging for packages
logging.basicConfig()
logging.config.dictConfig(config['LOGGING'])
# setup time zone to UTC globally
os.environ['TZ'] = "+00:00"
tzset()
| apache-2.0 | Python |
0bfcca4d11722c92d1fa9fe8da30a9a118d7f8ea | Fix bounty constructor; add verification; NO PING | gappleto97/Senior-Project | common/bounty.py | common/bounty.py | import os, pickle, re
from common.safeprint import safeprint
bountyList = []
class Bounty:
ip = ""
btc = ""
reward = 0
data = []
def __init__(self, ipAddress, btcAddress, rewardAmount, dataList=[]):
self.ip = ipAddress
self.btc = btcAddress
self.reward = rewardAmount
self.data = dataList
def isValid(self):
try:
safeprint("Testing IP address")
#is IP valid
b = int(self.ip.split(":")[1])
b = int(self.ip.split(":")[0].split(".")[0])
b = int(self.ip.split(":")[0].split(".")[1])
b = int(self.ip.split(":")[0].split(".")[2])
b = int(self.ip.split(":")[0].split(".")[3])
#ping IP
#is Bitcoin address valid
safeprint("Testing Bitcoin address")
address = str(self.btc)
if not re.match(re.compile("^[a-zA-Z1-9]{27,35}$"),address):
return False
#is reward valid
safeprint("Testing reward")
b = int(self.reward)
return True
except:
return False
def isPayable(self):
#check if address has enough
return False
def verify(string):
test = pickle.loads(string)
try:
safeprint("Testing IP address")
#is IP valid
b = int(test.ip.split(":")[1])
b = int(test.ip.split(":")[0].split(".")[0])
b = int(test.ip.split(":")[0].split(".")[1])
b = int(test.ip.split(":")[0].split(".")[2])
b = int(test.ip.split(":")[0].split(".")[3])
#ping IP
#is Bitcoin address valid
safeprint("Testing Bitcoin address")
address = str(test.btc)
if not re.match(re.compile("^[a-zA-Z1-9]{27,35}$"),address):
return False
#is reward valid
safeprint("Testing reward")
b = int(test.reward)
return True
except:
return False
def saveToFile():
if os.path.exists("bounties.pickle"):
pickle.dump(boutyList,"bounties.pickle")
return True
return False
def loadFromFile():
if os.path.exists("settings.conf"):
bountyList = pickle.load("bounties.pickle")
return True
return False
def loadBounties():
loadFromFile()
if len(bountyList) is 0:
requestBounties()
return len(bountyList) is not 0
def requestBounties(peerList):
for peer in peerList:
bountyList.extend(requestBounty(peer))
def requestBounty(peer):
safeprint("currently unsupported")
def sendBounty(peer):
safeprint("currently unsupported")
if len(bountyList) is 0:
loadBounties()
#send bounties
dumpBounties()
def getBounty(charity, factor):
for bounty in bountyList:
if best is None:
best = bounty
elif best.rewardAmount < bounty.rewardAmount and bounty.isValid() and (isPayable(factor) or charity):
best = bounty
return best
| import os, pickle
from common.safeprint import safeprint
bountyList = []
class Bounty:
ip = ""
btc = ""
reward = 0
data = []
def __init__(ipAddress, btcAddress, rewardAmount):
ip = ipAddress
btc = btcAddress
reward = rewardAmount
def __init__(ipAddress, btcAddress, rewardAmount, dataList):
ip = ipAddress
btc = btcAddress
reward = rewardAmount
data = dataList
def addData(dataList):
data = dataList
def isValid():
#ping ip
#check if all fields have things
return False
def isPayable():
#check if address has enough
return False
def saveToFile():
if os.path.exists("bounties.pickle"):
pickle.dump(boutyList,"bounties.pickle")
return True
return False
def loadFromFile():
if os.path.exists("settings.conf"):
bountyList = pickle.load("bounties.pickle")
return True
return False
def loadBounties():
loadFromFile()
if len(bountyList) is 0:
requestBounties()
return len(bountyList) is not 0
def requestBounties(peerList):
for peer in peerList:
bountyList.extend(requestBounty(peer))
def requestBounty(peer):
safeprint("currently unsupported")
def sendBounty(peer):
safeprint("currently unsupported")
if len(bountyList) is 0:
loadBounties()
#send bounties
dumpBounties()
def getBounty(charity, factor):
for bounty in bountyList:
if best is None:
best = bounty
elif best.rewardAmount < bounty.rewardAmount and bounty.isValid() and (isPayable(factor) or charity):
best = bounty
return best
| mit | Python |
d6bfe637dad3ddddf980127646cecc4d3e6b7db3 | use pfctl for IP check (#4320) | opnsense/core,opnsense/core,opnsense/core,opnsense/core,opnsense/core,opnsense/core | src/opnsense/scripts/filter/find_table_references.py | src/opnsense/scripts/filter/find_table_references.py | #!/usr/local/bin/python3
"""
Copyright (c) 2018-2019 Deciso B.V.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
--------------------------------------------------------------------------------------
check which aliases match the given IP
"""
import tempfile
import subprocess
import os
import sys
import ujson
from netaddr import IPNetwork, IPAddress, AddrFormatError
if __name__ == '__main__':
# IP should have been passed as a command line argument
if len(sys.argv) >= 1:
try:
ip = IPAddress(sys.argv[1])
result = {'status': 'ok', 'matches': []}
tables = []
# Fetch tables
sp = subprocess.run(['/sbin/pfctl', '-sT'], capture_output=True, text=True)
for line in sp.stdout.strip().split('\n'):
tables.append(line.strip())
# Test given address against tables
for table in tables:
sp = subprocess.run(['/sbin/pfctl', '-t', table, '-Ttest', sys.argv[1]], capture_output=True, text=True)
line = sp.stderr.strip()
if line.find("1/1") == 0:
result['matches'].append(table)
print(ujson.dumps(result))
except AddrFormatError:
print(ujson.dumps({'status': 'Invalid IP specified!'}))
else:
print(ujson.dumps({'status': 'IP parameter not specified!'}))
| #!/usr/local/bin/python3
"""
Copyright (c) 2018-2019 Deciso B.V.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
--------------------------------------------------------------------------------------
check which aliases match the given IP
"""
import tempfile
import subprocess
import os
import sys
import ujson
from netaddr import IPNetwork, IPAddress, AddrFormatError
if __name__ == '__main__':
# IP should have been passed as a command line argument
if len(sys.argv) >= 1:
try:
ip = IPAddress(sys.argv[1])
result = {'status': 'ok', 'matches': []}
tables = []
# Fetch tables
sp = subprocess.run(['/sbin/pfctl', '-sT'], capture_output=True, text=True)
for line in sp.stdout.strip().split('\n'):
tables.append(line.strip())
# Fetch IP ranges in this table and check if they match
for table in tables:
sp = subprocess.run(['/sbin/pfctl', '-t', table, '-T', 'show'], capture_output=True, text=True)
for line in sp.stdout.strip().split('\n'):
if line.strip() != "":
if ip in IPNetwork(line.strip()):
result['matches'].append(table)
print(ujson.dumps(result))
except AddrFormatError:
print(ujson.dumps({'status': 'Invalid IP specified!'}))
else:
print(ujson.dumps({'status': 'IP parameter not specified!'}))
| bsd-2-clause | Python |
924f5b5467d3ee395fa957e992ea716ce5c3a835 | Add support of facts gathering WWNs on Solaris 10 and Solaris 11 OS (#52091) | thaim/ansible,thaim/ansible | lib/ansible/module_utils/facts/network/fc_wwn.py | lib/ansible/module_utils/facts/network/fc_wwn.py | # Fibre Channel WWN initiator related facts collection for ansible.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import glob
from ansible.module_utils.facts.utils import get_file_lines
from ansible.module_utils.facts.collector import BaseFactCollector
class FcWwnInitiatorFactCollector(BaseFactCollector):
    """Collect the Fibre Channel initiator port WWNs of the host."""

    name = 'fibre_channel_wwn'
    _fact_ids = set()

    def collect(self, module=None, collected_facts=None):
        """Return ``{'fibre_channel_wwn': [...]}`` with one entry per FC port.

        On Linux the WWNs are read from sysfs; example contents of
        /sys/class/fc_host/*/port_name:
            0x21000014ff52a9bb
        On Solaris 10/11 they are read from ``fcinfo hba-port`` output, e.g.:
            HBA Port WWN: 10000090fa1658de
        (Solaris 9 would need ``prtconf -pv`` instead; not handled here.)
        """
        fc_facts = {}
        fc_facts['fibre_channel_wwn'] = []
        if sys.platform.startswith('linux'):
            for fcfile in glob.glob('/sys/class/fc_host/*/port_name'):
                for line in get_file_lines(fcfile):
                    # strip the trailing newline and the leading '0x'
                    fc_facts['fibre_channel_wwn'].append(line.rstrip()[2:])
        elif sys.platform.startswith('sunos') and module is not None:
            # ROBUSTNESS FIX: get_bin_path() returns None when fcinfo is not
            # installed; the original code then crashed concatenating
            # None + str.  Likewise, module itself defaults to None.
            cmd = module.get_bin_path('fcinfo')
            if cmd:
                cmd = cmd + " hba-port | grep 'Port WWN'"
                rc, fcinfo_out, err = module.run_command(cmd, use_unsafe_shell=True)
                if fcinfo_out:
                    for line in fcinfo_out.splitlines():
                        # the last whitespace-separated token is the WWN
                        data = line.split(' ')
                        fc_facts['fibre_channel_wwn'].append(data[-1].rstrip())
        return fc_facts
| # Fibre Channel WWN initiator related facts collection for ansible.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import glob
from ansible.module_utils.facts.utils import get_file_lines
from ansible.module_utils.facts.collector import BaseFactCollector
class FcWwnInitiatorFactCollector(BaseFactCollector):
    """Collect Fibre Channel WWN initiator facts (Linux only)."""

    name = 'fibre_channel_wwn'
    _fact_ids = set()

    def collect(self, module=None, collected_facts=None):
        """Return ``{'fibre_channel_wwn': [...]}`` with one WWN per FC port.

        Example contents of /sys/class/fc_host/*/port_name:
            0x21000014ff52a9bb
        """
        wwns = []
        if sys.platform.startswith('linux'):
            for port_file in glob.glob('/sys/class/fc_host/*/port_name'):
                # strip the trailing newline and the leading '0x'
                wwns.extend(line.rstrip()[2:] for line in get_file_lines(port_file))
        return {'fibre_channel_wwn': wwns}
| mit | Python |
65ac4143dbf2f69f74b86c071f40f640c33873b3 | prepare for entry to SQL - not SQL itself | danodonovan/django-condor,danodonovan/django-condor | condor/fields.py | condor/fields.py | from django.db import models
from django.core.serializers.json import DjangoJSONEncoder
from django.utils import simplejson as json
# Brad Jasper's JSONField https://github.com/bradjasper/django-jsonfield
class JSONField(models.TextField):
    """JSONField is a generic textfield that serializes/unserializes JSON objects"""

    # Used so to_python() is called
    __metaclass__ = models.SubfieldBase

    def __init__(self, *args, **kwargs):
        # Optional (de)serialisation hooks; popped so TextField.__init__ never
        # sees them.  dump_kwargs defaults to Django's JSON encoder, which
        # knows how to handle dates, times and decimals.
        self.dump_kwargs = kwargs.pop('dump_kwargs', {'cls': DjangoJSONEncoder})
        self.load_kwargs = kwargs.pop('load_kwargs', {})
        super(JSONField, self).__init__(*args, **kwargs)

    def to_python(self, value):
        """Convert string value to JSON"""
        if isinstance(value, basestring):
            try:
                return json.loads(value, **self.load_kwargs)
            except ValueError:
                # Not valid JSON: fall through and hand the raw string back.
                pass
        return value

    def get_prep_value(self, value):
        """Convert JSON object to a string"""
        if isinstance(value, basestring):
            # Already a string (presumably pre-serialised) -- store as-is.
            return value
        return json.dumps(value, **self.dump_kwargs)

    def value_to_string(self, obj):
        # Used by serializers to obtain the stored string form of the field.
        value = self._get_val_from_obj(obj)
        # NOTE(review): this calls get_db_prep_value() although the class now
        # defines get_prep_value(); it relies on the Field base-class shim --
        # confirm against the installed Django version.
        return self.get_db_prep_value(value) | from django.db import models
from django.core.serializers.json import DjangoJSONEncoder
from django.utils import simplejson as json
# Brad Jasper's JSONField https://github.com/bradjasper/django-jsonfield
class JSONField(models.TextField):
    """JSONField is a generic textfield that serializes/unserializes JSON objects"""

    # Used so to_python() is called
    __metaclass__ = models.SubfieldBase

    def __init__(self, *args, **kwargs):
        # Optional (de)serialisation hooks; popped so TextField.__init__ never
        # sees them.  dump_kwargs defaults to Django's JSON encoder.
        self.dump_kwargs = kwargs.pop('dump_kwargs', {'cls': DjangoJSONEncoder})
        self.load_kwargs = kwargs.pop('load_kwargs', {})
        super(JSONField, self).__init__(*args, **kwargs)

    def to_python(self, value):
        """Convert string value to JSON"""
        if isinstance(value, basestring):
            try:
                return json.loads(value, **self.load_kwargs)
            except ValueError:
                # Not valid JSON: hand the raw string back unchanged.
                pass
        return value

    def get_db_prep_value(self, value):
        """Convert JSON object to a string"""
        # NOTE(review): modern Django expects
        # get_db_prep_value(value, connection, prepared); this older one-arg
        # form presumably targets pre-1.2 Django -- verify before upgrading.
        if isinstance(value, basestring):
            return value
        return json.dumps(value, **self.dump_kwargs)

    def value_to_string(self, obj):
        # Used by serializers to obtain the stored string form of the field.
        value = self._get_val_from_obj(obj)
        return self.get_db_prep_value(value) | bsd-3-clause | Python |
d1a7a56c1480d9d17a8ddef359787bba2d05aed8 | fix encoding | tehron/tehbot,spaceone/tehbot | plugins/translate/__init__.py | plugins/translate/__init__.py | import plugins
import urllib
import urllib2
import lxml.html
def translate(connection, channel, nick, cmd, args):
    """Look up *args* on Google Translate (auto-detected source language
    to English) and announce the result on the channel."""
    if not args:
        return
    request_headers = { 'User-Agent' : 'Mozilla/5.0 (X11; Linux x86_64; rv:14.0) Gecko/20100101 Firefox/14.0.1' }
    form = {
        'sl': 'auto',   # source language: auto-detect
        'tl': 'en',     # target language
        'hl': 'en',
        'ie': 'UTF-8',
        'q': plugins.to_utf8(args),
    }
    request = urllib2.Request("https://translate.google.com/", urllib.urlencode(form), request_headers)
    document = lxml.html.parse(urllib2.urlopen(request))
    matches = document.xpath("//span[@id='result_box']")
    if matches:
        plugins.say(connection, channel, "Translation: %s" % matches[0].text_content().strip())

plugins.register_pub_cmd("translate", translate)
| import plugins
import urllib
import urllib2
import lxml.html
def translate(connection, channel, nick, cmd, args):
    """Translate *args* (auto-detected source language) to English via
    Google Translate and say the result on the channel."""
    if not args:
        return
    headers = { 'User-Agent' : 'Mozilla/5.0 (X11; Linux x86_64; rv:14.0) Gecko/20100101 Firefox/14.0.1' }
    # BUG FIX: encode the query to UTF-8 before urlencode(); passing a
    # unicode string with non-ASCII characters raises UnicodeEncodeError
    # under Python 2.
    data = { 'sl' : 'auto', 'tl' : 'en', 'hl' : 'en', 'ie' : 'UTF-8', 'q' : plugins.to_utf8(args) }
    req = urllib2.Request("https://translate.google.com/", urllib.urlencode(data), headers)
    tree = lxml.html.parse(urllib2.urlopen(req))
    trans = tree.xpath("//span[@id='result_box']")
    if len(trans) > 0:
        txt = trans[0].text_content().strip()
        plugins.say(connection, channel, "Translation: %s" % txt)

plugins.register_pub_cmd("translate", translate)
| mit | Python |
f21bb9d1b1d08fab8a127dc839b688338df30576 | Fix auto-encoder | uaca/deepy,zomux/deepy,uaca/deepy,uaca/deepy,zomux/deepy | experiments/auto_encoders/rnn_auto_encoder.py | experiments/auto_encoders/rnn_auto_encoder.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from deepy.networks import AutoEncoder
from deepy.layers import RNN, Dense
from deepy.trainers import SGDTrainer, LearningRateAnnealer
from util import get_data, VECTOR_SIZE, SEQUENCE_LENGTH
HIDDEN_SIZE = 50

model_path = os.path.join(os.path.dirname(__file__), "models", "rnn1.gz")

if __name__ == '__main__':
    # Sequence auto-encoder: encode the whole input sequence into a single
    # hidden state, then decode it back over SEQUENCE_LENGTH steps.
    net = AutoEncoder(input_dim=VECTOR_SIZE, input_tensor=3)
    net.stack_encoders(RNN(hidden_size=HIDDEN_SIZE, input_type="sequence", output_type="one"))
    net.stack_decoders(
        RNN(hidden_size=HIDDEN_SIZE, input_type="one", output_type="sequence", steps=SEQUENCE_LENGTH),
        Dense(VECTOR_SIZE, 'softmax'))
    trainer = SGDTrainer(net)
    trainer.run(get_data(), controllers=[LearningRateAnnealer(trainer)])
    net.save_params(model_path)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os

from deepy.networks import AutoEncoder
from deepy.layers import RNN, Dense
from deepy.trainers import SGDTrainer, LearningRateAnnealer
from util import get_data, VECTOR_SIZE, SEQUENCE_LENGTH

HIDDEN_SIZE = 50

# BUG FIX: "import os" was missing, so this line raised NameError at import time.
model_path = os.path.join(os.path.dirname(__file__), "models", "rnn1.gz")

if __name__ == '__main__':
    # Sequence auto-encoder: encode the input sequence to one hidden state,
    # then decode it back over SEQUENCE_LENGTH steps.
    model = AutoEncoder(input_dim=VECTOR_SIZE, input_tensor=3)
    model.stack_encoders(RNN(hidden_size=HIDDEN_SIZE, input_type="sequence", output_type="one"))
    model.stack_decoders(RNN(hidden_size=HIDDEN_SIZE, input_type="one", output_type="sequence", steps=SEQUENCE_LENGTH),
                         Dense(VECTOR_SIZE, 'softmax'))
    trainer = SGDTrainer(model)
    annealer = LearningRateAnnealer(trainer)
    trainer.run(get_data(), controllers=[annealer])
    model.save_params(model_path)
| mit | Python |
64d98bb88b5b7d2c98cfdd22d74761a80003e769 | bump version | arraystream/simpleplotly | weplot/version.py | weplot/version.py | __version__ = '0.0.9'
| __version__ = '0.0.8'
| mit | Python |
51c7728b6304f678e0eb6460470e0f5cc95b816f | Use print() function in both Python 2 and Python 3 (#52) | magenta/magenta-demos,magenta/magenta-demos,magenta/magenta-demos,magenta/magenta-demos,magenta/magenta-demos | ai-jam-js/maybe_download_mags.py | ai-jam-js/maybe_download_mags.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from six.moves.urllib.request import URLopener
# Download each model bundle (.mag) unless it is already on disk.
mag_files = [
    'http://download.magenta.tensorflow.org/models/attention_rnn.mag',
    'http://download.magenta.tensorflow.org/models/performance.mag',
    'http://download.magenta.tensorflow.org/models/pianoroll_rnn_nade.mag',
    'http://download.magenta.tensorflow.org/models/drum_kit_rnn.mag',
]

for url in mag_files:
    # Save each bundle under its basename in the current directory.
    target = url.split('/')[-1]
    if not os.path.exists(target):
        print('Writing %s to %s' % (url, target))
        URLopener().retrieve(url, target)
    else:
        print('File %s already present' % url)
| import os
import urllib
# This script downloads these .mag files if not already present.
# NOTE: Python 2 only (print statements, urllib.URLopener).
mag_files = [
    'http://download.magenta.tensorflow.org/models/attention_rnn.mag',
    'http://download.magenta.tensorflow.org/models/performance.mag',
    'http://download.magenta.tensorflow.org/models/pianoroll_rnn_nade.mag',
    'http://download.magenta.tensorflow.org/models/drum_kit_rnn.mag',
]

for mag_file in mag_files:
    # Save each bundle under its basename in the current directory.
    output_file = mag_file.split('/')[-1]
    if os.path.exists(output_file):
        print 'File %s already present' % mag_file
    else:
        print 'Writing %s to %s' % (mag_file, output_file)
        urlopener = urllib.URLopener()
        urlopener.retrieve(mag_file, output_file)
| apache-2.0 | Python |
1db4d00640432e5ce3acc9580127527c49a30d64 | Clear base_api cache in tests. | alephdata/aleph,alephdata/aleph,pudo/aleph,pudo/aleph,alephdata/aleph,alephdata/aleph,pudo/aleph,alephdata/aleph | aleph/tests/test_sessions_api.py | aleph/tests/test_sessions_api.py | import jwt
from aleph.core import db, settings
from aleph.model import Collection
from aleph.logic.collections import update_collection
from aleph.views.base_api import _metadata_locale
from aleph.tests.util import TestCase
from aleph.tests.factories.models import RoleFactory
class SessionsApiTestCase(TestCase):
    """API tests for login sessions and the public /metadata endpoint."""

    def setUp(self):
        super(SessionsApiTestCase, self).setUp()
        # A role with a known email address for the password-login tests.
        self.role = RoleFactory.create()

    def test_admin_all_access(self):
        # Create a collection owned by another user ...
        self.wl = Collection()
        self.wl.label = "Test Collection"
        self.wl.foreign_id = 'test'
        self.wl.creator = self.create_user('watcher')
        db.session.add(self.wl)
        db.session.commit()
        update_collection(self.wl)
        # ... then check that an admin can read it regardless of ownership.
        _, headers = self.login(foreign_id='admin', is_admin=True)
        res = self.client.get('/api/2/collections/%s' % self.wl.id,
                              headers=headers)
        assert res.status_code == 200, res

    def test_metadata_get_with_password_registration_enabled(self):
        # Drop the cached metadata payload so this test observes the
        # current settings rather than a response cached by another test.
        _metadata_locale.cache_clear()
        res = self.client.get('/api/2/metadata')
        assert res.status_code == 200, res
        auth = res.json['auth']
        assert not auth.get('oauth_uri'), auth
        assert auth['registration_uri'], res

    def test_metadata_get_without_password_login(self):
        # Clear the cache first: the settings change below must be visible.
        _metadata_locale.cache_clear()
        settings.PASSWORD_LOGIN = False
        res = self.client.get('/api/2/metadata')
        assert res.status_code == 200, res
        auth = res.json['auth']
        assert not auth.get('oauth_uri'), auth
        assert not auth.get('password_login_uri'), auth
        assert not auth.get('registration_uri'), auth

    def test_password_login_get(self):
        # The login endpoint is POST-only; GET must yield 405.
        res = self.client.get('/api/2/sessions/login')
        assert res.status_code == 405, res

    def test_password_login_post_no_data(self):
        settings.PASSWORD_LOGIN = True
        res = self.client.post('/api/2/sessions/login')
        assert res.status_code == 400, res

    def test_password_login_post_good_email_and_password(self):
        settings.PASSWORD_LOGIN = True
        secret = self.fake.password()
        self.role.set_password(secret)
        data = dict(email=self.role.email, password=secret)
        res = self.client.post('/api/2/sessions/login', data=data)
        assert res.status_code == 200, res
        # The JWT payload should identify the logged-in role.
        data = jwt.decode(res.json['token'], verify=False)
        assert data['role']['id'] == str(self.role.id), res
| import jwt
from aleph.core import db, settings
from aleph.model import Collection
from aleph.logic.collections import update_collection
from aleph.tests.util import TestCase
from aleph.tests.factories.models import RoleFactory
class SessionsApiTestCase(TestCase):
    """API tests for login sessions and the public /metadata endpoint."""

    def setUp(self):
        super(SessionsApiTestCase, self).setUp()
        # A role with a known email address for the password-login tests.
        self.role = RoleFactory.create()

    def test_admin_all_access(self):
        # Create a collection owned by another user ...
        self.wl = Collection()
        self.wl.label = "Test Collection"
        self.wl.foreign_id = 'test'
        self.wl.creator = self.create_user('watcher')
        db.session.add(self.wl)
        db.session.commit()
        update_collection(self.wl)
        # ... then check that an admin can read it regardless of ownership.
        _, headers = self.login(foreign_id='admin', is_admin=True)
        res = self.client.get('/api/2/collections/%s' % self.wl.id,
                              headers=headers)
        assert res.status_code == 200, res

    def test_metadata_get_with_password_registration_enabled(self):
        # NOTE(review): if the server caches the metadata payload, the two
        # metadata tests below may depend on execution order relative to the
        # settings mutation -- confirm whether a cache clear is needed here.
        res = self.client.get('/api/2/metadata')
        assert res.status_code == 200, res
        auth = res.json['auth']
        assert not auth.get('oauth_uri'), auth
        assert auth['registration_uri'], res

    def test_metadata_get_without_password_login(self):
        settings.PASSWORD_LOGIN = False
        res = self.client.get('/api/2/metadata')
        assert res.status_code == 200, res
        auth = res.json['auth']
        assert not auth.get('oauth_uri'), auth
        assert not auth.get('password_login_uri'), auth
        assert not auth.get('registration_uri'), auth

    def test_password_login_get(self):
        # The login endpoint is POST-only; GET must yield 405.
        res = self.client.get('/api/2/sessions/login')
        assert res.status_code == 405, res

    def test_password_login_post_no_data(self):
        settings.PASSWORD_LOGIN = True
        res = self.client.post('/api/2/sessions/login')
        assert res.status_code == 400, res

    def test_password_login_post_good_email_and_password(self):
        settings.PASSWORD_LOGIN = True
        secret = self.fake.password()
        self.role.set_password(secret)
        data = dict(email=self.role.email, password=secret)
        res = self.client.post('/api/2/sessions/login', data=data)
        assert res.status_code == 200, res
        # The JWT payload should identify the logged-in role.
        data = jwt.decode(res.json['token'], verify=False)
        assert data['role']['id'] == str(self.role.id), res
| mit | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.