commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
20e63fb5b5a02966acbe66f3cda19bc59ff89934 | Set a default value for a default_roles | monasca_log_api/conf/role_middleware.py | monasca_log_api/conf/role_middleware.py | # Copyright 2017 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
role_m_opts = [
cfg.ListOpt(name='path',
default='/',
help='List of paths where middleware applies to'),
cfg.ListOpt(name='default_roles',
default=['monasca-user'],
help='List of roles allowed to enter api'),
cfg.ListOpt(name='agent_roles',
default=None,
help=('List of roles, that if set, mean that request '
'comes from agent, thus is authorized in the same '
'time')),
cfg.ListOpt(name='delegate_roles',
default=['admin'],
help=('Roles that are allowed to POST logs on '
'behalf of another tenant (project)'))
]
role_m_group = cfg.OptGroup(name='roles_middleware', title='roles_middleware')
def register_opts(conf):
conf.register_group(role_m_group)
conf.register_opts(role_m_opts, role_m_group)
def list_opts():
return role_m_group, role_m_opts
| # Copyright 2017 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
role_m_opts = [
cfg.ListOpt(name='path',
default='/',
help='List of paths where middleware applies to'),
cfg.ListOpt(name='default_roles',
default=None,
help='List of roles allowed to enter api'),
cfg.ListOpt(name='agent_roles',
default=None,
help=('List of roles, that if set, mean that request '
'comes from agent, thus is authorized in the same '
'time')),
cfg.ListOpt(name='delegate_roles',
default=['admin'],
help=('Roles that are allowed to POST logs on '
'behalf of another tenant (project)'))
]
role_m_group = cfg.OptGroup(name='roles_middleware', title='roles_middleware')
def register_opts(conf):
conf.register_group(role_m_group)
conf.register_opts(role_m_opts, role_m_group)
def list_opts():
return role_m_group, role_m_opts
| Python | 0.000915 |
dd725349e0613461bdbe75a0c32115b323e9ccc3 | change settings import in wsgi for Travis CI | reflow/wsgi.py | reflow/wsgi.py | """
WSGI config for reflow project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
import sys
# Set matplotlib configuration directory, else Django complains it is not writable
# We'll just use a tempfile
import tempfile
os.environ['MPLCONFIGDIR'] = tempfile.mkdtemp()
paths = [
'/srv/django-projects/ReFlow',
'/srv/django-projects/ReFlow/reflow'
]
for path in paths:
if path not in sys.path:
sys.path.append(path)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "reflow.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
import settings_sample
if settings.INTERACTIVE_DEBUG:
class Debugger:
def __init__(self, object):
self.__object = object
def __call__(self, *args, **kwargs):
import pdb
debugger = pdb.Pdb()
debugger.use_rawinput = 0
debugger.reset()
sys.settrace(debugger.trace_dispatch)
try:
return self.__object(*args, **kwargs)
finally:
debugger.quitting = 1
sys.settrace(None)
application = Debugger(get_wsgi_application())
else:
application = get_wsgi_application()
| """
WSGI config for reflow project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
import sys
# Set matplotlib configuration directory, else Django complains it is not writable
# We'll just use a tempfile
import tempfile
os.environ['MPLCONFIGDIR'] = tempfile.mkdtemp()
paths = [
'/srv/django-projects/ReFlow',
'/srv/django-projects/ReFlow/reflow'
]
for path in paths:
if path not in sys.path:
sys.path.append(path)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "reflow.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
import settings
if settings.INTERACTIVE_DEBUG:
class Debugger:
def __init__(self, object):
self.__object = object
def __call__(self, *args, **kwargs):
import pdb
debugger = pdb.Pdb()
debugger.use_rawinput = 0
debugger.reset()
sys.settrace(debugger.trace_dispatch)
try:
return self.__object(*args, **kwargs)
finally:
debugger.quitting = 1
sys.settrace(None)
application = Debugger(get_wsgi_application())
else:
application = get_wsgi_application()
| Python | 0 |
6fb1b24a3cf1a4cdb3bd35c6f575d96cb2da9415 | Add binding for DSA_size | cryptography/hazmat/bindings/openssl/dsa.py | cryptography/hazmat/bindings/openssl/dsa.py | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
INCLUDES = """
#include <openssl/dsa.h>
"""
TYPES = """
typedef struct dsa_st {
// prime number (public)
BIGNUM *p;
// 160-bit subprime, q | p-1 (public)
BIGNUM *q;
// generator of subgroup (public)
BIGNUM *g;
// private key x
BIGNUM *priv_key;
// public key y = g^x
BIGNUM *pub_key;
...;
} DSA;
typedef struct {
BIGNUM *r;
BIGNUM *s;
} DSA_SIG;
"""
FUNCTIONS = """
DSA *DSA_generate_parameters(int, unsigned char *, int, int *, unsigned long *,
void (*)(int, int, void *), void *);
int DSA_generate_key(DSA *);
DSA *DSA_new(void);
void DSA_free(DSA *);
DSA_SIG *DSA_SIG_new(void);
void DSA_SIG_free(DSA_SIG *);
int i2d_DSA_SIG(const DSA_SIG *, unsigned char **);
DSA_SIG *d2i_DSA_SIG(DSA_SIG **, const unsigned char **, long);
int DSA_size(const DSA *);
"""
MACROS = """
int DSA_generate_parameters_ex(DSA *, int, unsigned char *, int,
int *, unsigned long *, BN_GENCB *);
"""
CUSTOMIZATIONS = """
"""
CONDITIONAL_NAMES = {}
| # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
INCLUDES = """
#include <openssl/dsa.h>
"""
TYPES = """
typedef struct dsa_st {
// prime number (public)
BIGNUM *p;
// 160-bit subprime, q | p-1 (public)
BIGNUM *q;
// generator of subgroup (public)
BIGNUM *g;
// private key x
BIGNUM *priv_key;
// public key y = g^x
BIGNUM *pub_key;
...;
} DSA;
typedef struct {
BIGNUM *r;
BIGNUM *s;
} DSA_SIG;
"""
FUNCTIONS = """
DSA *DSA_generate_parameters(int, unsigned char *, int, int *, unsigned long *,
void (*)(int, int, void *), void *);
int DSA_generate_key(DSA *);
DSA *DSA_new(void);
void DSA_free(DSA *);
DSA_SIG *DSA_SIG_new(void);
void DSA_SIG_free(DSA_SIG *);
int i2d_DSA_SIG(const DSA_SIG *, unsigned char **);
DSA_SIG *d2i_DSA_SIG(DSA_SIG **, const unsigned char **, long);
"""
MACROS = """
int DSA_generate_parameters_ex(DSA *, int, unsigned char *, int,
int *, unsigned long *, BN_GENCB *);
"""
CUSTOMIZATIONS = """
"""
CONDITIONAL_NAMES = {}
| Python | 0 |
9bff4e9ed59d2c15b2da681476385274f5ef9059 | Fix create_sample_event | src/sentry/management/commands/create_sample_event.py | src/sentry/management/commands/create_sample_event.py |
"""
sentry.management.commands.create_sample_event
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from django.core.management.base import BaseCommand, CommandError, make_option
class Command(BaseCommand):
help = 'Creates a sample event in Sentry (if applicable)'
option_list = BaseCommand.option_list + (
make_option('--project', dest='project'),
make_option('--platform', dest='platform'),
)
def handle(self, **options):
from django.conf import settings
from sentry.constants import PLATFORM_LIST
from sentry.models import Project
from sentry.utils.samples import create_sample_event
if not options['project']:
project = Project.objects.get(id=settings.SENTRY_PROJECT)
else:
if options['project'].isdigit():
project = Project.objects.get(id=options['project'])
elif '/' in options['project']:
t_slug, p_slug = options['project'].split('/', 1)
project = Project.objects.get(slug=p_slug, team__slug=t_slug)
else:
raise CommandError('Project must be specified as team-slug/project-slug or a project id')
if options['platform'] not in PLATFORM_LIST:
raise CommandError('Invalid platform. Must specify one of: %s' % ', '.join(PLATFORM_LIST))
platform = options['platform'] or project.platform
event = create_sample_event(project, platform)
if not event:
raise CommandError('Unable to create an event for platform %r' % (str(platform),))
self.stdout.write('Event created: %s' % (event.group.get_absolute_url(),))
|
"""
sentry.management.commands.create_sample_event
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from django.core.management.base import BaseCommand, CommandError, make_option
class Command(BaseCommand):
help = 'Creates a sample event in Sentry (if applicable)'
option_list = BaseCommand.option_list + (
make_option('--project', dest='project'),
make_option('--platform', dest='platform'),
)
def handle(self, **options):
from django.conf import settings
from sentry.constants import PLATFORM_LIST
from sentry.models import Project
from sentry.utils.samples import create_sample_event
if not options['project']:
project = Project.objects.get(settings.SENTRY_DEFAULT_PROJECT)
else:
if options['project'].isdigit():
project = Project.objects.get(id=options['project'])
elif '/' in options['project']:
t_slug, p_slug = options['project'].split('/', 1)
project = Project.objects.get(slug=p_slug, team__slug=t_slug)
else:
raise CommandError('Project must be specified as team-slug/project-slug or a project id')
if options['platform'] not in PLATFORM_LIST:
raise CommandError('Invalid platform. Must specify one of: %s' % ', '.join(PLATFORM_LIST))
platform = options['platform'] or project.platform
event = create_sample_event(project, platform)
if not event:
raise CommandError('Unable to create an event for platform %r' % (str(platform),))
self.stdout.write('Event created: %s' % (event.group.get_absolute_url(),))
| Python | 0.000026 |
893e05540c640c4598477a39688a773556bebad9 | Update HDF5 to 1.8.16 | var/spack/packages/hdf5/package.py | var/spack/packages/hdf5/package.py | from spack import *
class Hdf5(Package):
"""HDF5 is a data model, library, and file format for storing and managing
data. It supports an unlimited variety of datatypes, and is designed for
flexible and efficient I/O and for high volume and complex data.
"""
homepage = "http://www.hdfgroup.org/HDF5/"
url = "http://www.hdfgroup.org/ftp/HDF5/releases/hdf5-1.8.13/src/hdf5-1.8.13.tar.gz"
list_url = "http://www.hdfgroup.org/ftp/HDF5/releases"
list_depth = 3
version('1.8.16', 'b8ed9a36ae142317f88b0c7ef4b9c618')
version('1.8.15', '03cccb5b33dbe975fdcd8ae9dc021f24')
version('1.8.13', 'c03426e9e77d7766944654280b467289')
depends_on("mpi")
depends_on("zlib")
# TODO: currently hard-coded to use OpenMPI
def install(self, spec, prefix):
configure(
"--prefix=%s" % prefix,
"--with-zlib=%s" % spec['zlib'].prefix,
"--enable-parallel",
"--enable-shared",
"CC=%s" % spec['mpich'].prefix.bin + "/mpicc",
"CXX=%s" % spec['mpich'].prefix.bin + "/mpic++")
make()
make("install")
def url_for_version(self, version):
v = str(version)
if version == Version("1.2.2"):
return "http://www.hdfgroup.org/ftp/HDF5/releases/hdf5-" + v + ".tar.gz"
elif version < Version("1.7"):
return "http://www.hdfgroup.org/ftp/HDF5/releases/hdf5-" + version.up_to(2) + "/hdf5-" + v + ".tar.gz"
else:
return "http://www.hdfgroup.org/ftp/HDF5/releases/hdf5-" + v + "/src/hdf5-" + v + ".tar.gz"
| from spack import *
class Hdf5(Package):
"""HDF5 is a data model, library, and file format for storing and managing
data. It supports an unlimited variety of datatypes, and is designed for
flexible and efficient I/O and for high volume and complex data.
"""
homepage = "http://www.hdfgroup.org/HDF5/"
url = "http://www.hdfgroup.org/ftp/HDF5/releases/hdf5-1.8.13/src/hdf5-1.8.13.tar.gz"
list_url = "http://www.hdfgroup.org/ftp/HDF5/releases"
list_depth = 3
version('1.8.15', '03cccb5b33dbe975fdcd8ae9dc021f24')
version('1.8.13', 'c03426e9e77d7766944654280b467289')
depends_on("mpi")
depends_on("zlib")
# TODO: currently hard-coded to use OpenMPI
def install(self, spec, prefix):
configure(
"--prefix=%s" % prefix,
"--with-zlib=%s" % spec['zlib'].prefix,
"--enable-parallel",
"--enable-shared",
"CC=%s" % spec['mpich'].prefix.bin + "/mpicc",
"CXX=%s" % spec['mpich'].prefix.bin + "/mpic++")
make()
make("install")
def url_for_version(self, version):
v = str(version)
if version == Version("1.2.2"):
return "http://www.hdfgroup.org/ftp/HDF5/releases/hdf5-" + v + ".tar.gz"
elif version < Version("1.7"):
return "http://www.hdfgroup.org/ftp/HDF5/releases/hdf5-" + version.up_to(2) + "/hdf5-" + v + ".tar.gz"
else:
return "http://www.hdfgroup.org/ftp/HDF5/releases/hdf5-" + v + "/src/hdf5-" + v + ".tar.gz"
| Python | 0 |
f28daad980dd95584dabe83a102ecdd0e1cac517 | remove reference to unused summaries file | music_spectrogram_diffusion/__init__.py | music_spectrogram_diffusion/__init__.py | # Copyright 2022 The Music Spectrogram Diffusion Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base module for Music Spectrogram Diffusion."""
from music_spectrogram_diffusion import audio_codecs
from music_spectrogram_diffusion import datasets
from music_spectrogram_diffusion import event_codec
from music_spectrogram_diffusion import inference
from music_spectrogram_diffusion import layers
from music_spectrogram_diffusion import metrics
from music_spectrogram_diffusion import note_sequences
from music_spectrogram_diffusion import preprocessors
from music_spectrogram_diffusion import run_length_encoding
from music_spectrogram_diffusion import tasks
from music_spectrogram_diffusion import vocabularies
| # Copyright 2022 The Music Spectrogram Diffusion Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base module for Music Spectrogram Diffusion."""
from music_spectrogram_diffusion import audio_codecs
from music_spectrogram_diffusion import datasets
from music_spectrogram_diffusion import event_codec
from music_spectrogram_diffusion import inference
from music_spectrogram_diffusion import layers
from music_spectrogram_diffusion import metrics
from music_spectrogram_diffusion import note_sequences
from music_spectrogram_diffusion import preprocessors
from music_spectrogram_diffusion import run_length_encoding
from music_spectrogram_diffusion import summaries
from music_spectrogram_diffusion import tasks
from music_spectrogram_diffusion import vocabularies
| Python | 0 |
5eb85dcc98adde698001405c60c44732964ec04a | Fix user messages not being sent | txircd/modules/cmd_privmsg_notice.py | txircd/modules/cmd_privmsg_notice.py | from twisted.words.protocols import irc
from txircd.modbase import Command
class MessageCommand(object):
def __init__(self, ircd):
self.ircd = ircd
def onUse(self, cmd, user, data):
if ("targetchan" not in data or not data["targetchan"]) and ("targetuser" not in data or not data["targetuser"]):
return
if "message" not in data or not data["message"]:
user.sendMessage(irc.ERR_NOTEXTTOSEND, ":No text to send")
return
channelModifiers = data["chanmod"]
message = data["message"]
for index, channel in enumerate(data["targetchan"]):
if channelModifiers[index]:
prefixLevel = self.prefixes[self.prefix_symbols[channelModifiers[index]]][0]
for u in channels.users:
if u != user and u.channels[channel.name]["status"] and self.prefixes[u.channels[channel.name]["status"][0]][0] >= prefixLevel:
u.sendMessage(cmd, ":{}".format(message), to="{}{}".format(channelModifiers[index], channel.name), prefix=user.prefix())
else:
for u in channel.users:
if u != user:
u.sendMessage(cmd, ":{}".format(message), to=channel.name, prefix=user.prefix())
for udata in data["targetuser"]:
udata.sendMessage(cmd, ":{}".format(message), prefix=user.prefix())
def processParams(self, cmd, user, params):
if user.registered > 0:
user.sendMessage(irc.ERR_NOTREGISTERED, cmd, ":You have not registered")
return {}
if not params:
user.sendMessage(irc.ERR_NEEDMOREPARAMS, cmd, ":Not enough parameters")
return {}
if len(params) < 2:
user.sendMessage(irc.ERR_NOTEXTTOSEND, ":No text to send")
return {}
targetChans = []
targetUsers = []
targetChanModifiers = []
for target in params[0].split(","):
if target in self.ircd.users:
targetUsers.append(self.ircd.users[target])
elif target in self.ircd.channels:
targetChans.append(self.ircd.channels[target])
targetChanModifiers.append("")
elif target[0] in self.ircd.prefix_symbols and target[1:] in self.ircd.channels:
targetChans.append(self.ircd.channels[target[1:]])
targetChanModifiers.append(target[0])
else:
user.sendMessage(irc.ERR_NOSUCHNICK, target, ":No such nick/channel")
return {
"user": user,
"targetchan": targetChans,
"chanmod": targetChanModifiers,
"targetuser": targetUsers,
"message": params[1]
}
class PrivMsgCommand(Command):
def __init__(self, msgHandler):
self.msg_handler = msgHandler
def onUse(self, user, data):
self.msg_handler.onUse("PRIVMSG", user, data)
def processParams(self, user, params):
return self.msg_handler.processParams("PRIVMSG", user, params)
class NoticeCommand(Command):
def __init__(self, msgHandler):
self.msg_handler = msgHandler
def onUse(self, user, data):
self.msg_handler.onUse("NOTICE", user, data)
def processParams(self, user, params):
return self.msg_handler.processParams("NOTICE", user, params)
class Spawner(object):
def __init__(self, ircd):
self.ircd = ircd
def spawn(self):
messageHandler = MessageCommand(self.ircd)
return {
"commands": {
"PRIVMSG": PrivMsgCommand(messageHandler),
"NOTICE": NoticeCommand(messageHandler)
}
}
def cleanup(self):
del self.ircd.commands["PRIVMSG"]
del self.ircd.commands["NOTICE"] | from twisted.words.protocols import irc
from txircd.modbase import Command
class MessageCommand(object):
def __init__(self, ircd):
self.ircd = ircd
def onUse(self, cmd, user, data):
if ("targetchan" not in data or not data["targetchan"]) and ("targetuser" not in data or not data["targetuser"]):
return
if "message" not in data or not data["message"]:
user.sendMessage(irc.ERR_NOTEXTTOSEND, ":No text to send")
return
targetChans = data["targetchan"]
targetUsers = data["targetuser"]
channelModifiers = data["chanmod"]
message = data["message"]
for index, channel in enumerate(data["targetchan"]):
if channelModifiers[index]:
prefixLevel = self.prefixes[self.prefix_symbols[channelModifiers[index]]][0]
for u in channels.users:
if u != user and u.channels[channel.name]["status"] and self.prefixes[u.channels[channel.name]["status"][0]][0] >= prefixLevel:
u.sendMessage(cmd, ":{}".format(message), to="{}{}".format(channelModifiers[index], channel.name), prefix=user.prefix())
else:
for u in channel.users:
if u != user:
u.sendMessage(cmd, ":{}".format(message), to=channel.name, prefix=user.prefix())
def processParams(self, cmd, user, params):
if user.registered > 0:
user.sendMessage(irc.ERR_NOTREGISTERED, cmd, ":You have not registered")
return {}
if not params:
user.sendMessage(irc.ERR_NEEDMOREPARAMS, cmd, ":Not enough parameters")
return {}
if len(params) < 2:
user.sendMessage(irc.ERR_NOTEXTTOSEND, ":No text to send")
return {}
targetChans = []
targetUsers = []
targetChanModifiers = []
for target in params[0].split(","):
if target in self.ircd.users:
targetUsers.append(self.ircd.users[target])
elif target in self.ircd.channels:
targetChans.append(self.ircd.channels[target])
targetChanModifiers.append("")
elif target[0] in self.ircd.prefix_symbols and target[1:] in self.ircd.channels:
targetChans.append(self.ircd.channels[target[1:]])
targetChanModifiers.append(target[0])
else:
user.sendMessage(irc.ERR_NOSUCHNICK, target, ":No such nick/channel")
return {
"user": user,
"targetchan": targetChans,
"chanmod": targetChanModifiers,
"targetuser": targetUsers,
"message": params[1]
}
class PrivMsgCommand(Command):
def __init__(self, msgHandler):
self.msg_handler = msgHandler
def onUse(self, user, data):
self.msg_handler.onUse("PRIVMSG", user, data)
def processParams(self, user, params):
return self.msg_handler.processParams("PRIVMSG", user, params)
class NoticeCommand(Command):
def __init__(self, msgHandler):
self.msg_handler = msgHandler
def onUse(self, user, data):
self.msg_handler.onUse("NOTICE", user, data)
def processParams(self, user, params):
return self.msg_handler.processParams("NOTICE", user, params)
class Spawner(object):
def __init__(self, ircd):
self.ircd = ircd
def spawn(self):
messageHandler = MessageCommand(self.ircd)
return {
"commands": {
"PRIVMSG": PrivMsgCommand(messageHandler),
"NOTICE": NoticeCommand(messageHandler)
}
}
def cleanup(self):
del self.ircd.commands["PRIVMSG"]
del self.ircd.commands["NOTICE"] | Python | 0.000188 |
376b8aa5b77066e06c17f41d65fe32a3c2bdef1f | Add a default value to the header limit | geo.py | geo.py | #! /usr/bin/python3
# -*- coding-utf-8 -*-
"""
This script transform a md into a plain html in the context of a
documentation for Kit&Pack.
"""
import mmap
import yaml
print("---------------------------- geo --")
print("-- by antoine.delhomme@espci.org --")
print("-----------------------------------")
doc_in = "./001-v2-doc.md"
class geoReader():
def __init__(self, doc_in):
self.doc_in = doc_in
self.header = None
self.header_limit = -1
def __enter__(self):
"""Open the file.
"""
self.f = open(self.doc_in, 'r')
return self
def __exit__(self, type, value, traceback):
"""Close the file.
"""
self.f.close()
def parseHeader(self):
"""Parse the header of the file.
"""
s = mmap.mmap(self.f.fileno(), 0, access=mmap.ACCESS_READ)
self.header_limit = s.find(b'---')
if self.header_limit != -1:
self.header = yaml.load(s[0:self.header_limit])
print(self.header['name'])
else:
print("Cannot load the header")
# Read the document
with geoReader(doc_in) as g:
g.parseHeader()
| #! /usr/bin/python3
# -*- coding-utf-8 -*-
"""
This script transform a md into a plain html in the context of a
documentation for Kit&Pack.
"""
import mmap
import yaml
print("---------------------------- geo --")
print("-- by antoine.delhomme@espci.org --")
print("-----------------------------------")
doc_in = "./001-v2-doc.md"
class geoReader():
def __init__(self, doc_in):
self.doc_in = doc_in
self.header = None
def __enter__(self):
"""Open the file.
"""
self.f = open(self.doc_in, 'r')
return self
def __exit__(self, type, value, traceback):
"""Close the file.
"""
self.f.close()
def parseHeader(self):
"""Parse the header of the file.
"""
s = mmap.mmap(self.f.fileno(), 0, access=mmap.ACCESS_READ)
self.header_limit = s.find(b'---')
if self.header_limit != -1:
self.header = yaml.load(s[0:self.header_limit])
print(self.header['name'])
else:
print("Cannot load the header")
# Read the document
with geoReader(doc_in) as g:
g.parseHeader()
| Python | 0.000001 |
8f3ff0cfd70bfe4eaa9e017323971bad453c93f5 | set edit as bot | trunk/toolserver/pui.py | trunk/toolserver/pui.py | #!usr/bin/python
# -*- coding: utf-8 -*
#
# (C) Legoktm 2008-2009, MIT License
#
import re, sys, os
sys.path.append(os.environ['HOME'] + '/pythonwikibot')
#sys.path.append('/Users/kman/projects/pywikibot')
import wiki
wiki.setUser('Legobot')
page = wiki.Page('Wikipedia:Possibly unfree images')
try:
wikitext = state0 = page.get()
except wiki.IsRedirectPage:
page = wiki.Page('Wikipedia:Possibly unfree files')
wikitext = state0 = page.get()
wikitext = re.compile(r'\n==New listings==', re.IGNORECASE).sub(r'\n*[[/{{subst:#time:Y F j|-14 days}}]]\n==New listings==', wikitext)
EditMsg = 'Adding new day to holding cell'
wiki.showDiff(state0, wikitext)
page.put(wikitext,EditMsg, bot=True)
| #!usr/bin/python
# -*- coding: utf-8 -*
#
# (C) Legoktm 2008-2009, MIT License
#
import re, sys, os
sys.path.append(os.environ['HOME'] + '/pythonwikibot')
#sys.path.append('/Users/kman/projects/pywikibot')
import wiki
wiki.setUser('Legobot')
page = wiki.Page('Wikipedia:Possibly unfree images')
try:
wikitext = state0 = page.get()
except wiki.IsRedirectPage:
page = wiki.Page('Wikipedia:Possibly unfree files')
wikitext = state0 = page.get()
wikitext = re.compile(r'\n==New listings==', re.IGNORECASE).sub(r'\n*[[/{{subst:#time:Y F j|-14 days}}]]\n==New listings==', wikitext)
EditMsg = 'Adding new day to holding cell'
wiki.showDiff(state0, wikitext)
page.put(wikitext,EditMsg)
| Python | 0.000002 |
050cc7a74e68b0515ceac1f53cbc20aa6e6cd498 | Create a Session object. | octohat/connection.py | octohat/connection.py | # Copyright (c) 2013 Alon Swartz <alon@turnkeylinux.org>
#
# This file is part of OctoHub.
#
# OctoHub is free software; you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation; either version 3 of the License, or (at your option) any later
# version.
import requests
from .response import parse_response
class Pager(object):
def __init__(self, conn, uri, params, max_pages=0):
"""Iterator object handling pagination of Connection.send (method: GET)
conn (octohub.Connection): Connection object
uri (str): Request URI (e.g., /user/issues)
params (dict): Parameters to include in request
max_pages (int): Maximum amount of pages to get (0 for all)
"""
self.conn = conn
self.uri = uri
self.params = params
self.max_pages = max_pages
self.count = 0
def __iter__(self):
while True:
self.count += 1
response = self.conn.send('GET', self.uri, self.params)
yield response
if self.count == self.max_pages:
break
if not 'next' in list(response.parsed_link.keys()):
break
self.uri = response.parsed_link.next.uri
self.params = response.parsed_link.next.params
class Connection(object):
def __init__(self, token=None):
"""OctoHub connection
token (str): GitHub Token (anonymous if not provided)
"""
self.endpoint = 'https://api.github.com'
self.headers = {'User-Agent': 'octohub'}
self.session = requests.Session()
if token:
self.headers['Authorization'] = 'token %s' % token
def send(self, method, uri, params={}, data=None):
"""Prepare and send request
method (str): Request HTTP method (e.g., GET, POST, DELETE, ...)
uri (str): Request URI (e.g., /user/issues)
params (dict): Parameters to include in request
data (str | file type object): data to include in request
returns: requests.Response object, including:
response.parsed (AttrDict): parsed response when applicable
response.parsed_link (AttrDict): parsed header link when applicable
http://docs.python-requests.org/en/latest/api/#requests.Response
"""
url = self.endpoint + uri
kwargs = {'headers': self.headers, 'params': params, 'data': data}
response = self.session.request(method, url, **kwargs)
return parse_response(response)
| # Copyright (c) 2013 Alon Swartz <alon@turnkeylinux.org>
#
# This file is part of OctoHub.
#
# OctoHub is free software; you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation; either version 3 of the License, or (at your option) any later
# version.
import requests
from .response import parse_response
class Pager(object):
def __init__(self, conn, uri, params, max_pages=0):
"""Iterator object handling pagination of Connection.send (method: GET)
conn (octohub.Connection): Connection object
uri (str): Request URI (e.g., /user/issues)
params (dict): Parameters to include in request
max_pages (int): Maximum amount of pages to get (0 for all)
"""
self.conn = conn
self.uri = uri
self.params = params
self.max_pages = max_pages
self.count = 0
def __iter__(self):
while True:
self.count += 1
response = self.conn.send('GET', self.uri, self.params)
yield response
if self.count == self.max_pages:
break
if not 'next' in list(response.parsed_link.keys()):
break
self.uri = response.parsed_link.next.uri
self.params = response.parsed_link.next.params
class Connection(object):
def __init__(self, token=None):
"""OctoHub connection
token (str): GitHub Token (anonymous if not provided)
"""
self.endpoint = 'https://api.github.com'
self.headers = {'User-Agent': 'octohub'}
if token:
self.headers['Authorization'] = 'token %s' % token
def send(self, method, uri, params={}, data=None):
"""Prepare and send request
method (str): Request HTTP method (e.g., GET, POST, DELETE, ...)
uri (str): Request URI (e.g., /user/issues)
params (dict): Parameters to include in request
data (str | file type object): data to include in request
returns: requests.Response object, including:
response.parsed (AttrDict): parsed response when applicable
response.parsed_link (AttrDict): parsed header link when applicable
http://docs.python-requests.org/en/latest/api/#requests.Response
"""
url = self.endpoint + uri
kwargs = {'headers': self.headers, 'params': params, 'data': data}
response = requests.request(method, url, **kwargs)
return parse_response(response)
| Python | 0 |
20cf699df5d81eec071254cdaac13ad7ad49909a | fix reducer dereference if replace_reducer is used | pydux/create_store.py | pydux/create_store.py | """
python + redux == pydux
Redux: http://redux.js.org
A somewhat literal translation of Redux.
Closures in Python are over references, as opposed to
names in JavaScript, so they are read-only. Single-
element arrays are used to create read/write closures.
"""
class ActionTypes(object):
INIT = '@@redux/INIT'
class StoreDict(dict):
def get_state(self):
return self['get_state']()
def subscribe(self, listener):
return self['subscribe'](listener)
def dispatch(self, action):
return self['dispatch'](action)
def replace_reducer(self, next_reducer):
return self['replace_reducer'](next_reducer)
def create_store(reducer, initial_state=None, enhancer=None):
"""
redux in a nutshell.
observable has been omitted.
Args:
reducer: root reducer function for the state tree
initial_state: optional initial state data
enhancer: optional enhancer function for middleware etc.
Returns:
a Pydux store
"""
if enhancer is not None:
if not hasattr(enhancer, '__call__'):
raise TypeError('Expected the enhancer to be a function.')
return enhancer(create_store)(reducer)
if not hasattr(reducer, '__call__'):
raise TypeError('Expected the reducer to be a function.')
# single-element arrays for r/w closure
current_reducer = [reducer]
current_state = [initial_state]
current_listeners = [[]]
next_listeners = [current_listeners[0]]
is_dispatching = [False]
def ensure_can_mutate_next_listeners():
if next_listeners[0] == current_listeners[0]:
next_listeners[0] = current_listeners[0][:]
def get_state():
return current_state[0]
def subscribe(listener):
if not hasattr(listener, '__call__'):
raise TypeError('Expected listener to be a function.')
is_subscribed = [True] # r/w closure
ensure_can_mutate_next_listeners()
next_listeners[0].append(listener)
def unsubcribe():
if not is_subscribed[0]:
return
is_subscribed[0] = False
ensure_can_mutate_next_listeners()
index = next_listeners[0].index(listener)
next_listeners[0].pop(index)
return unsubcribe
def dispatch(action):
if not isinstance(action, dict):
raise TypeError('Actions must be a dict. '
'Use custom middleware for async actions.')
if 'type' not in action:
raise ValueError('Actions must have a "type" property. '
'Have you misspelled a constant?')
if is_dispatching[0]:
raise Exception('Reducers may not dispatch actions.')
try:
is_dispatching[0] = True
current_state[0] = current_reducer[0](current_state[0], action)
finally:
is_dispatching[0] = False
listeners = current_listeners[0] = next_listeners[0]
for listener in listeners:
listener()
return action
def replace_reducer(next_reducer):
if not hasattr(next_reducer, '__call__'):
raise TypeError('Expected the next_reducer to be a function')
current_reducer[0] = next_reducer
dispatch({'type': ActionTypes.INIT})
dispatch({'type': ActionTypes.INIT})
return StoreDict(
dispatch=dispatch,
subscribe=subscribe,
get_state=get_state,
replace_reducer=replace_reducer,
)
| """
python + redux == pydux
Redux: http://redux.js.org
A somewhat literal translation of Redux.
Closures in Python are over references, as opposed to
names in JavaScript, so they are read-only. Single-
element arrays are used to create read/write closures.
"""
class ActionTypes(object):
INIT = '@@redux/INIT'
class StoreDict(dict):
def get_state(self):
return self['get_state']()
def subscribe(self, listener):
return self['subscribe'](listener)
def dispatch(self, action):
return self['dispatch'](action)
def replace_reducer(self, next_reducer):
return self['replace_reducer'](next_reducer)
def create_store(reducer, initial_state=None, enhancer=None):
"""
redux in a nutshell.
observable has been omitted.
Args:
reducer: root reducer function for the state tree
initial_state: optional initial state data
enhancer: optional enhancer function for middleware etc.
Returns:
a Pydux store
"""
if enhancer is not None:
if not hasattr(enhancer, '__call__'):
raise TypeError('Expected the enhancer to be a function.')
return enhancer(create_store)(reducer)
if not hasattr(reducer, '__call__'):
raise TypeError('Expected the reducer to be a function.')
# single-element arrays for r/w closure
current_reducer = [reducer]
current_state = [initial_state]
current_listeners = [[]]
next_listeners = [current_listeners[0]]
is_dispatching = [False]
def ensure_can_mutate_next_listeners():
if next_listeners[0] == current_listeners[0]:
next_listeners[0] = current_listeners[0][:]
def get_state():
return current_state[0]
def subscribe(listener):
if not hasattr(listener, '__call__'):
raise TypeError('Expected listener to be a function.')
is_subscribed = [True] # r/w closure
ensure_can_mutate_next_listeners()
next_listeners[0].append(listener)
def unsubcribe():
if not is_subscribed[0]:
return
is_subscribed[0] = False
ensure_can_mutate_next_listeners()
index = next_listeners[0].index(listener)
next_listeners[0].pop(index)
return unsubcribe
def dispatch(action):
if not isinstance(action, dict):
raise TypeError('Actions must be a dict. '
'Use custom middleware for async actions.')
if 'type' not in action:
raise ValueError('Actions must have a "type" property. '
'Have you misspelled a constant?')
if is_dispatching[0]:
raise Exception('Reducers may not dispatch actions.')
try:
is_dispatching[0] = True
current_state[0] = reducer(current_state[0], action)
finally:
is_dispatching[0] = False
listeners = current_listeners[0] = next_listeners[0]
for listener in listeners:
listener()
return action
def replace_reducer(next_reducer):
if not hasattr(next_reducer, '__call__'):
raise TypeError('Expected the next_reducer to be a function')
current_reducer[0] = next_reducer
dispatch({'type': ActionTypes.INIT})
dispatch({'type': ActionTypes.INIT})
return StoreDict(
dispatch=dispatch,
subscribe=subscribe,
get_state=get_state,
replace_reducer=replace_reducer,
)
| Python | 0 |
fb51b056ce909028ee4b0cc3e790ae202d8711af | Update P06_factorialLog disable logging | books/AutomateTheBoringStuffWithPython/Chapter10/P06_factorialLog.py | books/AutomateTheBoringStuffWithPython/Chapter10/P06_factorialLog.py | # This program calculates factorial and logs debug messages
import logging
logging.basicConfig(level=logging.DEBUG, format=" %(asctime)s - %(levelname)s - %(message)s")
logging.disable(logging.CRITICAL) # Stop logging, comment out to debug
logging.debug("Start of program")
def factorial(n):
logging.debug("Start of factorial(%s%%)" % n)
total = 1
for i in range(1, n + 1):
total *= i
logging.debug("i is " + str(i) + ", total is " + str(total))
logging.debug("End of factorial(%s%%)" % n)
return total
print(factorial(5))
logging.debug("End of program")
| # This program calculates factorial and logs debug messages
import logging
logging.basicConfig(level=logging.DEBUG, format=" %(asctime)s - %(levelname)s - %(message)s")
logging.debug("Start of program")
def factorial(n):
logging.debug("Start of factorial(%s%%)" % n)
total = 1
for i in range(1, n + 1):
total *= i
logging.debug("i is " + str(i) + ", total is " + str(total))
logging.debug("End of factorial(%s%%)" % n)
return total
print(factorial(5))
logging.debug("End of program")
| Python | 0 |
acee3e41b45198af4b8a11f5a75bcd62e49864e2 | fix path | tumblr/spiders/index.py | tumblr/spiders/index.py | # encoding:utf-8
import json
import scrapy
from lxml import etree
from scrapy.http.request import Request
stream_cursor = "eyJGb2xsb3dlZFNlYXJjaFBvc3QiOltdLCJiZWZvcmVfaWQiOiIxNjI2ODY4NDM3NDMifQ%3D%3D"
with open("config.json", "r") as f:
configData = json.loads(f.read(-1))
default_cookie = configData["cookies"]
maxPage = configData["maxPage"]
cookieObj = {}
cookieList = default_cookie.split(";")
for pair in cookieList:
cookieObj[pair.split("=")[0]] = pair.split("=")[1]
video_url_list = set()
start_url_list = []
class Index(scrapy.spiders.Spider):
name = "index"
allowed_domains = ["tumblr.com", "taobao.com", "tmall.com"]
start_urls = [
"https://www.tumblr.com/dashboard"
]
def start_requests(self):
for url in self.start_urls:
yield Request(url, cookies=cookieObj)
def parse(self, response):
if len(response.url.split("svc")) == 1:
body = response.body
html = etree.HTML(body)
video_list = html.xpath("//source")
for video in video_list:
video_name = video.xpath("@src")[0].split("tumblr_")[1].split("/")[0]
video_url = "https://vtt.tumblr.com/tumblr_" + video_name + ".mp4"
video_url_list.add(video_url)
next_index = "2"
next_timestamp = response.body.split("/dashboard/2")[1].split("\"")[0][1:]
url = "https://www.tumblr.com/svc/dashboard/" + next_index + "/" + next_timestamp + \
"?nextAdPos=8&stream_cursor=" + stream_cursor
yield Request(url, callback=self.parse, cookies=cookieObj)
else:
body = json.loads(response.body)['response']['DashboardPosts']['body']
html = etree.HTML(body)
video_list = html.xpath("//source")
for video in video_list:
video_name = video.xpath("@src")[0].split("tumblr_")[1].split("/")[0]
video_url = "https://vtt.tumblr.com/tumblr_" + video_name + ".mp4"
video_url_list.add(video_url)
with open("data.json", 'wb') as f:
try:
f.write(json.dumps(list(video_url_list)))
except Exception, e:
print("error in result", e)
try:
next_index = json.loads(response.body)['meta']['tumblr_next_page'].split('/')[3]
if int(next_index) > int(maxPage):
return
next_timestamp = json.loads(response.body)['meta']['tumblr_next_page'].split('/')[4]
url = "https://www.tumblr.com/svc/dashboard/" + next_index + "/" + next_timestamp + \
"?nextAdPos=8&stream_cursor=" + stream_cursor
yield Request(url, callback=self.parse, cookies=cookieObj)
except Exception, e:
print("error in result", e)
| # encoding:utf-8
import json
import requests
import scrapy
from lxml import etree
from scrapy.http.request import Request
stream_cursor = "eyJGb2xsb3dlZFNlYXJjaFBvc3QiOltdLCJiZWZvcmVfaWQiOiIxNjI2ODY4NDM3NDMifQ%3D%3D"
with open("../../config.json", "r") as f:
configData = json.loads(f.read(-1))
default_cookie = configData["cookies"]
maxPage = configData["maxPage"]
cookieObj = {}
cookieList = default_cookie.split(";")
for pair in cookieList:
cookieObj[pair.split("=")[0]] = pair.split("=")[1]
video_url_list = set()
start_url_list = []
def fetch_stream(url, file_name):
r = requests.get(url)
with open("../../download" + file_name, "wb") as code:
code.write(r.content)
class Index(scrapy.spiders.Spider):
name = "index"
allowed_domains = ["tumblr.com", "taobao.com", "tmall.com"]
start_urls = [
"https://www.tumblr.com/dashboard"
]
def start_requests(self):
for url in self.start_urls:
yield Request(url, cookies=cookieObj)
def parse(self, response):
if len(response.url.split("svc")) == 1:
body = response.body
html = etree.HTML(body)
video_list = html.xpath("//source")
for video in video_list:
video_name = video.xpath("@src")[0].split("tumblr_")[1].split("/")[0]
video_url = "https://vtt.tumblr.com/tumblr_" + video_name + ".mp4"
video_url_list.add(video_url)
next_index = "2"
next_timestamp = response.body.split("/dashboard/2")[1].split("\"")[0][1:]
url = "https://www.tumblr.com/svc/dashboard/" + next_index + "/" + next_timestamp + \
"?nextAdPos=8&stream_cursor=" + stream_cursor
yield Request(url, callback=self.parse, cookies=cookieObj)
else:
body = json.loads(response.body)['response']['DashboardPosts']['body']
html = etree.HTML(body)
video_list = html.xpath("//source")
for video in video_list:
video_name = video.xpath("@src")[0].split("tumblr_")[1].split("/")[0]
video_url = "https://vtt.tumblr.com/tumblr_" + video_name + ".mp4"
video_url_list.add(video_url)
with open("../../data.json", 'wb') as f:
try:
f.write(json.dumps(list(video_url_list)))
except Exception, e:
print("error in result", e)
try:
next_index = json.loads(response.body)['meta']['tumblr_next_page'].split('/')[3]
if int(next_index) > int(maxPage):
if autoDownload:
yield self.final()
return
next_timestamp = json.loads(response.body)['meta']['tumblr_next_page'].split('/')[4]
url = "https://www.tumblr.com/svc/dashboard/" + next_index + "/" + next_timestamp + \
"?nextAdPos=8&stream_cursor=" + stream_cursor
yield Request(url, callback=self.parse, cookies=cookieObj)
except Exception, e:
print("error in result", e)
| Python | 0.000017 |
b202b5faa2f378d3c2b771914c043255e8e66a61 | Update venmo meta | modules/sfp_venmo.py | modules/sfp_venmo.py | #-------------------------------------------------------------------------------
# Name: sfp_venmo
# Purpose: Gather user information from Venmo API.
#
# Author: <bcoles@gmail.com>
#
# Created: 2019-07-16
# Copyright: (c) bcoles 2019
# Licence: GPL
#-------------------------------------------------------------------------------
import json
import time
from sflib import SpiderFoot, SpiderFootPlugin, SpiderFootEvent
class sfp_venmo(SpiderFootPlugin):
"""Venmo:Footprint,Investigate,Passive:Social Media::Gather user information from Venmo API."""
meta = {
'name': "Venmo",
'summary': "Gather user information from Venmo API.",
'flags': [ "" ],
'useCases': [ "Footprint", "Investigate", "Passive" ],
'categories': [ "Social Media" ],
'dataSource': {
'website': "https://venmo.com/",
'model': "FREE_NOAUTH_UNLIMITED",
'references': [],
'favIcon': "https://d1v6x81qdeozhc.cloudfront.net/static/images/logo/apple-touch-icon-1a10ee4b947b728d54265ac8c5084f78.png",
'logo': "https://d1v6x81qdeozhc.cloudfront.net/static/images/logo/apple-touch-icon-1a10ee4b947b728d54265ac8c5084f78.png",
'description': "Venmo is a digital wallet that allows you to send money and make purchases at approved merchants\n",
}
}
# Default options
opts = {
}
# Option descriptions
optdescs = {
}
results = None
def setup(self, sfc, userOpts=dict()):
self.sf = sfc
self.results = self.tempStorage()
for opt in list(userOpts.keys()):
self.opts[opt] = userOpts[opt]
# What events is this module interested in for input
def watchedEvents(self):
return [ 'USERNAME' ]
# What events this module produces
def producedEvents(self):
return [ 'RAW_RIR_DATA' ]
# Query Venmo API
def query(self, qry):
res = self.sf.fetchUrl('https://api.venmo.com/v1/users/' + qry,
timeout=self.opts['_fetchtimeout'],
useragent=self.opts['_useragent'])
time.sleep(1)
if res['content'] is None:
self.sf.debug('No response from api.venmo.com')
return None
try:
data = json.loads(res['content'])
except BaseException as e:
self.sf.debug('Error processing JSON response: ' + str(e))
return None
json_data = data.get('data')
if not json_data:
self.sf.debug(qry + " is not a valid Venmo username")
return None
return json_data
# Handle events sent to this module
def handleEvent(self, event):
eventName = event.eventType
srcModuleName = event.module
eventData = event.data
if eventData in self.results:
return None
self.results[eventData] = True
self.sf.debug("Received event, %s, from %s" % (eventName, srcModuleName))
data = self.query(eventData)
if not data:
return None
e = SpiderFootEvent('RAW_RIR_DATA', str(data), self.__name__, event)
self.notifyListeners(e)
display_name = data.get('display_name')
if display_name:
evt = SpiderFootEvent('RAW_RIR_DATA',
'Possible full name: ' + display_name,
self.__name__, event)
self.notifyListeners(evt)
# End of sfp_venmo class
| #-------------------------------------------------------------------------------
# Name: sfp_venmo
# Purpose: Gather user information from Venmo API.
#
# Author: <bcoles@gmail.com>
#
# Created: 2019-07-16
# Copyright: (c) bcoles 2019
# Licence: GPL
#-------------------------------------------------------------------------------
import json
import time
from sflib import SpiderFoot, SpiderFootPlugin, SpiderFootEvent
class sfp_venmo(SpiderFootPlugin):
"""Venmo:Footprint,Investigate,Passive:Social Media::Gather user information from Venmo API."""
meta = {
'name': "Venmo",
'summary': "Gather user information from Venmo API.",
'flags': [ "" ],
'useCases': [ "Footprint", "Investigate", "Passive" ],
'categories': [ "Social Media" ]
}
# Default options
opts = {
}
# Option descriptions
optdescs = {
}
results = None
def setup(self, sfc, userOpts=dict()):
self.sf = sfc
self.results = self.tempStorage()
for opt in list(userOpts.keys()):
self.opts[opt] = userOpts[opt]
# What events is this module interested in for input
def watchedEvents(self):
return [ 'USERNAME' ]
# What events this module produces
def producedEvents(self):
return [ 'RAW_RIR_DATA' ]
# Query Venmo API
def query(self, qry):
res = self.sf.fetchUrl('https://api.venmo.com/v1/users/' + qry,
timeout=self.opts['_fetchtimeout'],
useragent=self.opts['_useragent'])
time.sleep(1)
if res['content'] is None:
self.sf.debug('No response from api.venmo.com')
return None
try:
data = json.loads(res['content'])
except BaseException as e:
self.sf.debug('Error processing JSON response: ' + str(e))
return None
json_data = data.get('data')
if not json_data:
self.sf.debug(qry + " is not a valid Venmo username")
return None
return json_data
# Handle events sent to this module
def handleEvent(self, event):
eventName = event.eventType
srcModuleName = event.module
eventData = event.data
if eventData in self.results:
return None
self.results[eventData] = True
self.sf.debug("Received event, %s, from %s" % (eventName, srcModuleName))
data = self.query(eventData)
if not data:
return None
e = SpiderFootEvent('RAW_RIR_DATA', str(data), self.__name__, event)
self.notifyListeners(e)
display_name = data.get('display_name')
if display_name:
evt = SpiderFootEvent('RAW_RIR_DATA',
'Possible full name: ' + display_name,
self.__name__, event)
self.notifyListeners(evt)
# End of sfp_venmo class
| Python | 0.000001 |
78647441c861eb59d1d25bd6284a2814903a7783 | Read real tab_size from settings instead of using constant value | plugin/formatting.py | plugin/formatting.py | import sublime_plugin
from .core.protocol import Request, Range
from .core.url import filename_to_uri
from .core.clients import client_for_view
from .core.configurations import is_supported_view
class LspFormatDocumentCommand(sublime_plugin.TextCommand):
def is_enabled(self):
if is_supported_view(self.view):
client = client_for_view(self.view)
if client and client.has_capability('documentFormattingProvider'):
return True
return False
def run(self, edit):
client = client_for_view(self.view)
if client:
pos = self.view.sel()[0].begin()
params = {
"textDocument": {
"uri": filename_to_uri(self.view.file_name())
},
"options": {
"tabSize": self.view.settings().get("tab_size", 4),
"insertSpaces": True
}
}
request = Request.formatting(params)
client.send_request(
request, lambda response: self.handle_response(response, pos))
def handle_response(self, response, pos):
self.view.run_command('lsp_apply_document_edit',
{'changes': response})
class LspFormatDocumentRangeCommand(sublime_plugin.TextCommand):
def is_enabled(self):
if is_supported_view(self.view):
client = client_for_view(self.view)
if client and client.has_capability('documentRangeFormattingProvider'):
if len(self.view.sel()) == 1:
region = self.view.sel()[0]
if region.begin() != region.end():
return True
return False
def run(self, _):
client = client_for_view(self.view)
if client:
region = self.view.sel()[0]
params = {
"textDocument": {
"uri": filename_to_uri(self.view.file_name())
},
"range": Range.from_region(self.view, region).to_lsp(),
"options": {
"tabSize": self.view.settings().get("tab_size", 4),
"insertSpaces": True
}
}
client.send_request(Request.rangeFormatting(params),
lambda response: self.view.run_command('lsp_apply_document_edit',
{'changes': response}))
| import sublime_plugin
from .core.protocol import Request, Range
from .core.url import filename_to_uri
from .core.clients import client_for_view
from .core.configurations import is_supported_view
class LspFormatDocumentCommand(sublime_plugin.TextCommand):
def is_enabled(self):
if is_supported_view(self.view):
client = client_for_view(self.view)
if client and client.has_capability('documentFormattingProvider'):
return True
return False
def run(self, edit):
client = client_for_view(self.view)
if client:
pos = self.view.sel()[0].begin()
params = {
"textDocument": {
"uri": filename_to_uri(self.view.file_name())
},
"options": {
"tabSize": 4, # TODO: Fetch these from the project settings / global settings
"insertSpaces": True
}
}
request = Request.formatting(params)
client.send_request(
request, lambda response: self.handle_response(response, pos))
def handle_response(self, response, pos):
self.view.run_command('lsp_apply_document_edit',
{'changes': response})
class LspFormatDocumentRangeCommand(sublime_plugin.TextCommand):
def is_enabled(self):
if is_supported_view(self.view):
client = client_for_view(self.view)
if client and client.has_capability('documentRangeFormattingProvider'):
if len(self.view.sel()) == 1:
region = self.view.sel()[0]
if region.begin() != region.end():
return True
return False
def run(self, _):
client = client_for_view(self.view)
if client:
region = self.view.sel()[0]
params = {
"textDocument": {
"uri": filename_to_uri(self.view.file_name())
},
"range": Range.from_region(self.view, region).to_lsp(),
"options": {
"tabSize": 4, # TODO: Fetch these from the project settings / global settings
"insertSpaces": True
}
}
client.send_request(Request.rangeFormatting(params),
lambda response: self.view.run_command('lsp_apply_document_edit',
{'changes': response}))
| Python | 0 |
aad2232c2dadf309d83ad38978d26f80c2bb5782 | Return more informative message when either path or filename is not found | pysteps/io/archive.py | pysteps/io/archive.py | """Utilities for finding archived files that match the given criteria."""
from datetime import datetime, timedelta
import fnmatch
import os
def find_by_date(date, root_path, path_fmt, fn_pattern, fn_ext, timestep,
num_prev_files=0, num_next_files=0):
"""List input files whose timestamp matches the given date.
Parameters
----------
date : datetime.datetime
The given date.
root_path : str
The root path to search the input files.
path_fmt : str
Path format. It may consist of directory names separated by '/',
date/time specifiers beginning with '%' (e.g. %Y/%m/%d) and wildcards
(?) that match any single character.
fn_pattern : str
The name pattern of the input files without extension. The pattern can
contain time specifiers (e.g. %H, %M and %S).
fn_ext : str
Extension of the input files.
timestep : float
Time step between consecutive input files (minutes).
num_prev_files : int
Optional, number of previous files to find before the given timestamp.
num_next_files : int
Optional, number of future files to find after the given timestamp.
Returns
-------
out : tuple
If num_prev_files=0 and num_next_files=0, return a pair containing the
found file name and the corresponding timestamp as a datetime.datetime
object. Otherwise, return a tuple of two lists, the first one for the
file names and the second one for the corresponding timestemps. The lists
are sorted in ascending order with respect to timestamp. A None value is
assigned if a file name corresponding to a given timestamp is not found.
"""
filenames = []
timestamps = []
for i in range(num_prev_files+num_next_files+1):
curdate = date + timedelta(minutes=num_next_files*timestep) - timedelta(minutes=i*timestep)
fn = _find_matching_filename(curdate, root_path, path_fmt, fn_pattern, fn_ext)
filenames.append(fn)
timestamps.append(curdate)
if all(filename is None for filename in filenames):
raise IOError("no input data found in %s" % root_path)
if (num_prev_files+num_next_files) > 0:
return (filenames[::-1], timestamps[::-1])
else:
return (filenames, timestamps)
def _find_matching_filename(date, root_path, path_fmt, fn_pattern, fn_ext):
path = _generate_path(date, root_path, path_fmt)
fn = None
if os.path.exists(path):
fn = datetime.strftime(date, fn_pattern) + '.' + fn_ext
# test for wildcars
if '?' in fn:
filenames = os.listdir(path)
if len(filenames) > 0:
for filename in filenames:
if fnmatch.fnmatch(filename, fn):
fn = filename
break
fn = os.path.join(path, fn)
if os.path.exists(fn):
fn = fn
else:
print('filename for date %s not found in %s' % (date,path))
fn = None
else:
print('path', path, 'not found.')
return fn
def _generate_path(date, root_path, path_fmt):
f = lambda t: datetime.strftime(date, t) if t[0] == '%' else t
if path_fmt != "":
tokens = [f(t) for t in path_fmt.split('/')]
subpath = os.path.join(*tokens)
return os.path.join(root_path, subpath)
else:
return root_path
| """Utilities for finding archived files that match the given criteria."""
from datetime import datetime, timedelta
import fnmatch
import os
def find_by_date(date, root_path, path_fmt, fn_pattern, fn_ext, timestep,
num_prev_files=0, num_next_files=0):
"""List input files whose timestamp matches the given date.
Parameters
----------
date : datetime.datetime
The given date.
root_path : str
The root path to search the input files.
path_fmt : str
Path format. It may consist of directory names separated by '/',
date/time specifiers beginning with '%' (e.g. %Y/%m/%d) and wildcards
(?) that match any single character.
fn_pattern : str
The name pattern of the input files without extension. The pattern can
contain time specifiers (e.g. %H, %M and %S).
fn_ext : str
Extension of the input files.
timestep : float
Time step between consecutive input files (minutes).
num_prev_files : int
Optional, number of previous files to find before the given timestamp.
num_next_files : int
Optional, number of future files to find after the given timestamp.
Returns
-------
out : tuple
If num_prev_files=0 and num_next_files=0, return a pair containing the
found file name and the corresponding timestamp as a datetime.datetime
object. Otherwise, return a tuple of two lists, the first one for the
file names and the second one for the corresponding timestemps. The lists
are sorted in ascending order with respect to timestamp. A None value is
assigned if a file name corresponding to a given timestamp is not found.
"""
filenames = []
timestamps = []
for i in range(num_prev_files+num_next_files+1):
curdate = date + timedelta(minutes=num_next_files*timestep) - timedelta(minutes=i*timestep)
fn = _find_matching_filename(curdate, root_path, path_fmt, fn_pattern, fn_ext)
filenames.append(fn)
timestamps.append(curdate)
if all(filename is None for filename in filenames):
raise IOError("no input data found in %s" % root_path)
if (num_prev_files+num_next_files) > 0:
return (filenames[::-1], timestamps[::-1])
else:
return (filenames, timestamps)
def _find_matching_filename(date, root_path, path_fmt, fn_pattern, fn_ext):
path = _generate_path(date, root_path, path_fmt)
fn = None
if os.path.exists(path):
fn = datetime.strftime(date, fn_pattern) + '.' + fn_ext
# test for wildcars
if '?' in fn:
filenames = os.listdir(path)
if len(filenames) > 0:
for filename in filenames:
if fnmatch.fnmatch(filename, fn):
fn = filename
break
fn = os.path.join(path, fn)
fn = fn if os.path.exists(fn) else None
return fn
def _generate_path(date, root_path, path_fmt):
f = lambda t: datetime.strftime(date, t) if t[0] == '%' else t
if path_fmt != "":
tokens = [f(t) for t in path_fmt.split('/')]
subpath = os.path.join(*tokens)
return os.path.join(root_path, subpath)
else:
return root_path
| Python | 0.000005 |
6facb0f33a8cf53041d9fa1562376e43e6d6194f | add init for smiles2graph | ogb/utils/__init__.py | ogb/utils/__init__.py | try:
from .mol import smiles2graph
except ImportError:
pass | Python | 0.000003 | |
fdae17a50223c2f9b8ba4a665fc24726e2c2ce14 | Add auth header to the fixture loader | tests/lib/es_tools.py | tests/lib/es_tools.py | """ Commands for interacting with Elastic Search """
# pylint: disable=broad-except
from os.path import join
import requests
from lib.tools import TEST_FOLDER
def es_is_available():
""" Test if Elastic Search is running """
try:
return (
requests.get("http://localhost:9200", auth=("elastic", "changeme")).json()[
"tagline"
]
== "You Know, for Search"
)
except Exception:
return False
def load_json_file(filename):
""" Load JSON file into Elastic Search """
url = "http://localhost:9200/_bulk"
path = join(TEST_FOLDER, "data", filename)
headers = {"Content-Type": "application/x-ndjson"}
with open(path, "r") as handle:
body = handle.read().encode(encoding="utf-8")
return requests.post(
url, headers=headers, data=body, auth=("elastic", "changeme")
)
| """ Commands for interacting with Elastic Search """
# pylint: disable=broad-except
from os.path import join
import requests
from lib.tools import TEST_FOLDER
def es_is_available():
""" Test if Elastic Search is running """
try:
return (
requests.get("http://localhost:9200").json()["tagline"]
== "You Know, for Search"
)
except Exception:
return False
def load_json_file(filename):
""" Load JSON file into Elastic Search """
url = "http://localhost:9200/_bulk"
path = join(TEST_FOLDER, "data", filename)
headers = {"Content-Type": "application/x-ndjson"}
with open(path, "r") as handle:
body = handle.read().encode(encoding="utf-8")
return requests.post(url, headers=headers, data=body)
| Python | 0 |
13f802e959013cf31148399321dd84cc4070bf28 | Make input image update on change | qtgui/panels/panel.py | qtgui/panels/panel.py | import numpy as np
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QWidget, QComboBox
from PyQt5.QtWidgets import QVBoxLayout, QHBoxLayout, QGroupBox, QSplitter
from qtgui.widgets import QActivationView
from qtgui.widgets import QInputSelector, QInputInfoBox, QImageView
from qtgui.widgets import QNetworkView, QNetworkInfoBox
class Panel(QWidget):
'''Base class for different visualisation panels. In the future, the type of
visualisation should be a component in the panel, not a separate panel
class.
'''
def __init__(self, parent=None):
'''Initialization of the ActivationsView.
Parameters
----------
parent : QWidget
The parent argument is sent to the QWidget constructor.
'''
super().__init__(parent)
def initUI(self):
'''Initialise all UI elements. These are
* The ``QImageView`` showing the current input image
* A ``QInputSelector`` to show input controls
* A ``QNetworkView``, a widget to select a layer in a network
* A ``QInputInfoBox`` to display information about the input
'''
########################################################################
# User input #
########################################################################
self._input_view = QImageView(self)
# FIXME[layout]
# keep image view square (TODO: does this make sense for every input?)
self._input_view.heightForWidth = lambda w: w
self._input_view.hasHeightForWidth = lambda: True
# QNetworkInfoBox: a widget to select the input to the network
# (data array, image directory, webcam, ...)
# the 'next' button: used to load the next image
self._input_selector = QInputSelector()
self._input_info = QInputInfoBox()
# FIXME[layout]
self._input_info.setMinimumWidth(300)
input_layout = QVBoxLayout()
# FIXME[layout]
input_layout.setSpacing(0)
input_layout.setContentsMargins(0, 0, 0, 0)
input_layout.addWidget(self._input_view)
input_layout.addWidget(self._input_info)
input_layout.addWidget(self._input_selector)
input_box = QGroupBox('Input')
input_box.setLayout(input_layout)
self._input_box = input_box
########################################################################
# Network #
########################################################################
# networkview: a widget to select a network
self._network_view = QNetworkView()
self._network_selector = QComboBox()
self._network_layout = QVBoxLayout()
self._network_layout.addWidget(self._network_selector)
self._network_layout.addWidget(self._network_view)
self._network_box = QGroupBox('Network')
self._network_box.setLayout(self._network_layout)
def updateInput(self, data):
self._input_view.setImage(data)
def modelChanged(self, model):
current_input = model.get_input(model._current_index)
self.updateInput(current_input.data)
| import numpy as np
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QWidget, QComboBox
from PyQt5.QtWidgets import QVBoxLayout, QHBoxLayout, QGroupBox, QSplitter
from qtgui.widgets import QActivationView
from qtgui.widgets import QInputSelector, QInputInfoBox, QImageView
from qtgui.widgets import QNetworkView, QNetworkInfoBox
class Panel(QWidget):
'''Base class for different visualisation panels. In the future, the type of
visualisation should be a component in the panel, not a separate panel
class.
'''
def __init__(self, parent=None):
'''Initialization of the ActivationsView.
Parameters
----------
parent : QWidget
The parent argument is sent to the QWidget constructor.
'''
super().__init__(parent)
def initUI(self):
'''Initialise all UI elements. These are
* The ``QImageView`` showing the current input image
* A ``QInputSelector`` to show input controls
* A ``QNetworkView``, a widget to select a layer in a network
* A ``QInputInfoBox`` to display information about the input
'''
########################################################################
# User input #
########################################################################
self._input_view = QImageView(self)
# FIXME[layout]
# keep image view square (TODO: does this make sense for every input?)
self._input_view.heightForWidth = lambda w: w
self._input_view.hasHeightForWidth = lambda: True
# QNetworkInfoBox: a widget to select the input to the network
# (data array, image directory, webcam, ...)
# the 'next' button: used to load the next image
self._input_selector = QInputSelector()
self._input_info = QInputInfoBox()
# FIXME[layout]
self._input_info.setMinimumWidth(300)
input_layout = QVBoxLayout()
# FIXME[layout]
input_layout.setSpacing(0)
input_layout.setContentsMargins(0, 0, 0, 0)
input_layout.addWidget(self._input_view)
input_layout.addWidget(self._input_info)
input_layout.addWidget(self._input_selector)
input_box = QGroupBox('Input')
input_box.setLayout(input_layout)
self._input_box = input_box
########################################################################
# Network #
########################################################################
# networkview: a widget to select a network
self._network_view = QNetworkView()
self._network_selector = QComboBox()
self._network_layout = QVBoxLayout()
self._network_layout.addWidget(self._network_selector)
self._network_layout.addWidget(self._network_view)
self._network_box = QGroupBox('Network')
self._network_box.setLayout(self._network_layout)
| Python | 0.000004 |
19e347716b5efcbaaf857a2805bd5f7ed5d5ec04 | Patch waagent unit to kill process instead of entire control group | VMEncryption/main/oscrypto/encryptstates/PrereqState.py | VMEncryption/main/oscrypto/encryptstates/PrereqState.py | #!/usr/bin/env python
#
# VM Backup extension
#
# Copyright 2015 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.7+
#
from OSEncryptionState import *
from pprint import pprint
class PrereqState(OSEncryptionState):
def __init__(self, context):
super(PrereqState, self).__init__('PrereqState', context)
def should_enter(self):
self.context.logger.log("Verifying if machine should enter prereq state")
if not super(PrereqState, self).should_enter():
return False
self.context.logger.log("Performing enter checks for prereq state")
return True
def enter(self):
if not self.should_enter():
return
self.context.logger.log("Entering prereq state")
distro_info = self.context.distro_patcher.distro_info
self.context.logger.log("Distro info: {0}, {1}".format(distro_info[0], distro_info[1]))
if distro_info[0] == 'redhat' and distro_info[1] == '7.2':
self.context.logger.log("Enabling OS volume encryption on RHEL 7.2")
else:
raise Exception("OS volume encryption is not supported for distro {0} {1}".format(distro_info[0],
distro_info[1]))
self.context.distro_patcher.install_extras()
self._patch_waagent()
self.command_executor.Execute('systemctl daemon-reload', True)
def should_exit(self):
self.context.logger.log("Verifying if machine should exit prereq state")
return super(PrereqState, self).should_exit()
def _patch_waagent(self):
self.context.logger.log("Patching waagent")
contents = None
with open('/usr/lib/systemd/system/waagent.service', 'r') as f:
contents = f.read()
contents = re.sub(r'\[Service\]\n', '[Service]\nKillMode=process\n', contents)
with open('/usr/lib/systemd/system/waagent.service', 'w') as f:
f.write(contents)
self.context.logger.log("waagent patched successfully")
| #!/usr/bin/env python
#
# VM Backup extension
#
# Copyright 2015 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.7+
#
from OSEncryptionState import *
from pprint import pprint
class PrereqState(OSEncryptionState):
def __init__(self, context):
super(PrereqState, self).__init__('PrereqState', context)
def should_enter(self):
self.context.logger.log("Verifying if machine should enter prereq state")
if not super(PrereqState, self).should_enter():
return False
self.context.logger.log("Performing enter checks for prereq state")
return True
def enter(self):
if not self.should_enter():
return
self.context.logger.log("Entering prereq state")
distro_info = self.context.distro_patcher.distro_info
self.context.logger.log("Distro info: {0}, {1}".format(distro_info[0], distro_info[1]))
if distro_info[0] == 'redhat' and distro_info[1] == '7.2':
self.context.logger.log("Enabling OS volume encryption on RHEL 7.2")
else:
raise Exception("OS volume encryption is not supported for distro {0} {1}".format(distro_info[0],
distro_info[1]))
self.context.distro_patcher.install_extras()
def should_exit(self):
self.context.logger.log("Verifying if machine should exit prereq state")
return super(PrereqState, self).should_exit()
| Python | 0 |
455874cae74a34e610650e5b5618b64fe808ea1c | fix docstring syntax error | ncharts/ncharts/templatetags/filters.py | ncharts/ncharts/templatetags/filters.py | from django import template
from ncharts import models as nc_models
register = template.Library()
@register.filter
def get_long_name(vs, v):
"""Get 'long_name' value of vs[v] """
try:
return vs[v]['long_name']
except:
return ''
@register.filter
def get_item(dictionary, key):
return dictionary.get(key)
@register.filter
def get_key_values(var_name, variables):
for var in variables:
if var.choice_label == var_name:
return var
@register.filter
def make_tabs(variables, dset):
return dset.make_tabs(variables)
@register.filter
def make_project_tabs(projects):
"""Get the dictionary of years and projects from models.py"""
return nc_models.Project.make_tabs(projects)
| from django import template
from ncharts import models as nc_models
register = template.Library()
@register.filter
def get_long_name(vs, v):
"""Get 'long_name' value of vs[v] """
try:
return vs[v]['long_name']
except:
return ''
@register.filter
def get_item(dictionary, key):
return dictionary.get(key)
@register.filter
def get_key_values(var_name, variables):
for var in variables:
if var.choice_label == var_name:
return var
@register.filter
def make_tabs(variables, dset):
return dset.make_tabs(variables)
@register.filter
"""Get the dictionary of years and projects from models.py"""
def make_project_tabs(projects):
return nc_models.Project.make_tabs(projects)
| Python | 0.000001 |
f7d4be60dd246193fe269dc1caaf8208bd4dba22 | improve output of compare_dfa.py. | src/trusted/validator_ragel/unreviewed/compare_dfa.py | src/trusted/validator_ragel/unreviewed/compare_dfa.py | #!/usr/bin/python
# Copyright (c) 2013 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import dfa_parser
visited_pairs = set()
def Traverse(state1, state2, path):
if (state1, state2) in visited_pairs:
return
if state1.is_accepting != state2.is_accepting:
print map(hex, path), state1.is_accepting
print map(hex, path), state2.is_accepting
sys.exit(1)
visited_pairs.add((state1, state2))
for byte in range(256):
new_path = path + [byte]
t1 = state1.forward_transitions.get(byte)
t2 = state2.forward_transitions.get(byte)
if (t1 is None) != (t2 is None):
t = t1 or t2
s = t.to_state
path_to_accepting = new_path
while not s.is_accepting:
b = min(s.forward_transitions)
path_to_accepting.append(b)
s = s.forward_transitions[b].to_state
if t1 is not None:
print map(hex, path_to_accepting), True
print map(hex, path), '...', False
else:
print map(hex, path), '...', False
print map(hex, path_to_accepting), True
sys.exit(1)
if t1 is None:
continue
Traverse(t1.to_state, t2.to_state, new_path)
def main():
filename1, filename2 = sys.argv[1:]
_, start_state1 = dfa_parser.ParseXml(filename1)
_, start_state2 = dfa_parser.ParseXml(filename2)
Traverse(start_state1, start_state2, [])
print 'automata are equivalent'
if __name__ == '__main__':
main()
| #!/usr/bin/python
# Copyright (c) 2013 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import dfa_parser
visited_pairs = set()
def Traverse(state1, state2, path):
if (state1, state2) in visited_pairs:
return
if state1.is_accepting != state2.is_accepting:
print map(hex, path)
print state1.is_accepting
print state2.is_accepting
sys.exit(1)
visited_pairs.add((state1, state2))
for byte in range(256):
new_path = path + [byte]
t1 = state1.forward_transitions.get(byte)
t2 = state2.forward_transitions.get(byte)
if (t1 is None) != (t2 is None):
print map(hex, new_path)
print t1 is not None
print t2 is not None
sys.exit(1)
if t1 is None:
continue
Traverse(t1.to_state, t2.to_state, new_path)
def main():
filename1, filename2 = sys.argv[1:]
_, start_state1 = dfa_parser.ParseXml(filename1)
_, start_state2 = dfa_parser.ParseXml(filename2)
Traverse(start_state1, start_state2, [])
print 'automata are equivalent'
if __name__ == '__main__':
main()
| Python | 0.403608 |
a76915d31937f31e5d5fd7ed090198e311cffaa1 | fix csvrecorder | pikos/recorders/csv_recorder.py | pikos/recorders/csv_recorder.py | # -*- coding: utf-8 -*-
#------------------------------------------------------------------------------
# Package: Pikos toolkit
# File: recorders/csv_recorder.py
# License: LICENSE.TXT
#
# Copyright (c) 2012, Enthought, Inc.
# All rights reserved.
#------------------------------------------------------------------------------
import csv
from pikos.recorders.abstract_recorder import AbstractRecorder, RecorderError
class CSVRecorder(AbstractRecorder):
""" The CSV Recorder is a simple text based recorder that records the
tuple of values using a scv writer.
Private
-------
_filter : callable
Used to check if the set `record` should be `recorded`. The function
accepts a tuple of the `record` values and return True is the input
should be recorded.
_writer : csv.writer
The `writer` object is owned by the CSVRecorder and exports the record
values according to the configured dialect.
_ready : bool
Signify that the Recorder is ready to accept data.
"""
def __init__(self, stream, filter_=None, **csv_kwargs):
""" Class initialization.
Parameters
----------
stream : file
A *file*-like object to use for output.
filter_ : callable
A callable function that accepts a data tuple and returns True
if the input sould be recorded.
**csv_kwargs :
Key word arguments to be passed to the *cvs.writer*.
"""
self._filter = (lambda x: True) if filter_ is None else filter_
self._writer = csv.writer(stream, **csv_kwargs)
self._ready = False
def prepare(self, fields):
""" Write the header in the csv file the first time it is called. """
if not self._ready:
self._writer.writerow(fields._fields)
self._ready = True
def finalize(self):
""" Finalize the recorder.
A do nothing method.
Raises
------
RecorderError :
Raised if the method is called without the recorder been ready to
accept data.
"""
if not self._ready:
msg = 'Method called while recorder has not been prepared'
raise RecorderError(msg)
def record(self, data):
""" Record the data entry when the filter function returns True.
Parameters
----------
values : NamedTuple
The record entry.
Raises
------
RecorderError :
Raised if the method is called without the recorder been ready to
accept data.
"""
if self._ready:
if self._filter(data):
self._writer.writerow(data)
else:
msg = 'Method called while recorder is not ready to record'
raise RecorderError(msg)
| # -*- coding: utf-8 -*-
#------------------------------------------------------------------------------
# Package: Pikos toolkit
# File: recorders/csv_recorder.py
# License: LICENSE.TXT
#
# Copyright (c) 2012, Enthought, Inc.
# All rights reserved.
#------------------------------------------------------------------------------
import csv
from pikos.recorders.abstract_recorder import AbstractRecorder, RecorderError
class CSVRecorder(AbstractRecorder):
""" The CSV Recorder is a simple text based recorder that records the
tuple of values using a scv writer.
Private
-------
_filter : callable
Used to check if the set `record` should be `recorded`. The function
accepts a tuple of the `record` values and return True is the input
sould be recored.
_writer : csv.writer
The `writer` object is owned by the CSVRecorder and exports the record
values according to the configured dialect.
_ready : bool
Singify that the Recorder is ready to accept data.
"""
def __init__(self, stream, filter_=None, **csv_kwargs):
""" Class initialization.
Parameters
----------
stream : file
A *file*-like object to use for output.
filter_ : callable
A callable function that accepts a data tuple and returns True
if the input sould be recorded.
**csv_kwargs :
Key word arguments to be passed to the *cvs.writer*.
"""
self._filter = (lambda x: True) if filter_ is None else filter_
self._writer = csv.writer(stream, **csv_kwargs)
self._ready = False
def prepare(self, fields):
""" Write the header in the csv file the first time it is called. """
if not self._ready:
self._writer.writerow(fields)
self._ready = True
def finalize(self):
""" Finalize the recorder.
A do nothing method.
Raises
------
RecorderError :
Raised if the method is called without the recorder been ready to
accept data.
"""
if not self._ready:
msg = 'Method called while recorder has not been prepared'
raise RecorderError(msg)
def record(self, data):
""" Record the data entry when the filter function returns True.
Parameters
----------
values : NamedTuple
The record entry.
Raises
------
RecorderError :
Raised if the method is called without the recorder been ready to
accept data.
"""
if self._ready:
if self._filter(data):
self._writer.writerow(data)
else:
msg = 'Method called while recorder is not ready to record'
raise RecorderError(msg)
| Python | 0.000013 |
7331fa69c6cd2f09b1711272278a9684af5cf9c1 | fix attachfilename | intelmq/bots/collectors/mail/mail-attach.py | intelmq/bots/collectors/mail/mail-attach.py | import re
import imbox
import zipfile
from intelmq.lib.bot import Bot, sys
from intelmq.bots.collectors.mail.lib import Mail
class MailAttachCollectorBot(Bot):
def process(self):
mailbox = imbox.Imbox(self.parameters.mail_host, self.parameters.mail_user, self.parameters.mail_password, self.parameters.mail_ssl)
emails = mailbox.messages(folder=self.parameters.folder, unread=True)
if emails:
for uid, message in emails:
if self.parameters.subject_regex and not re.search(self.parameters.subject_regex, message.subject):
continue
self.logger.info("Reading email report")
for attach in message.attachments:
if not attach:
continue
attach_name = attach['filename'][1:len(attach['filename'])-1] # remove quote marks from filename
if re.search(self.parameters.attach_regex, attach_name):
if self.parameters.attach_unzip:
zipped = zipfile.ZipFile(attach['content'])
report = zipped.read(zipped.namelist()[0])
else:
report = attach['content']
self.send_message(report)
mailbox.mark_seen(uid)
self.logger.info("Email report read")
if __name__ == "__main__":
bot = MailAttachCollectorBot(sys.argv[1])
bot.start() | import re
import imbox
import zipfile
from intelmq.lib.bot import Bot, sys
from intelmq.bots.collectors.mail.lib import Mail
class MailAttachCollectorBot(Bot):
def process(self):
mailbox = imbox.Imbox(self.parameters.mail_host, self.parameters.mail_user, self.parameters.mail_password, self.parameters.mail_ssl)
emails = mailbox.messages(folder=self.parameters.folder, unread=True)
if emails:
for uid, message in emails:
if self.parameters.subject_regex and not re.search(self.parameters.subject_regex, message.subject):
continue
self.logger.info("Reading email report")
for attach in message.attachments:
if not attach:
continue
if re.search(self.parameters.attach_regex, attach['filename']):
if self.parameters.attach_unzip:
zipped = zipfile.ZipFile(attach['content'])
report = zipped.read(zipped.namelist()[0])
else:
report = attach['content']
self.send_message(report)
mailbox.mark_seen(uid)
self.logger.info("Email report read")
if __name__ == "__main__":
bot = MailAttachCollectorBot(sys.argv[1])
bot.start() | Python | 0.000003 |
b481426e52661b702fa014a86c68b015f46feb1f | remove deprecated test suite declarations | account_invoice_constraint_chronology/tests/__init__.py | account_invoice_constraint_chronology/tests/__init__.py | # -*- coding: utf-8 -*-
#
#
# Authors: Adrien Peiffer
# Copyright (c) 2014 Acsone SA/NV (http://www.acsone.eu)
# All Rights Reserved
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contact a Free Software
# Service Company.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from . import test_account_constraint_chronology
| # -*- coding: utf-8 -*-
#
#
# Authors: Adrien Peiffer
# Copyright (c) 2014 Acsone SA/NV (http://www.acsone.eu)
# All Rights Reserved
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contact a Free Software
# Service Company.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from . import test_account_constraint_chronology
fast_suite = [
test_account_constraint_chronology,
]
checks = [
test_account_constraint_chronology,
]
| Python | 0 |
7fef3fbd8f4a68a2cf584721930c276eb49b16ee | Fix issue in infer_android_package_name with tests in //javatests | build_extensions/infer_android_package_name.bzl | build_extensions/infer_android_package_name.bzl | """A rule for inferring an android package name."""
def infer_android_package_name():
"""Infer an android package name based on current path below 'javatests'"""
path = native.package_name()
javatests_index = path.rindex("javatests/") + len("javatests/")
return path[javatests_index:].replace("/", ".")
| """A rule for inferring an android package name."""
def infer_android_package_name():
"""Infer an android package name based on current path below 'javatests'"""
path = native.package_name()
javatests_index = path.rindex("/javatests/") + len("/javatests/")
return path[javatests_index:].replace("/", ".")
| Python | 0.000021 |
c2f668b6b403bde09485595c2ac2852220739c93 | Fix docstring. | morepath/toposort.py | morepath/toposort.py | from .error import TopologicalSortError
def topological_sort(l, get_depends):
result = []
marked = set()
temporary_marked = set()
def visit(n):
if n in marked:
return
if n in temporary_marked:
raise TopologicalSortError("Not a DAG")
temporary_marked.add(n)
for m in get_depends(n):
visit(m)
marked.add(n)
result.append(n)
for n in l:
visit(n)
return result
def toposorted(infos):
"""Sort infos topologically.
Info object must have a key attribute, and before and after
attributes that returns a list of keys.
"""
key_to_info = {}
depends = {}
for info in infos:
key_to_info[info.key] = info
depends[info.key] = []
for info in infos:
for after in info.after:
after_info = key_to_info[after]
depends[info.key].append(after_info)
for before in info.before:
before_info = key_to_info[before]
depends[before_info.key].append(info)
return topological_sort(
infos, lambda info: depends[info.key])
| from .error import TopologicalSortError
def topological_sort(l, get_depends):
result = []
marked = set()
temporary_marked = set()
def visit(n):
if n in marked:
return
if n in temporary_marked:
raise TopologicalSortError("Not a DAG")
temporary_marked.add(n)
for m in get_depends(n):
visit(m)
marked.add(n)
result.append(n)
for n in l:
visit(n)
return result
def toposorted(infos):
"""Sort infos topologically.
Info object must have a key attribute, and before and after
methods that returns a list of keys.
"""
key_to_info = {}
depends = {}
for info in infos:
key_to_info[info.key] = info
depends[info.key] = []
for info in infos:
for after in info.after:
after_info = key_to_info[after]
depends[info.key].append(after_info)
for before in info.before:
before_info = key_to_info[before]
depends[before_info.key].append(info)
return topological_sort(
infos, lambda info: depends[info.key])
| Python | 0.000001 |
da8b7471ee773c835edae6f333938caa34e17cbd | Update wrap.py | tesco-delivery-bot/wrap.py | tesco-delivery-bot/wrap.py | #!/usr/local/bin/python3.7
import subprocess
import os
import http.client, urllib
PO_API_TOKEN = ""
PO_USER_KEY = ""
NODE_BIN = "/usr/local/bin/node"
class cd:
"""Context manager for changing the current working directory"""
def __init__(self, newPath):
self.newPath = os.path.expanduser(newPath)
def __enter__(self):
self.savedPath = os.getcwd()
os.chdir(self.newPath)
def __exit__(self, etype, value, traceback):
os.chdir(self.savedPath)
def define_po_keys():
global PO_API_TOKEN, PO_USER_KEY
try:
PO_API_TOKEN = os.environ["PO_API_TOKEN"]
PO_USER_KEY = os.environ["PO_USER_KEY"]
except KeyError as err:
print(
f"Error: {err}. Check if your environment defines PO_API_TOKEN and PO_USER_KEY"
)
exit(1)
if not PO_API_TOKEN or not PO_USER_KEY:
print(f"Error: PushOver token or key are empty.")
exit(1)
def check_tesco() -> list:
with cd("~/delivery-slot-bot"):
result = subprocess.run(
[NODE_BIN, "delivery-slots.js"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True,
)
if result.stderr:
print(f"ERROR: {result.stderr}")
return []
result_list = result.stdout.split("\n")
print(result_list)
def process_tesco(t_list) -> str:
return "ok world"
def send_po(message) -> bool:
if not message:
print("message is emtpy")
return True
conn = http.client.HTTPSConnection("api.pushover.net:443")
conn.request(
"POST",
"/1/messages.json",
urllib.parse.urlencode(
{"token": PO_API_TOKEN, "user": PO_USER_KEY, "message": message,}
),
{"Content-type": "application/x-www-form-urlencoded"},
)
res = conn.getresponse()
if res.status not in range(200, 300):
return False
return True
if __name__ == "__main__":
define_po_keys()
res_tesco = check_tesco()
message = process_tesco(res_tesco)
send_po(message)
| import subprocess
import os
import http.client, urllib
PO_API_TOKEN = ""
PO_USER_KEY = ""
class cd:
"""Context manager for changing the current working directory"""
def __init__(self, newPath):
self.newPath = os.path.expanduser(newPath)
def __enter__(self):
self.savedPath = os.getcwd()
os.chdir(self.newPath)
def __exit__(self, etype, value, traceback):
os.chdir(self.savedPath)
def define_po_keys() -> None:
global PO_API_TOKEN, PO_USER_KEY
try:
PO_API_TOKEN = os.environ["PO_API_TOKEN"]
PO_USER_KEY = os.environ["PO_USER_KEY"]
except KeyError as err:
print(f"Error: {err}. Check if your environment defines PO_API_TOKEN and PO_USER_KEY")
exit(1)
if not PO_API_TOKEN or not PO_USER_KEY:
print(f"Error: PushOver token or key are empty.")
exit(1)
def check_tesco() -> list:
with cd("/path/delivery-slot-bot"):
result = subprocess.run(["node", "delivery-slots.js"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
if result.stderr:
print(f"ERROR: {result.stderr}")
return []
result_list = result.stdout.split('\n')
print(result_list)
def process_tesco(t_list) -> str:
pass
def send_po(message) -> bool:
if not message:
print("Message is emtpy")
return True
conn = http.client.HTTPSConnection("api.pushover.net:443")
conn.request("POST", "/1/messages.json",
urllib.parse.urlencode({
"token": PO_API_TOKEN,
"user": PO_USER_KEY,
"message": message,
}), { "Content-type": "application/x-www-form-urlencoded" })
res = conn.getresponse()
if (res.status not in range(200, 300)):
return False
return True
if __name__ == "__main__":
| Python | 0.000001 |
24bbdd828253e5a1df92dac31f271dde6739ef8d | Refactor and fix some things | vega/polestar.py | vega/polestar.py | import os
import json
import cgi
import codecs
from IPython import display
JS = ['static/polestar/scripts/vendor-6292494709.js',
'static/polestar/scripts/app-ddc64cf3e9.js']
CSS = ['static/polestar/scripts/vendor-5779b264ab.css',
'static/polestar/styles/app-767140e98a.css']
TEAMPLATE = 'static/index.html'
IFRAME_STYLE = 'border: none; width: 100%; min-height: 580px;'
def publish(dataframe):
"""Create and immediately display even if it is not the last line."""
display.display(create(dataframe))
def create(dataframe):
"""Creates polestar from a dataframe"""
return Polestar(dataframe.columns, dataframe.values)
class Polestar(display.DisplayObject):
"""Defines Polestar widget"""
def __init__(self, columns, data):
"""Constructor
Args:
columns: a list of column names
data: list of rows"""
self.data = data
self.columns = columns
def __get_content(self, path):
abs_path = os.path.abspath(path)
with codecs.open(abs_path, encoding='utf-8') as f:
return f.read()
def __styles(self, paths):
out = []
for path in paths:
out.append(u'<style>/*# sourceURL={path} */\n{body}</style>'.format(
path=path, body=self.__get_content(path)))
return u'\n'.join(out)
def __scripts(self, paths):
out = []
for path in paths:
out.append((u'<script type="text/javascript">//@ sourceURL={path}'
'\n{body}</script>').format(
path=path, body=self.__get_content(path)))
return u'\n'.join(out)
def __data(self):
res = []
for row in self.data.tolist():
res.append({k: v for k, v in zip(self.columns, row)})
return res
def __escape(self, body):
return cgi.escape(body, quote=True)
def _repr_html_(self):
"""Used by the frontend to show html for polestar."""
template = self.__get_content(TEAMPLATE)
body = template.format(
styles=self.__styles(CSS),
scripts=self.__scripts(JS),
data=json.dumps(self.__data()))
output = u'<iframe srcdoc="{srcdoc}" style="{style}"></iframe>'.format(
srcdoc=self.__escape(body), style=IFRAME_STYLE)
return output
| import os
import json
import cgi
import codecs
from IPython import display
JS = ['polestar/scripts/vendor-13742e93f0.js', 'polestar/scripts/app-512c772610.js']
CSS = ['polestar/styles/app-a696a065c6.css', 'polestar/scripts/vendor-e4b58aff85.css']
TEAMPLATE = 'index.html'
IFRAME_STYLE = 'border: none; width: 100%; min-height: 580px;'
def publish(dataframe):
"""Create and immediately display even if it is not the last line."""
display.display(create(dataframe))
def create(dataframe):
"""Creates polestar from a dataframe"""
return Polestar(dataframe.columns, dataframe.values)
class Polestar(display.DisplayObject):
"""Defines Polestar widget"""
def __init__(self, columns, data):
"""Constructor
Args:
columns: a list of column names
data: list of rows"""
self.data = data
self.columns = columns
def __get_content(self, path):
path = os.path.join('static', path)
abs_path = os.path.abspath(path)
with codecs.open(abs_path, encoding='utf-8') as f:
return path, f.read()
def __styles(self, paths):
out = []
for p in paths:
path, body = self.__get_content(p)
out.append(u'<style>/*# sourceURL={path} */\n{body}</style>'.format(
path=path, body=body))
return u'\n'.join(out)
def __scripts(self, paths):
out = []
for p in paths:
path, body = self.__get_content(p)
out.append((u'<script type="text/javascript">//@ sourceURL={path}'
'\n{body}</script>').format(path=path, body=body))
return u'\n'.join(out)
def __data(self):
return self.data.tolist()
def __escape(self, body):
return cgi.escape(body, quote=True)
def _repr_html_(self):
"""Used by the frontend to show html for polestar."""
_, template = self.__get_content(TEAMPLATE)
body = template.format(
styles=self.__styles(CSS),
scripts=self.__scripts(JS),
data=json.dumps(self.__data()))
output = u'<iframe srcdoc="{srcdoc}" style="{style}"></iframe>'.format(
srcdoc=self.__escape(body), style=IFRAME_STYLE)
return output
| Python | 0.000023 |
577a526284236cf9a31b3b339902bbeed4991b63 | work on profile tests, part-baked | petl/test/test_profile.py | petl/test/test_profile.py | """
TODO doc me
"""
from petl.profile import *
def test_profile():
"""
TODO doc me
"""
table = [['foo', 'bar'],
['A', 1],
['B', 2],
['B', '3', True],
['D', 'xyz'],
['E']]
profiler = Profiler(table)
# profile the table with default analyses - list field names and
# report the sample size used for profiling
profile = profiler.profile()
assert profile['general']['default']['field_names'] == ('foo', 'bar')
assert profile['general']['default']['sample_size'] == 5
# add row lengths analysis
profiler.add(RowLengths)
profile = profiler.profile()
assert profile['general']['row_lengths']['max_row_length'] == 3
assert profile['general']['row_lengths']['min_row_length'] == 1
assert profile['general']['row_lengths']['mean_row_length'] == 2
# add distinct values analysis on field 'foo'
profiler.add(DistinctValues, field='foo')
profile = profiler.profile()
assert profile['fields']['foo']['distinct_values'] == {'A': 1, 'B': 2, 'D': 1, 'E': 1}
# add basic statistics analysis on field 'bar'
profiler.add(BasicStatistics, field='foo')
profile = profiler.profile()
assert profile['fields']['bar']['basic_statistics']['min'] == 1
assert profile['fields']['bar']['basic_statistics']['max'] == 3
assert profile['fields']['bar']['basic_statistics']['mean'] == 2
assert profile['fields']['bar']['basic_statistics']['sum'] == 6
assert profile['fields']['bar']['basic_statistics']['count'] == 3
assert profile['fields']['bar']['basic_statistics']['errors'] == 2
# add types analysis on all fields
profiler.add(Types, all_fields=True) # TODO a different method?
profile = profiler.profile()
assert profile['fields']['foo']['types']['actual_types'] == {'string': 5}
assert profile['fields']['foo']['types']['applicable_types'] == {'string': 5}
assert profile['fields']['foo']['types']['inferred_type'] == 'string'
assert profile['fields']['bar']['types']['actual_types'] == {'int': 2, 'string': 2}
assert profile['fields']['foo']['types']['applicable_types'] == {'int': 3, 'float': 3, 'string': 5}
assert profile['fields']['foo']['types']['inferred_type'] == 'int'
| """
TODO doc me
"""
from petl.profile import *
def test_profile():
"""
TODO doc me
"""
table = [['foo', 'bar'],
['A', 1],
['B', 2],
['B', '3', True],
['D', 'xyz'],
['E']]
profiler = Profiler(table)
profile = profiler.profile()
assert profile['general']['field_names'] == ('foo', 'bar')
assert profile['general']['sample_size'] == 5
profiler.add(record_lengths)
profile = profiler.profile()
assert profile['general']['max_row_length'] == 3
assert profile['general']['min_row_length'] == 1
assert profile['general']['mean_row_length'] == 2
profiler.add(distinct_values, field='foo')
profile = profiler.profile()
assert profile['fields']['foo']['distinct_values']['values'] == {'A', 'B', 'D', 'E'}
assert profile['fields']['foo']['distinct_values']['counts']['A'] == 1
assert profile['fields']['foo']['distinct_values']['counts']['B'] == 2
assert profile['fields']['foo']['distinct_values']['counts']['D'] == 1
assert profile['fields']['foo']['distinct_values']['counts']['E'] == 1
assert 'C' not in profile['fields']['foo']['distinct_values']['counts']
| Python | 0 |
87244598ed08e790835818656ecba0178bb7ca89 | Upgrade to a better version | fsplit/__init__.py | fsplit/__init__.py | #!/usr/bin/env python2
##
# fsplit
# https://github.com/leosartaj/fsplit.git
#
# Copyright (c) 2014 Sartaj Singh
# Licensed under the MIT license.
##
from .info import __version__ # define __version__ variable
from .info import __desc__ # define __desc__ variable for description
| #!/usr/bin/env python2
##
# fsplit
# https://github.com/leosartaj/fsplit.git
#
# Copyright (c) 2014 Sartaj Singh
# Licensed under the MIT license.
##
from info import __version__ # define __version__ variable
from info import __desc__ # define __desc__ variable for description
| Python | 0 |
d879440cef6bc1985ab4e8bf8f81163b661beb1b | change locale and admin settings | censusreporter/config/base/settings.py | censusreporter/config/base/settings.py | # Django settings for censusreporter project.
import os
dirname = os.path.dirname
PROJECT_ROOT = os.path.abspath(os.path.join(dirname(__file__),"..",".."))
DEBUG = True
TEMPLATE_DEBUG = DEBUG
# should be set by each settings file
# ROOT_URLCONF = 'config.dev.urls'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': '%s/census_app_db' % PROJECT_ROOT,
},
}
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.admin',
'django.contrib.contenttypes',
'django.contrib.humanize',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.staticfiles',
'census',
)
ALLOWED_HOSTS = []
TIME_ZONE = 'Africa/Johannesburg'
LANGUAGE_CODE = 'en-uk'
SITE_ID = 1
USE_I18N = False
USE_L10N = True
USE_TZ = True
SECRET_KEY = '!%j-u4&(q8qu4@dq=ukth27+q!v-!h^jck14bf=spqht847$4q'
MEDIA_ROOT = ''
MEDIA_URL = ''
STATIC_ROOT = PROJECT_ROOT + '/static/'
STATIC_URL = '/static/'
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.media',
'django.core.context_processors.request',
'django.core.context_processors.static',
'django.contrib.auth.context_processors.auth',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'censusreporter.wsgi.application'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
ADMINS = (
('Greg Kempe', 'greg@kempe.net'),
('Rizmari Versfeld', 'rizziepit@gmail.com'),
)
MANAGERS = ADMINS
API_URL = 'http://api.censusreporter.org'
| # Django settings for censusreporter project.
import os
dirname = os.path.dirname
PROJECT_ROOT = os.path.abspath(os.path.join(dirname(__file__),"..",".."))
DEBUG = True
TEMPLATE_DEBUG = DEBUG
# should be set by each settings file
# ROOT_URLCONF = 'config.dev.urls'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': '%s/census_app_db' % PROJECT_ROOT,
},
}
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.admin',
'django.contrib.contenttypes',
'django.contrib.humanize',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.staticfiles',
'census',
)
ALLOWED_HOSTS = []
TIME_ZONE = 'America/Chicago'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
USE_I18N = False
USE_L10N = True
USE_TZ = True
SECRET_KEY = '!%j-u4&(q8qu4@dq=ukth27+q!v-!h^jck14bf=spqht847$4q'
MEDIA_ROOT = ''
MEDIA_URL = ''
STATIC_ROOT = PROJECT_ROOT + '/static/'
STATIC_URL = '/static/'
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.media',
'django.core.context_processors.request',
'django.core.context_processors.static',
'django.contrib.auth.context_processors.auth',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'censusreporter.wsgi.application'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
ADMINS = (
('Ian Dees', 'ian.dees@gmail.com'),
('Joe Germuska', 'joegermuska@gmail.com'),
('Ryan Pitts', 'ryan.a.pitts@gmail.com'),
)
MANAGERS = ADMINS
API_URL = 'http://api.censusreporter.org'
| Python | 0 |
85124382cd3f90b439b27c8dd5a82f47925ddab7 | fix settings print | mppsolar/__init__.py | mppsolar/__init__.py | # -*- coding: utf-8 -*-
# !/usr/bin/python
import logging
from argparse import ArgumentParser
# import mppcommands
import mpputils
logger = logging.getLogger()
# if __name__ == '__main__':
def main():
parser = ArgumentParser(description='MPP Solar Command Utility')
parser.add_argument('-c', '--command', help='Command to run', default='QID')
parser.add_argument('-ll', '--loglevel',
type=str,
choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
help='Set the logging level')
parser.add_argument('-d', '--device', type=str, help='Serial device to communicate with', default='/dev/ttyUSB0')
parser.add_argument('-b', '--baud', type=int, help='Baud rate for serial communications', default=2400)
parser.add_argument('-l', '--listknown', action='store_true', help='List known commands')
parser.add_argument('-s', '--getStatus', action='store_true', help='Get Inverter Status')
parser.add_argument('-t', '--getSettings', action='store_true', help='Get Inverter Settings')
parser.add_argument('-H', '--makepretty', action='store_true', help='Display result with descriptions etc if possible')
args = parser.parse_args()
logging.basicConfig(level=args.loglevel)
logging.debug('command %s', args.command)
logging.debug('Serial device used: %s, baud rate: %d', args.device, args.baud)
# mp = mppcommands.mppCommands(args.device, args.baud)
mp = mpputils.mppUtils(args.device, args.baud)
if(args.listknown):
for line in mp.getKnownCommands():
print line
elif(args.getStatus):
fullStatus = mp.getFullStatus()
for key in fullStatus:
print "{}\t{} {}".format(key, fullStatus[key]['value'], fullStatus[key]['unit'])
elif(args.getSettings):
settings = mp.getSettings()
for key in settings:
print "{}\t{}\t{} {}".format(key, settings[key]['default'],
settings[key]['value'],
settings[key]['unit'])
else:
# TODO: check if command is valid
# maybe check if query or setter and ...
if(args.makepretty):
for line in mp.getResponsePretty(args.command):
print line
else:
print mp.getResponse(args.command)
| # -*- coding: utf-8 -*-
# !/usr/bin/python
import logging
from argparse import ArgumentParser
# import mppcommands
import mpputils
logger = logging.getLogger()
# if __name__ == '__main__':
def main():
parser = ArgumentParser(description='MPP Solar Command Utility')
parser.add_argument('-c', '--command', help='Command to run', default='QID')
parser.add_argument('-ll', '--loglevel',
type=str,
choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
help='Set the logging level')
parser.add_argument('-d', '--device', type=str, help='Serial device to communicate with', default='/dev/ttyUSB0')
parser.add_argument('-b', '--baud', type=int, help='Baud rate for serial communications', default=2400)
parser.add_argument('-l', '--listknown', action='store_true', help='List known commands')
parser.add_argument('-s', '--getStatus', action='store_true', help='Get Inverter Status')
parser.add_argument('-t', '--getSettings', action='store_true', help='Get Inverter Settings')
parser.add_argument('-H', '--makepretty', action='store_true', help='Display result with descriptions etc if possible')
args = parser.parse_args()
logging.basicConfig(level=args.loglevel)
logging.debug('command %s', args.command)
logging.debug('Serial device used: %s, baud rate: %d', args.device, args.baud)
# mp = mppcommands.mppCommands(args.device, args.baud)
mp = mpputils.mppUtils(args.device, args.baud)
if(args.listknown):
for line in mp.getKnownCommands():
print line
elif(args.getStatus):
fullStatus = mp.getFullStatus()
for key in fullStatus:
print key, fullStatus[key]['value'], fullStatus[key]['unit']
elif(args.getSettings):
for line in mp.getSettings():
print line, line['value'], line['unit']
else:
# TODO: check if command is valid
# maybe check if query or setter and ...
if(args.makepretty):
for line in mp.getResponsePretty(args.command):
print line
else:
print mp.getResponse(args.command)
| Python | 0.000001 |
bafdbd28e35d80d28bfb82c23532533cb2915066 | Add docs for MissingInputFiles 'message' arg. | fuel/exceptions.py | fuel/exceptions.py | class AxisLabelsMismatchError(ValueError):
"""Raised when a pair of axis labels tuples do not match."""
class ConfigurationError(Exception):
"""Error raised when a configuration value is requested but not set."""
class MissingInputFiles(Exception):
"""Exception raised by a converter when input files are not found.
Parameters
----------
message : str
The error message to be associated with this exception.
filenames : list
A list of filenames that were not found.
"""
def __init__(self, message, filenames):
self.filenames = filenames
super(MissingInputFiles, self).__init__(message, filenames)
class NeedURLPrefix(Exception):
"""Raised when a URL is not provided for a file."""
| class AxisLabelsMismatchError(ValueError):
"""Raised when a pair of axis labels tuples do not match."""
class ConfigurationError(Exception):
"""Error raised when a configuration value is requested but not set."""
class MissingInputFiles(Exception):
"""Exception raised by a converter when input files are not found.
Parameters
----------
filenames : list
A list of filenames that were not found.
"""
def __init__(self, message, filenames):
self.filenames = filenames
super(MissingInputFiles, self).__init__(message, filenames)
class NeedURLPrefix(Exception):
"""Raised when a URL is not provided for a file."""
| Python | 0 |
d2eda42f9f5769d0d42ab38d6bfd912ccff53327 | remove Q1 from status data (as doesnt work on all inverters and is undocumented) | mppsolar/mpputils.py | mppsolar/mpputils.py | """
MPP Solar Inverter Command Library
library of utility and helpers for MPP Solar PIP-4048MS inverters
mpputils.py
"""
import logging
from .mppinverter import mppInverter
from .mppinverter import NoDeviceError
log = logging.getLogger('MPP-Solar')
def getVal(_dict, key, ind=None):
if key not in _dict:
return ""
if ind is None:
return _dict[key]
else:
return _dict[key][ind]
class mppUtils:
"""
MPP Solar Inverter Utility Library
"""
def __init__(self, serial_device=None, baud_rate=2400, inverter_model='standard'):
if (serial_device is None):
raise NoDeviceError("A serial device must be supplied, e.g. /dev/ttyUSB0")
self.inverter = mppInverter(serial_device, baud_rate, inverter_model)
def getKnownCommands(self):
return self.inverter.getAllCommands()
def getResponseDict(self, cmd):
return self.inverter.getResponseDict(cmd)
def getResponse(self, cmd):
return self.inverter.getResponse(cmd)
def getSerialNumber(self):
return self.inverter.getSerialNumber()
def getFullStatus(self):
"""
Helper function that returns all the status data
"""
status = {}
# serial_number = self.getSerialNumber()
data = self.getResponseDict("QPIGS")
# data.update(self.getResponseDict("Q1"))
# Need to get 'Parallel' info, but dont know what the parallel number for the correct inverter is...
# parallel_data = self.mp.getResponseDict("QPGS0")
# This 'hack' only works for 2 inverters in parallel.
# if parallel_data['serial_number'][0] != self.getSerialNumber():
# parallel_data = self.mp.getResponseDict("QPGS1")
# status_data.update(parallel_data)
for item in data.keys():
key = '{}'.format(item).replace(" ", "_")
status[key] = {"value": data[key][0], "unit": data[key][1]}
# Still have 'Device Status' from QPIGS
# Still have QPGSn
return status
def getSettings(self):
"""
Query inverter for all current settings
"""
# serial_number = self.getSerialNumber()
default_settings = self.getResponseDict("QDI")
current_settings = self.getResponseDict("QPIRI")
flag_settings = self.getResponseDict("QFLAG")
# current_settings.update(flag_settings) # Combine current and flag settings dicts
settings = {}
# {"Battery Bulk Charge Voltage": {"unit": "V", "default": 56.4, "value": 57.4}}
for item in current_settings.keys():
key = '{}'.format(item).replace(" ", "_")
settings[key] = {"value": getVal(current_settings, key, 0),
"unit": getVal(current_settings, key, 1),
"default": getVal(default_settings, key, 0)}
for key in flag_settings:
_key = '{}'.format(key).replace(" ", "_")
if _key in settings:
settings[_key]['value'] = getVal(flag_settings, key, 0)
else:
settings[_key] = {'value': getVal(flag_settings, key, 0), "unit": "", "default": ""}
return settings
| """
MPP Solar Inverter Command Library
library of utility and helpers for MPP Solar PIP-4048MS inverters
mpputils.py
"""
import logging
from .mppinverter import mppInverter
from .mppinverter import NoDeviceError
log = logging.getLogger('MPP-Solar')
def getVal(_dict, key, ind=None):
if key not in _dict:
return ""
if ind is None:
return _dict[key]
else:
return _dict[key][ind]
class mppUtils:
"""
MPP Solar Inverter Utility Library
"""
def __init__(self, serial_device=None, baud_rate=2400, inverter_model='standard'):
if (serial_device is None):
raise NoDeviceError("A serial device must be supplied, e.g. /dev/ttyUSB0")
self.inverter = mppInverter(serial_device, baud_rate, inverter_model)
def getKnownCommands(self):
return self.inverter.getAllCommands()
def getResponseDict(self, cmd):
return self.inverter.getResponseDict(cmd)
def getResponse(self, cmd):
return self.inverter.getResponse(cmd)
def getSerialNumber(self):
return self.inverter.getSerialNumber()
def getFullStatus(self):
"""
Helper function that returns all the status data
"""
status = {}
# serial_number = self.getSerialNumber()
data = self.getResponseDict("Q1")
data.update(self.getResponseDict("QPIGS"))
# Need to get 'Parallel' info, but dont know what the parallel number for the correct inverter is...
# parallel_data = self.mp.getResponseDict("QPGS0")
# This 'hack' only works for 2 inverters in parallel.
# if parallel_data['serial_number'][0] != self.getSerialNumber():
# parallel_data = self.mp.getResponseDict("QPGS1")
# status_data.update(parallel_data)
for item in data.keys():
key = '{}'.format(item).replace(" ", "_")
status[key] = {"value": data[key][0], "unit": data[key][1]}
# Still have 'Device Status' from QPIGS
# Still have QPGSn
return status
def getSettings(self):
"""
Query inverter for all current settings
"""
# serial_number = self.getSerialNumber()
default_settings = self.getResponseDict("QDI")
current_settings = self.getResponseDict("QPIRI")
flag_settings = self.getResponseDict("QFLAG")
# current_settings.update(flag_settings) # Combine current and flag settings dicts
settings = {}
# {"Battery Bulk Charge Voltage": {"unit": "V", "default": 56.4, "value": 57.4}}
for item in current_settings.keys():
key = '{}'.format(item).replace(" ", "_")
settings[key] = {"value": getVal(current_settings, key, 0),
"unit": getVal(current_settings, key, 1),
"default": getVal(default_settings, key, 0)}
for key in flag_settings:
_key = '{}'.format(key).replace(" ", "_")
if _key in settings:
settings[_key]['value'] = getVal(flag_settings, key, 0)
else:
settings[_key] = {'value': getVal(flag_settings, key, 0), "unit": "", "default": ""}
return settings
| Python | 0 |
0da95bdfc184614edca41b41ac3409295352fff6 | Update days_between.py | checkio/python/oreilly/days_between.py | checkio/python/oreilly/days_between.py | import datetime
def days_diff(date1, date2):
d1 = datetime.datetime(*date1)
d2 = datetime.datetime(*date2)
return abs((d2-d1).days)
if __name__ == '__main__':
#These "asserts" using only for self-checking and not necessary for auto-testing
assert days_diff((1982, 4, 19), (1982, 4, 22)) == 3
assert days_diff((2014, 1, 1), (2014, 8, 27)) == 238
assert days_diff((2014, 8, 27), (2014, 1, 1)) == 238
| Python | 0.000003 | |
e35649188f10e99381926318192c856e85245ef9 | update apk version to support toast | uiautomator2/version.py | uiautomator2/version.py | # coding: utf-8
#
__apk_version__ = '1.0.12'
# 1.0.12 add toast support
# 1.0.11 add auto install support
# 1.0.10 fix service not started bug
# 1.0.9 fix apk version code and version name
# ERR: 1.0.8 bad version number. show ip on notification
# ERR: 1.0.7 bad version number. new input method, some bug fix
__atx_agent_version__ = '0.2.1'
# 0.2.1 support occupy /minicap connection
# 0.2.0 add session support
# 0.1.8 fix screenshot always the same image. (BUG in 0.1.7), add /shell/stream add timeout for /shell
# 0.1.7 fix dns resolve error in /install
# 0.1.6 change download logic. auto fix orientation
# 0.1.5 add singlefight for minicap and minitouch, proxy dial-timeout change 30 to 10
# 0.1.4 phone remote control
# 0.1.2 /download support
# 0.1.1 minicap buildin | # coding: utf-8
#
__apk_version__ = '1.0.11'
# 1.0.11 add auto install support
# 1.0.10 fix service not started bug
# 1.0.9 fix apk version code and version name
# ERR: 1.0.8 bad version number. show ip on notification
# ERR: 1.0.7 bad version number. new input method, some bug fix
__atx_agent_version__ = '0.2.1'
# 0.2.1 support occupy /minicap connection
# 0.2.0 add session support
# 0.1.8 fix screenshot always the same image. (BUG in 0.1.7), add /shell/stream add timeout for /shell
# 0.1.7 fix dns resolve error in /install
# 0.1.6 change download logic. auto fix orientation
# 0.1.5 add singlefight for minicap and minitouch, proxy dial-timeout change 30 to 10
# 0.1.4 phone remote control
# 0.1.2 /download support
# 0.1.1 minicap buildin | Python | 0 |
f7034b2988d9ebcb3836b3e12b609f6098269f2c | remove unused shebang | potty_oh/waveform.py | potty_oh/waveform.py | # Copyright 2016 Curtis Sand
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A Waveform or Signal Generator Library for creating audio waveforms."""
import math
import numpy
class Generator(object):
def __init__(self, length=1.0, framerate=44100, verbose=False):
self.length = length
self.framerate = framerate
self.verbose = verbose
def _init(self, length=None, framerate=None, verbose=None, **kwargs):
if length:
self.length = length
if framerate:
self.framerate = framerate
if verbose:
self.verbose = verbose
# framecount = frames / sec * sec
self.framecount = int(self.framerate * self.length)
# rectify length to actual framecount
self.length = float(self.framecount) / self.framerate
self.dprint('framecount = %s' % self.framecount)
self.dprint('rectified length = %s' % self.length)
self.wavedata = numpy.zeros((self.framecount, 1))
def dprint(self, msg):
"""Conditionally print a debugging message."""
if self.verbose:
print(msg)
def whitenoise(self, *args, **kwargs):
"""Random Gaussian White Noise."""
self._init(*args, **kwargs)
self.wavedata = numpy.random.randn(self.framecount, 1)
return self.wavedata
def _sinusoid_amplitude(self, frame, frequency):
"""Calculate the amplitude of a sinusoid wave at a given frequency."""
# avoid divide by zero
frame = 0.001 if frame is 0 else frame
return math.sin(frame /
((self.framerate / frequency) / math.pi))
def sin_constant(self, frequency, *args, **kwargs):
"""Sinusoid wave of constant frequency."""
self._init(*args, **kwargs)
frequency = float(frequency)
for frame in range(self.framecount):
amplitude = self._sinusoid_amplitude(frame, frequency)
self.wavedata[frame, 0] = amplitude
return self.wavedata
def sin_linear(self, start_freq, end_freq, *args, **kwargs):
"""Sinusoid wave of linearly changing frequency."""
self._init(*args, **kwargs)
for frame in range(self.framecount):
# freq = start_freq + frame * freq_rate
# freq_rate = total_freq_change / framecount
frequency = start_freq + frame * (
float(end_freq - start_freq) / self.framecount)
amplitude = self._sinusoid_amplitude(frame, frequency)
self.wavedata[frame, 0] = amplitude
return self.wavedata
| #!/usr/bin/env python
# Copyright 2016 Curtis Sand
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A Waveform or Signal Generator Library for creating audio waveforms."""
import math
import numpy
class Generator(object):
def __init__(self, length=1.0, framerate=44100, verbose=False):
self.length = length
self.framerate = framerate
self.verbose = verbose
def _init(self, length=None, framerate=None, verbose=None, **kwargs):
if length:
self.length = length
if framerate:
self.framerate = framerate
if verbose:
self.verbose = verbose
# framecount = frames / sec * sec
self.framecount = int(self.framerate * self.length)
# rectify length to actual framecount
self.length = float(self.framecount) / self.framerate
self.dprint('framecount = %s' % self.framecount)
self.dprint('rectified length = %s' % self.length)
self.wavedata = numpy.zeros((self.framecount, 1))
def dprint(self, msg):
"""Conditionally print a debugging message."""
if self.verbose:
print(msg)
def whitenoise(self, *args, **kwargs):
"""Random Gaussian White Noise."""
self._init(*args, **kwargs)
self.wavedata = numpy.random.randn(self.framecount, 1)
return self.wavedata
def _sinusoid_amplitude(self, frame, frequency):
"""Calculate the amplitude of a sinusoid wave at a given frequency."""
# avoid divide by zero
frame = 0.001 if frame is 0 else frame
return math.sin(frame /
((self.framerate / frequency) / math.pi))
def sin_constant(self, frequency, *args, **kwargs):
"""Sinusoid wave of constant frequency."""
self._init(*args, **kwargs)
frequency = float(frequency)
for frame in range(self.framecount):
amplitude = self._sinusoid_amplitude(frame, frequency)
self.wavedata[frame, 0] = amplitude
return self.wavedata
def sin_linear(self, start_freq, end_freq, *args, **kwargs):
"""Sinusoid wave of linearly changing frequency."""
self._init(*args, **kwargs)
for frame in range(self.framecount):
# freq = start_freq + frame * freq_rate
# freq_rate = total_freq_change / framecount
frequency = start_freq + frame * (
float(end_freq - start_freq) / self.framecount)
amplitude = self._sinusoid_amplitude(frame, frequency)
self.wavedata[frame, 0] = amplitude
return self.wavedata
| Python | 0.000002 |
927d762daf41a32be37bd056c889e170d6efbb93 | Update foreign key references for django 2.0. This change was made in Django core 3 years ago. | watson/models.py | watson/models.py | """Models used by django-watson."""
from __future__ import unicode_literals
import uuid
from django.db import models
from django.contrib.contenttypes.models import ContentType
from django.utils.encoding import python_2_unicode_compatible, force_text
from django.utils.functional import cached_property
try:
from django.contrib.contenttypes.fields import GenericForeignKey
except ImportError:
from django.contrib.contenttypes.generic import GenericForeignKey
def has_int_pk(model):
"""Tests whether the given model has an integer primary key."""
pk = model._meta.pk
return (
(
isinstance(pk, (models.IntegerField, models.AutoField)) and
not isinstance(pk, models.BigIntegerField)
) or (
isinstance(pk, models.ForeignKey) and has_int_pk(pk.remote_field.model)
)
)
def get_str_pk(obj, connection):
return obj.pk.hex if isinstance(obj.pk, uuid.UUID) and connection.vendor != "postgresql" else force_text(obj.pk)
META_CACHE_KEY = "_meta_cache"
@python_2_unicode_compatible
class SearchEntry(models.Model):
"""An entry in the search index."""
engine_slug = models.CharField(
max_length=200,
db_index=True,
default="default",
)
content_type = models.ForeignKey(
ContentType,
on_delete=models.CASCADE,
)
object_id = models.TextField()
object_id_int = models.IntegerField(
blank=True,
null=True,
db_index=True,
)
object = GenericForeignKey()
title = models.CharField(
max_length=1000,
)
description = models.TextField(
blank=True,
)
content = models.TextField(
blank=True,
)
url = models.CharField(
max_length=1000,
blank=True,
)
meta_encoded = models.TextField()
def _deserialize_meta(self):
from watson.search import SearchEngine
engine = SearchEngine._created_engines[self.engine_slug]
model = ContentType.objects.get_for_id(self.content_type_id).model_class()
adapter = engine.get_adapter(model)
return adapter.deserialize_meta(self.meta_encoded)
@cached_property
def meta(self):
"""Returns the meta information stored with the search entry."""
# Attempt to use the cached value.
if hasattr(self, META_CACHE_KEY):
return getattr(self, META_CACHE_KEY)
# Decode the meta.
meta_value = self._deserialize_meta()
setattr(self, META_CACHE_KEY, meta_value)
return meta_value
def get_absolute_url(self):
"""Returns the URL of the referenced object."""
return self.url
def __str__(self):
"""Returns a string representation."""
return self.title
class Meta:
verbose_name_plural = "search entries"
app_label = 'watson'
| """Models used by django-watson."""
from __future__ import unicode_literals
import uuid
from django.db import models
from django.contrib.contenttypes.models import ContentType
from django.utils.encoding import python_2_unicode_compatible, force_text
from django.utils.functional import cached_property
try:
from django.contrib.contenttypes.fields import GenericForeignKey
except ImportError:
from django.contrib.contenttypes.generic import GenericForeignKey
def has_int_pk(model):
"""Tests whether the given model has an integer primary key."""
pk = model._meta.pk
return (
(
isinstance(pk, (models.IntegerField, models.AutoField)) and
not isinstance(pk, models.BigIntegerField)
) or (
isinstance(pk, models.ForeignKey) and has_int_pk(pk.rel.to)
)
)
def get_str_pk(obj, connection):
return obj.pk.hex if isinstance(obj.pk, uuid.UUID) and connection.vendor != "postgresql" else force_text(obj.pk)
META_CACHE_KEY = "_meta_cache"
@python_2_unicode_compatible
class SearchEntry(models.Model):
"""An entry in the search index."""
engine_slug = models.CharField(
max_length=200,
db_index=True,
default="default",
)
content_type = models.ForeignKey(
ContentType,
on_delete=models.CASCADE,
)
object_id = models.TextField()
object_id_int = models.IntegerField(
blank=True,
null=True,
db_index=True,
)
object = GenericForeignKey()
title = models.CharField(
max_length=1000,
)
description = models.TextField(
blank=True,
)
content = models.TextField(
blank=True,
)
url = models.CharField(
max_length=1000,
blank=True,
)
meta_encoded = models.TextField()
def _deserialize_meta(self):
from watson.search import SearchEngine
engine = SearchEngine._created_engines[self.engine_slug]
model = ContentType.objects.get_for_id(self.content_type_id).model_class()
adapter = engine.get_adapter(model)
return adapter.deserialize_meta(self.meta_encoded)
@cached_property
def meta(self):
"""Returns the meta information stored with the search entry."""
# Attempt to use the cached value.
if hasattr(self, META_CACHE_KEY):
return getattr(self, META_CACHE_KEY)
# Decode the meta.
meta_value = self._deserialize_meta()
setattr(self, META_CACHE_KEY, meta_value)
return meta_value
def get_absolute_url(self):
"""Returns the URL of the referenced object."""
return self.url
def __str__(self):
"""Returns a string representation."""
return self.title
class Meta:
verbose_name_plural = "search entries"
app_label = 'watson'
| Python | 0 |
0c6c9de85161a6c15f84b19d473d992c90a31ea5 | add per transaction coverage increase metrics | mythril/laser/ethereum/plugins/implementations/coverage.py | mythril/laser/ethereum/plugins/implementations/coverage.py | from mythril.laser.ethereum.svm import LaserEVM
from mythril.laser.ethereum.plugins.plugin import LaserPlugin
from mythril.laser.ethereum.state.global_state import GlobalState
from typing import Dict, Tuple, List
import logging
log = logging.getLogger(__name__)
class InstructionCoveragePlugin(LaserPlugin):
"""InstructionCoveragePlugin
This plugin measures the instruction coverage of mythril.
The instruction coverage is the ratio between the instructions that have been executed
and the total amount of instructions.
Note that with lazy constraint solving enabled that this metric will be "unsound" as
reachability will not be considered for the calculation of instruction coverage.
"""
def __init__(self):
self.coverage = {} # type: Dict[str, Tuple[int, List[bool]]]
self.initial_coverage = 0
self.tx_id = 0
def initialize(self, symbolic_vm: LaserEVM):
"""Initializes the instruction coverage plugin
Introduces hooks for each instruction
:param symbolic_vm:
:return:
"""
self.coverage = {}
self.initial_coverage = 0
self.tx_id = 0
@symbolic_vm.laser_hook("stop_sym_exec")
def stop_sym_exec_hook():
# Print results
for code, code_cov in self.coverage.items():
cov_percentage = sum(code_cov[1]) / float(code_cov[0]) * 100
log.info(
"Achieved {:.2f}% coverage for code: {}".format(
cov_percentage, code
)
)
@symbolic_vm.laser_hook("execute_state")
def execute_state_hook(global_state: GlobalState):
# Record coverage
code = global_state.environment.code.bytecode
if code not in self.coverage.keys():
number_of_instructions = len(
global_state.environment.code.instruction_list
)
self.coverage[code] = (
number_of_instructions,
[False] * number_of_instructions,
)
self.coverage[code][1][global_state.mstate.pc] = True
@symbolic_vm.laser_hook("start_sym_trans")
def execute_start_sym_trans_hook():
self.initial_coverage = self._get_covered_instructions()
@symbolic_vm.laser_hook("stop_sym_trans")
def execute_stop_sym_trans_hook():
end_coverage = self._get_covered_instructions()
log.info(
"Number of new instructions covered in tx %d: %d"
% (self.tx_id, end_coverage - self.initial_coverage)
)
self.tx_id += 1
def _get_covered_instructions(self) -> int:
"""Gets the total number of covered instructions for all accounts in
the svm.
:return:
"""
total_covered_instructions = 0
for _, cv in self.coverage.items():
total_covered_instructions += sum(cv[1])
return total_covered_instructions
| from mythril.laser.ethereum.svm import LaserEVM
from mythril.laser.ethereum.plugins.plugin import LaserPlugin
from mythril.laser.ethereum.state.global_state import GlobalState
from typing import Dict, Tuple, List
import logging
log = logging.getLogger(__name__)
class InstructionCoveragePlugin(LaserPlugin):
"""InstructionCoveragePlugin
This plugin measures the instruction coverage of mythril.
The instruction coverage is the ratio between the instructions that have been executed
and the total amount of instructions.
Note that with lazy constraint solving enabled that this metric will be "unsound" as
reachability will not be considered for the calculation of instruction coverage.
"""
def initialize(self, symbolic_vm: LaserEVM):
"""Initializes the instruction coverage plugin
Introduces hooks for each instruction
:param symbolic_vm:
:return:
"""
coverage = {} # type: Dict[str, Tuple[int, List[bool]]]
@symbolic_vm.laser_hook("stop_sym_exec")
def stop_sym_exec_hook():
# Print results
for code, code_cov in coverage.items():
cov_percentage = sum(code_cov[1]) / float(code_cov[0]) * 100
log.info(
"Achieved {:.2f}% coverage for code: {}".format(
cov_percentage, code
)
)
@symbolic_vm.laser_hook("execute_state")
def execute_state_hook(global_state: GlobalState):
# Record coverage
code = global_state.environment.code.bytecode
if code not in coverage.keys():
number_of_instructions = len(
global_state.environment.code.instruction_list
)
coverage[code] = (
number_of_instructions,
[False] * number_of_instructions,
)
coverage[code][1][global_state.mstate.pc] = True
| Python | 0 |
3494ed60343760d3ee520b37b793afa49f225f35 | Add function to check if loop is dummy or not | pitch/lib/logic/control_flow.py | pitch/lib/logic/control_flow.py | from __future__ import unicode_literals
import itertools
import yaml
from ..common.utils import to_iterable
from ..templating.structures import PitchTemplate, JinjaExpressionResolver
class ControlFlowStatement(object):
def __init__(self, statement_type):
self.__statement_type = statement_type
@property
def type(self):
return self.__statement_type
class Conditional(ControlFlowStatement):
def __init__(self, step_context_proxy):
self.__step_context_proxy = step_context_proxy
self.__conditional_default = PitchTemplate('true')
self.__expression = None
self.__value = None
super(Conditional, self).__init__('conditional')
def __reinitialize(self):
self.__expression = None
self.__value = None
@property
def value(self):
return self.__value
@property
def expression(self):
return self.__expression
def evaluate(self):
self.__reinitialize()
context = self.__step_context_proxy.get_context()
default = self.__conditional_default
step_conditional = context.step.get('when', default)
if isinstance(step_conditional, bool):
evaluated_value = step_conditional
self.__expression = str(step_conditional)
else:
resolver = JinjaExpressionResolver(step_context=context)
resolved_value = resolver(step_conditional)
evaluated_value = yaml.safe_load(context.renderer(resolved_value))
self.__expression = step_conditional.as_string()
self.__value = evaluated_value
return self.__value
class Loop(ControlFlowStatement):
def __init__(self, step_context_proxy):
self.__step_context_proxy = step_context_proxy
self.__items = None
self.__command_iterable = None
self.__command = None
self.__command_details = None
super(Loop, self).__init__('loop')
def __reinitialize(self):
self.__items = None
def is_effective(self):
return self.__command is not None
@property
def items(self):
return self.__command_iterable
@property
def command(self):
return self.__command
def set_loop_variable(self, item):
active_context = self.__step_context_proxy.get_context()
active_context.template_context['item'] = item
return item
def evaluate(self):
self.__reinitialize()
step_context = self.__step_context_proxy.get_context()
step = step_context.step
loop_command_key, loop_command_details = step.get_any_item_by_key(
'with_items',
'with_indexed_items',
'with_nested'
)
loop_command_details = to_iterable(loop_command_details)
if loop_command_key is None:
with_items_iterable = [(None,)]
else:
loop_command_details = map(
step_context.renderer,
loop_command_details
)
with_items_iterable = filter(
None,
list(
itertools.product(
*filter(
None,
map(
step_context.template_context.nested_get,
loop_command_details
)
)
)
)
)
if loop_command_key == 'with_indexed_items':
with_items_iterable = enumerate(with_items_iterable)
self.__command_iterable = map(
lambda item: item if len(item) > 1 else item[0],
with_items_iterable
)
self.__command = loop_command_key
self.__command_details = loop_command_details
| from __future__ import unicode_literals
import itertools
import yaml
from ..common.utils import to_iterable
from ..templating.structures import PitchTemplate, JinjaExpressionResolver
class ControlFlowStatement(object):
def __init__(self, statement_type):
self.__statement_type = statement_type
@property
def type(self):
return self.__statement_type
class Conditional(ControlFlowStatement):
def __init__(self, step_context_proxy):
self.__step_context_proxy = step_context_proxy
self.__conditional_default = PitchTemplate('true')
self.__expression = None
self.__value = None
super(Conditional, self).__init__('conditional')
def __reinitialize(self):
self.__expression = None
self.__value = None
@property
def value(self):
return self.__value
@property
def expression(self):
return self.__expression
def evaluate(self):
self.__reinitialize()
context = self.__step_context_proxy.get_context()
default = self.__conditional_default
step_conditional = context.step.get('when', default)
if isinstance(step_conditional, bool):
evaluated_value = step_conditional
self.__expression = str(step_conditional)
else:
resolver = JinjaExpressionResolver(step_context=context)
resolved_value = resolver(step_conditional)
evaluated_value = yaml.safe_load(context.renderer(resolved_value))
self.__expression = step_conditional.as_string()
self.__value = evaluated_value
return self.__value
class Loop(ControlFlowStatement):
def __init__(self, step_context_proxy):
self.__step_context_proxy = step_context_proxy
self.__items = None
self.__command_iterable = None
self.__command = None
self.__command_details = None
super(Loop, self).__init__('loop')
def __reinitialize(self):
self.__items = None
@property
def items(self):
return self.__command_iterable
@property
def command(self):
return self.__command
def set_loop_variable(self, item):
active_context = self.__step_context_proxy.get_context()
active_context.template_context['item'] = item
return item
def evaluate(self):
self.__reinitialize()
step_context = self.__step_context_proxy.get_context()
step = step_context.step
loop_command_key, loop_command_details = step.get_any_item_by_key(
'with_items',
'with_indexed_items',
'with_nested'
)
loop_command_details = to_iterable(loop_command_details)
if loop_command_key is None:
with_items_iterable = [(None,)]
else:
loop_command_details = map(
step_context.renderer,
loop_command_details
)
with_items_iterable = filter(
None,
list(
itertools.product(
*filter(
None,
map(
step_context.template_context.nested_get,
loop_command_details
)
)
)
)
)
if loop_command_key == 'with_indexed_items':
with_items_iterable = enumerate(with_items_iterable)
self.__command_iterable = map(
lambda item: item if len(item) > 1 else item[0],
with_items_iterable
)
self.__command = loop_command_key
self.__command_details = loop_command_details
| Python | 0.000005 |
28c6e6747a6d0bd924f838ed4f846b01c247012b | Remove unused tempproject and tempscene | previz/testsutils.py | previz/testsutils.py | import functools
from . import PrevizProject
class Decorators(object):
def __init__(self, api_token, api_root, new_project_prefix = 'cf-'):
self.api_root = api_root
self.api_token = api_token
self.new_project_prefix = new_project_prefix
def project(self, project_id):
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
p = PrevizProject(self.api_root, self.api_token, project_id)
project = p.project(include=['scenes'])
func(project=project, *args, **kwargs)
return wrapper
return decorator
def tempproject(self):
'''Returning an existing project while the API v2 is being worked on'''
return self.project('8d9e684f-0763-4756-844b-d0219a4f3f9a')
def scene(self, scene_id):
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
project_id = kwargs['project']['id']
p = PrevizProject(self.api_root, self.api_token, project_id)
scene = p.scene(scene_id, include=[])
func(scene=scene, *args, **kwargs)
#p = PrevizProject(self.api_root, self.api_token, project_id)
#func(project=p.project(include=['scenes']), *args, **kwargs)
return wrapper
return decorator
def tempscene(self):
'''Returning an existing scene while the API v2 is being worked on'''
return self.scene('5a56a895-46ef-4f0f-862c-38ce14f6275b')
| import functools
from . import PrevizProject
class Decorators(object):
def __init__(self, api_token, api_root, new_project_prefix = 'cf-'):
self.api_root = api_root
self.api_token = api_token
self.new_project_prefix = new_project_prefix
def project(self, project_id):
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
p = PrevizProject(self.api_root, self.api_token, project_id)
project = p.project(include=['scenes'])
func(project=project, *args, **kwargs)
return wrapper
return decorator
def tempproject(self):
'''Returning an existing project while the API v2 is being worked on'''
return self.project('8d9e684f-0763-4756-844b-d0219a4f3f9a')
def scene(self, scene_id):
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
project_id = kwargs['project']['id']
p = PrevizProject(self.api_root, self.api_token, project_id)
scene = p.scene(scene_id, include=[])
func(scene=scene, *args, **kwargs)
#p = PrevizProject(self.api_root, self.api_token, project_id)
#func(project=p.project(include=['scenes']), *args, **kwargs)
return wrapper
return decorator
def tempscene(self):
'''Returning an existing scene while the API v2 is being worked on'''
return self.scene('5a56a895-46ef-4f0f-862c-38ce14f6275b')
def tempproject(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
api_root = os.environ[PREVIZ_API_ROOT_ENVVAR]
api_token = os.environ[PREVIZ_API_TOKEN_ENVVAR]
project_name = 'cf-' + func.__qualname__
p = PrevizProject(api_root, api_token)
#p.project_id = p.new_project(project_name)['id']
p.project_id = 'a5ff9cef-4904-4dc3-8a3c-821a219c891e' # p.project_id
func(project_id=p.project_id, *args, **kwargs)
#p.delete_project()
return wrapper
def tempscene(func):
pass
| Python | 0 |
2528f4ad76dad2915be1d51483cea7693ea453ce | fix test | tests/server4_test.py | tests/server4_test.py | # -*- coding: utf-8 -*-
u"""Test background processes
:copyright: Copyright (c) 2019 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
from pykern.pkcollections import PKDict
import pytest
def test_elegant(fc):
_r(
fc,
'Compact Storage Ring',
'twissReport',
)
def test_synergia(fc):
_r(
fc,
'Simple FODO',
'bunchReport1',
shared_model='bunchReport2',
)
def test_warppba(fc):
_r(
fc,
'Electron Beam',
'beamPreviewReport',
)
_r(
fc,
'Laser Pulse',
'laserPreviewReport',
)
def _r(fc, sim_name, analysis_model, shared_model=None):
from pykern.pkdebug import pkdp, pkdlog
from sirepo import srunit
from pykern import pkunit
import re
import time
data = fc.sr_sim_data(sim_name)
cancel = None
try:
run = fc.sr_post(
'runSimulation',
PKDict(
forceRun=False,
models=data.models,
report=analysis_model,
simulationId=data.models.simulation.simulationId,
simulationType=data.simulationType,
),
)
import sirepo.sim_data
s = sirepo.sim_data.get_class(fc.sr_sim_type)
pkunit.pkeq('pending', run.state, 'not pending, run={}', run)
cancel = next_request = run.nextRequest
for _ in range(7):
if run.state in ('completed', 'error'):
cancel = None
break
run = fc.sr_post('runStatus', run.nextRequest)
time.sleep(1)
else:
pkunit.pkfail('did not complete: runStatus={}', run)
pkunit.pkeq('completed', run.state)
if shared_model:
next_request.report = shared_model
run = fc.sr_post('runStatus', next_request)
pkunit.pkeq('completed', run.state)
finally:
try:
if cancel:
fc.sr_post('runCancel', cancel)
except Exception:
pass
| # -*- coding: utf-8 -*-
u"""Test background processes
:copyright: Copyright (c) 2019 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
from pykern.pkcollections import PKDict
import pytest
def test_elegant(fc):
_r(
fc,
'Compact Storage Ring',
'twissReport',
)
def test_synergia(fc):
_r(
fc,
'Simple FODO',
'bunchReport1',
shared_model='bunchReport2',
)
def test_warppba(fc):
_r(
fc,
'Electron Beam',
'beamPreviewReport',
)
_r(
fc,
'Laser Pulse',
'laserPreviewReport',
)
def _r(fc, sim_name, analysis_model, shared_model):
from pykern.pkdebug import pkdp, pkdlog
from sirepo import srunit
from pykern import pkunit
import re
import time
data = fc.sr_sim_data(sim_name)
cancel = None
try:
run = fc.sr_post(
'runSimulation',
PKDict(
forceRun=False,
models=data.models,
report=analysis_model,
simulationId=data.models.simulation.simulationId,
simulationType=data.simulationType,
),
)
import sirepo.sim_data
s = sirepo.sim_data.get_class(fc.sr_sim_type)
pkunit.pkeq('pending', run.state, 'not pending, run={}', run)
cancel = next_request = run.nextRequest
for _ in range(7):
if run.state in ('completed', 'error'):
cancel = None
break
run = fc.sr_post('runStatus', run.nextRequest)
time.sleep(1)
else:
pkunit.pkfail('did not complete: runStatus={}', run)
pkunit.pkeq('completed', run.state)
if shared_model:
next_request.report = shared_model
run = fc.sr_post('runStatus', next_request)
pkunit.pkeq('completed', run.state)
finally:
try:
if cancel:
fc.sr_post('runCancel', cancel)
except Exception:
pass
| Python | 0.000002 |
1b16467c9d24c770578d5f94e9715d754885de98 | correct output at switch on | printStatusChange.py | printStatusChange.py | import re
def readStatus():
status = ''
f = open('/proc/asound/card0/pcm0p/sub0/status', 'r')
for line in f:
matchObj = re.match(r'state.*', line)
if matchObj:
status = matchObj.group()
break
matchObj = re.match(r'closed', line)
if matchObj:
status = matchObj.group()
break
return status
import time
SHUTDOWN_TIME = 10
def do_main_program( console ):
loop = True
shutdownTimer = SHUTDOWN_TIME
power = 0
while loop:
status = readStatus()
if status == "closed":
if power == 1:
if shutdownTimer == 0:
print("Power OFF")
power = 0
else:
shutdownTimer = shutdownTimer-1
print("count down... " + str(shutdownTimer))
else:
if power == 0:
print("Power ON")
power = 1
shutdownTimer = SHUTDOWN_TIME
if shutdownTimer != SHUTDOWN_TIME:
shutdownTimer = SHUTDOWN_TIME
print("Stopping count down (Power is still ON)")
time.sleep(1)
#fp = open('status.log','a')
#fp.write(status+"\n")
#fp.close()
import os
from optparse import OptionParser
import daemon
if __name__ == "__main__":
parser = OptionParser( os.path.relpath(__file__) + " [-c] | [-d]" )
parser.add_option("-d", "--daemon", action="store_true", dest="daemon", default=False, help="start as daemon")
parser.add_option("-c", "--console", action="store_true", dest="console", default=False, help="output on console")
(optionen, args) = parser.parse_args()
if optionen.daemon:
with daemon.DaemonContext():
do_main_program(False)
else:
do_main_program(optionen.console)
sys.exit(0)
| import re
def readStatus():
status = ''
f = open('/proc/asound/card0/pcm0p/sub0/status', 'r')
for line in f:
matchObj = re.match(r'state.*', line)
if matchObj:
status = matchObj.group()
break
matchObj = re.match(r'closed', line)
if matchObj:
status = matchObj.group()
break
return status
import time
SHUTDOWN_TIME = 10
def do_main_program( console ):
loop = True
shutdownTimer = SHUTDOWN_TIME
power = 0
while loop:
status = readStatus()
if status == "closed":
if power == 1:
if shutdownTimer == 0:
print("Power OFF")
power = 0
else:
shutdownTimer = shutdownTimer-1
print("count down... " + str(shutdownTimer))
else:
if power == 0:
print("Power ON")
power = 1
if shutdownTimer != SHUTDOWN_TIME:
shutdownTimer = SHUTDOWN_TIME
print("(stop count down) Power still ON")
time.sleep(1)
#fp = open('status.log','a')
#fp.write(status+"\n")
#fp.close()
import os
from optparse import OptionParser
import daemon
if __name__ == "__main__":
parser = OptionParser( os.path.relpath(__file__) + " [-c] | [-d]" )
parser.add_option("-d", "--daemon", action="store_true", dest="daemon", default=False, help="start as daemon")
parser.add_option("-c", "--console", action="store_true", dest="console", default=False, help="output on console")
(optionen, args) = parser.parse_args()
if optionen.daemon:
with daemon.DaemonContext():
do_main_program(False)
else:
do_main_program(optionen.console)
sys.exit(0)
| Python | 0.000011 |
f54802514b6d3ba66269c4e09640d2de7a7dade2 | Fix regression in filesystem watcher | watcher/straight_watch_callback.py | watcher/straight_watch_callback.py | #!/usr/bin/env -S python3 -u
import os
import pathlib
import sys
WATCHEXEC_VAR_COMMON = "WATCHEXEC_COMMON_PATH"
WATCHEXEC_VARS = [
"WATCHEXEC_CREATED_PATH",
"WATCHEXEC_REMOVED_PATH",
"WATCHEXEC_RENAMED_PATH",
"WATCHEXEC_WRITTEN_PATH",
"WATCHEXEC_META_CHANGED_PATH",
]
def die(message):
print(message, file=sys.stderr)
sys.exit(1)
def usage():
return "usage: python -m straight_watch_callback <repos-dir> <modified-dir>"
def path_contains(parent, child):
parent = pathlib.Path(parent).resolve()
child = pathlib.Path(child).resolve()
return parent in child.parents
def path_strip(parent, child):
parent = pathlib.Path(parent).parts
child = pathlib.Path(child).parts
return child[len(parent)]
def main(args):
if len(args) != 2:
die(usage())
repos_dir, modified_dir = args
repos_dir = pathlib.Path(repos_dir).resolve()
modified_dir = pathlib.Path(modified_dir).resolve()
paths = []
for var in WATCHEXEC_VARS:
if var in os.environ:
for path in os.environ[var].split(os.pathsep):
paths.append(path)
if not paths:
die("straight_watch_callback.py: watchexec gave no modified files")
if WATCHEXEC_VAR_COMMON in os.environ:
common = os.environ[WATCHEXEC_VAR_COMMON]
# Yes, string concatentation. For some reason when a common
# prefix is used, the individual paths start with a slash even
# though they're actually relative to the prefix.
paths = [common + path for path in paths]
paths = [pathlib.Path(path).resolve() for path in paths]
paths = sorted(set(paths))
repos = set()
for path in paths:
print("detect modification: {}".format(path), file=sys.stderr)
if repos_dir in path.parents:
repo = path.relative_to(repos_dir).parts[0]
repos.add(repo)
if repos:
modified_dir.mkdir(parents=True, exist_ok=True)
repos = sorted(repos)
for repo in repos:
print("--> mark for rebuild: {}".format(repo), file=sys.stderr)
with open(modified_dir / repo, "w"):
pass
if __name__ == "__main__":
main(sys.argv[1:])
| #!/usr/bin/env python3 -u
import os
import pathlib
import sys
WATCHEXEC_VAR_COMMON = "WATCHEXEC_COMMON_PATH"
WATCHEXEC_VARS = [
"WATCHEXEC_CREATED_PATH",
"WATCHEXEC_REMOVED_PATH",
"WATCHEXEC_RENAMED_PATH",
"WATCHEXEC_WRITTEN_PATH",
"WATCHEXEC_META_CHANGED_PATH",
]
def die(message):
print(message, file=sys.stderr)
sys.exit(1)
def usage():
return "usage: python -m straight_watch_callback <repos-dir> <modified-dir>"
def path_contains(parent, child):
parent = pathlib.Path(parent).resolve()
child = pathlib.Path(child).resolve()
return parent in child.parents
def path_strip(parent, child):
parent = pathlib.Path(parent).parts
child = pathlib.Path(child).parts
return child[len(parent)]
def main(args):
if len(args) != 2:
die(usage())
repos_dir, modified_dir = args
repos_dir = pathlib.Path(repos_dir).resolve()
modified_dir = pathlib.Path(modified_dir).resolve()
paths = []
for var in WATCHEXEC_VARS:
if var in os.environ:
for path in os.environ[var].split(os.pathsep):
paths.append(path)
if not paths:
die("straight_watch_callback.py: watchexec gave no modified files")
if WATCHEXEC_VAR_COMMON in os.environ:
common = os.environ[WATCHEXEC_VAR_COMMON]
# Yes, string concatentation. For some reason when a common
# prefix is used, the individual paths start with a slash even
# though they're actually relative to the prefix.
paths = [common + path for path in paths]
paths = [pathlib.Path(path).resolve() for path in paths]
paths = sorted(set(paths))
repos = set()
for path in paths:
print("detect modification: {}".format(path), file=sys.stderr)
if repos_dir in path.parents:
repo = path.relative_to(repos_dir).parts[0]
repos.add(repo)
if repos:
modified_dir.mkdir(parents=True, exist_ok=True)
repos = sorted(repos)
for repo in repos:
print("--> mark for rebuild: {}".format(repo), file=sys.stderr)
with open(modified_dir / repo, "w"):
pass
if __name__ == "__main__":
main(sys.argv[1:])
| Python | 0.000019 |
66e9e8a35831e603509d96ac8f1cab7bc8b9a3fc | enforce lot locking when using quick-create in web client | stock_lock_lot/models/stock_production_lot.py | stock_lock_lot/models/stock_production_lot.py | # -*- coding: utf-8 -*-
# © 2015 Serv. Tec. Avanzados - Pedro M. Baeza (http://www.serviciosbaeza.com)
# © 2015 AvanzOsc (http://www.avanzosc.es)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp import models, fields, api, exceptions, _
class StockProductionLot(models.Model):
_name = 'stock.production.lot'
_inherit = ['stock.production.lot', 'mail.thread']
_mail_post_access = 'read'
_track = {
'locked': {
'stock_lock_lot.mt_lock_lot': lambda self, cr, uid, obj,
ctx=None: obj.locked,
'stock_lock_lot.mt_unlock_lot': lambda self, cr, uid, obj,
ctx=None: not obj.locked,
},
}
def _get_product_locked(self, product):
"""Should create locked? (including categories and parents)
@param product: browse-record for product.product
@return True when the category of the product or one of the parents
demand new lots to be locked"""
_locked = product.categ_id.lot_default_locked
categ = product.categ_id.parent_id
while categ and not _locked:
_locked = categ.lot_default_locked
categ = categ.parent_id
return _locked
@api.one
def _get_locked_value(self):
return self._get_product_locked(self.product_id)
locked = fields.Boolean(string='Blocked', default='_get_locked_value',
readonly=True)
@api.one
@api.onchange('product_id')
def onchange_product_id(self):
'''Instruct the client to lock/unlock a lot on product change'''
self.locked = self._get_product_locked(self.product_id)
@api.multi
def button_lock(self):
'''Lock the lot if the reservations allow it'''
stock_quant_obj = self.env['stock.quant']
for lot in self:
cond = [('lot_id', '=', lot.id),
('reservation_id', '!=', False)]
for quant in stock_quant_obj.search(cond):
if quant.reservation_id.state not in ('cancel', 'done'):
raise exceptions.Warning(
_('Error! Serial Number/Lot "%s" currently has '
'reservations.')
% (lot.name))
return self.write({'locked': True})
@api.multi
def button_unlock(self):
return self.write({'locked': False})
# Kept in old API to maintain compatibility
def create(self, cr, uid, vals, context=None):
'''Force the locking/unlocking, ignoring the value of 'locked'.'''
# Web quick-create doesn't provide product_id in vals, but in context
product_id = vals.get('product_id', context.get('product_id', False))
if product_id:
vals['locked'] = self._get_product_locked(
self.pool['product.product'].browse(
cr, uid, product_id, context=context))
return super(StockProductionLot, self).create(
cr, uid, vals, context=context)
@api.multi
def write(self, values):
'''Lock the lot if changing the product and locking is required'''
if 'product_id' in values:
product = self.env['product.product'].browse(
values.get('product_id'))
values['locked'] = self._get_product_locked(product)
return super(StockProductionLot, self).write(values)
| # -*- coding: utf-8 -*-
# © 2015 Serv. Tec. Avanzados - Pedro M. Baeza (http://www.serviciosbaeza.com)
# © 2015 AvanzOsc (http://www.avanzosc.es)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp import models, fields, api, exceptions, _
class StockProductionLot(models.Model):
_name = 'stock.production.lot'
_inherit = ['stock.production.lot', 'mail.thread']
_mail_post_access = 'read'
_track = {
'locked': {
'stock_lock_lot.mt_lock_lot': lambda self, cr, uid, obj,
ctx=None: obj.locked,
'stock_lock_lot.mt_unlock_lot': lambda self, cr, uid, obj,
ctx=None: not obj.locked,
},
}
def _get_product_locked(self, product):
"""Should create locked? (including categories and parents)
@param product: browse-record for product.product
@return True when the category of the product or one of the parents
demand new lots to be locked"""
_locked = product.categ_id.lot_default_locked
categ = product.categ_id.parent_id
while categ and not _locked:
_locked = categ.lot_default_locked
categ = categ.parent_id
return _locked
@api.one
def _get_locked_value(self):
return self._get_product_locked(self.product_id)
locked = fields.Boolean(string='Blocked', default='_get_locked_value',
readonly=True)
@api.one
@api.onchange('product_id')
def onchange_product_id(self):
'''Instruct the client to lock/unlock a lot on product change'''
self.locked = self._get_product_locked(self.product_id)
@api.multi
def button_lock(self):
'''Lock the lot if the reservations allow it'''
stock_quant_obj = self.env['stock.quant']
for lot in self:
cond = [('lot_id', '=', lot.id),
('reservation_id', '!=', False)]
for quant in stock_quant_obj.search(cond):
if quant.reservation_id.state not in ('cancel', 'done'):
raise exceptions.Warning(
_('Error! Serial Number/Lot "%s" currently has '
'reservations.')
% (lot.name))
return self.write({'locked': True})
@api.multi
def button_unlock(self):
return self.write({'locked': False})
# Kept in old API to maintain compatibility
def create(self, cr, uid, vals, context=None):
'''Force the locking/unlocking, ignoring the value of 'locked'.'''
product = self.pool['product.product'].browse(
cr, uid, vals.get('product_id'))
vals['locked'] = self._get_product_locked(product)
return super(StockProductionLot, self).create(
cr, uid, vals, context=context)
@api.multi
def write(self, values):
'''Lock the lot if changing the product and locking is required'''
if 'product_id' in values:
product = self.env['product.product'].browse(
values.get('product_id'))
values['locked'] = self._get_product_locked(product)
return super(StockProductionLot, self).write(values)
| Python | 0 |
707be52f21dae6682cd541d62941c0095869c98f | Update time util docstrings | mycroft/util/time.py | mycroft/util/time.py | #
# Copyright 2018 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Time utils for getting and converting datetime objects for the Mycroft
system. This time is based on the setting in the Mycroft config and may or
may not match the system locale.
"""
from datetime import datetime
from dateutil.tz import gettz, tzlocal
def default_timezone():
"""Get the default timezone
Based on user location settings location.timezone.code or
the default system value if no setting exists.
Returns:
(datetime.tzinfo): Definition of the default timezone
"""
try:
# Obtain from user's configurated settings
# location.timezone.code (e.g. "America/Chicago")
# location.timezone.name (e.g. "Central Standard Time")
# location.timezone.offset (e.g. -21600000)
from mycroft.configuration import Configuration
config = Configuration.get()
code = config["location"]["timezone"]["code"]
return gettz(code)
except Exception:
# Just go with system default timezone
return tzlocal()
def now_utc():
"""Retrieve the current time in UTC
Returns:
(datetime): The current time in Universal Time, aka GMT
"""
return to_utc(datetime.utcnow())
def now_local(tz=None):
"""Retrieve the current time
Arguments:
tz (datetime.tzinfo, optional): Timezone, default to user's settings
Returns:
(datetime): The current time
"""
if not tz:
tz = default_timezone()
return datetime.now(tz)
def to_utc(dt):
"""Convert a datetime with timezone info to a UTC datetime
Arguments:
dt (datetime): A datetime (presumably in some local zone)
Returns:
(datetime): time converted to UTC
"""
tzUTC = gettz("UTC")
if dt.tzinfo:
return dt.astimezone(tzUTC)
else:
return dt.replace(tzinfo=gettz("UTC")).astimezone(tzUTC)
def to_local(dt):
"""Convert a datetime to the user's local timezone
Arguments:
dt (datetime): A datetime (if no timezone, defaults to UTC)
Returns:
(datetime): time converted to the local timezone
"""
tz = default_timezone()
if dt.tzinfo:
return dt.astimezone(tz)
else:
return dt.replace(tzinfo=gettz("UTC")).astimezone(tz)
def to_system(dt):
"""Convert a datetime to the system's local timezone
Arguments:
dt (datetime): A datetime (if no timezone, assumed to be UTC)
Returns:
(datetime): time converted to the operation system's timezone
"""
tz = tzlocal()
if dt.tzinfo:
return dt.astimezone(tz)
else:
return dt.replace(tzinfo=gettz("UTC")).astimezone(tz)
| #
# Copyright 2018 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from datetime import datetime
from dateutil.tz import gettz, tzlocal
def default_timezone():
""" Get the default timezone
Based on user location settings location.timezone.code or
the default system value if no setting exists.
Returns:
(datetime.tzinfo): Definition of the default timezone
"""
try:
# Obtain from user's configurated settings
# location.timezone.code (e.g. "America/Chicago")
# location.timezone.name (e.g. "Central Standard Time")
# location.timezone.offset (e.g. -21600000)
from mycroft.configuration import Configuration
config = Configuration.get()
code = config["location"]["timezone"]["code"]
return gettz(code)
except Exception:
# Just go with system default timezone
return tzlocal()
def now_utc():
""" Retrieve the current time in UTC
Returns:
(datetime): The current time in Universal Time, aka GMT
"""
return to_utc(datetime.utcnow())
def now_local(tz=None):
""" Retrieve the current time
Args:
tz (datetime.tzinfo, optional): Timezone, default to user's settings
Returns:
(datetime): The current time
"""
if not tz:
tz = default_timezone()
return datetime.now(tz)
def to_utc(dt):
""" Convert a datetime with timezone info to a UTC datetime
Args:
dt (datetime): A datetime (presumably in some local zone)
Returns:
(datetime): time converted to UTC
"""
tzUTC = gettz("UTC")
if dt.tzinfo:
return dt.astimezone(tzUTC)
else:
return dt.replace(tzinfo=gettz("UTC")).astimezone(tzUTC)
def to_local(dt):
""" Convert a datetime to the user's local timezone
Args:
dt (datetime): A datetime (if no timezone, defaults to UTC)
Returns:
(datetime): time converted to the local timezone
"""
tz = default_timezone()
if dt.tzinfo:
return dt.astimezone(tz)
else:
return dt.replace(tzinfo=gettz("UTC")).astimezone(tz)
def to_system(dt):
""" Convert a datetime to the system's local timezone
Args:
dt (datetime): A datetime (if no timezone, assumed to be UTC)
Returns:
(datetime): time converted to the operation system's timezone
"""
tz = tzlocal()
if dt.tzinfo:
return dt.astimezone(tz)
else:
return dt.replace(tzinfo=gettz("UTC")).astimezone(tz)
| Python | 0.000001 |
ee1f59a8a61709bebc4f08adce520ab251576148 | Use a single query to fetch the guardian and hub. | us_ignite/hubs/views.py | us_ignite/hubs/views.py | from django.db.models import Q
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.http import Http404
from django.template.response import TemplateResponse
from django.shortcuts import get_object_or_404, redirect
from us_ignite.hubs.models import Hub, HubRequest
from us_ignite.hubs import forms, mailer
@login_required
def hub_application(request):
"""View to submit a ``Hub`` for consideration"""
object_list = HubRequest.objects.filter(
~Q(status=HubRequest.REMOVED), user=request.user)
if request.method == 'POST':
form = forms.HubRequestForm(request.POST)
if form.is_valid():
instance = form.save(commit=False)
instance.user = request.user
instance.save()
# Notify US Ignite about this request:
mailer.notify_request(instance)
msg = 'The registration for "%s" has been submited.' % instance.name
messages.success(request, msg)
return redirect('home')
else:
form = forms.HubRequestForm()
context = {
'form': form,
'object_list': object_list,
}
return TemplateResponse(request, 'hubs/object_application.html', context)
def hub_detail(request, slug):
"""Homepage of a Ignite Community.
This view aggregates all the content related to this ``Hub``.
"""
instance = get_object_or_404(
Hub.objects.select_related('guardian'), slug=slug)
if not instance.is_published() and not instance.is_guardian(request.user):
raise Http404
context = {
'object': instance,
}
return TemplateResponse(request, 'hubs/object_detail.html', context)
| from django.db.models import Q
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.http import Http404
from django.template.response import TemplateResponse
from django.shortcuts import get_object_or_404, redirect
from us_ignite.hubs.models import Hub, HubRequest
from us_ignite.hubs import forms, mailer
@login_required
def hub_application(request):
"""View to submit a ``Hub`` for consideration"""
object_list = HubRequest.objects.filter(
~Q(status=HubRequest.REMOVED), user=request.user)
if request.method == 'POST':
form = forms.HubRequestForm(request.POST)
if form.is_valid():
instance = form.save(commit=False)
instance.user = request.user
instance.save()
# Notify US Ignite about this request:
mailer.notify_request(instance)
msg = 'The registration for "%s" has been submited.' % instance.name
messages.success(request, msg)
return redirect('home')
else:
form = forms.HubRequestForm()
context = {
'form': form,
'object_list': object_list,
}
return TemplateResponse(request, 'hubs/object_application.html', context)
def hub_detail(request, slug):
"""Homepage of a Ignite Community.
This view aggregates all the content related to this ``Hub``.
"""
instance = get_object_or_404(Hub, slug=slug)
if not instance.is_published() and not instance.is_guardian(request.user):
raise Http404
context = {
'object': instance,
}
return TemplateResponse(request, 'hubs/object_detail.html', context)
| Python | 0 |
abd2bbc081167c9306558d6376c691a965e0cf71 | Improve onchage experience | l10n_es_location_nuts/models/res_partner.py | l10n_es_location_nuts/models/res_partner.py | # -*- coding: utf-8 -*-
# Python source code encoding : https://www.python.org/dev/peps/pep-0263/
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright :
# (c) 2015 Antiun Ingenieria, SL (Madrid, Spain, http://www.antiun.com)
# Antonio Espinosa <antonioea@antiun.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, api
from openerp.addons.base_location_nuts.models.res_partner \
import dict_recursive_update
class ResPartner(models.Model):
_inherit = 'res.partner'
@api.multi
def onchange_state(self, state_id):
result = super(ResPartner, self).onchange_state(state_id)
state = self.env['res.country.state'].browse(state_id)
if state.country_id.code == 'ES':
region_id = False
substate_id = False
nuts_state = self.env['res.partner.nuts'].search(
[('level', '=', 4),
('state_id', '=', state.id)])
if nuts_state:
substate = nuts_state[0].parent_id
if substate:
substate_id = substate.id
region = substate.parent_id
if region:
region_id = region.id
changes = {
'domain': {
'substate': [('country_id', '=', 'ES'),
('level', '=', 3)],
'region': [('country_id', '=', 'ES'),
('level', '=', 2)],
},
'value': {
'substate': substate_id,
'region': region_id,
}
}
dict_recursive_update(result, changes)
return result
@api.onchange('substate', 'region')
def onchange_substate_or_region(self):
result = super(ResPartner, self).onchange_substate_or_region()
if (self.state_id.country_id.code == 'ES' or
self.substate.country_id.code == 'ES' or
self.region.country_id.code == 'ES'):
changes = {
'domain': {
'substate': [('country_id', '=', 'ES'),
('level', '=', 3)],
'region': [('country_id', '=', 'ES'),
('level', '=', 2)],
}
}
if self.substate.country_id.code == 'ES':
self.region = self.substate.parent_id
self.country_id = self.substate.country_id
if self.region.country_id.code == 'ES':
self.country_id = self.region.country_id
if self.state_id.country_id.code == 'ES':
self.country_id = self.state_id.country_id
dict_recursive_update(result, changes)
return result
| # -*- coding: utf-8 -*-
# Python source code encoding : https://www.python.org/dev/peps/pep-0263/
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright :
# (c) 2015 Antiun Ingenieria, SL (Madrid, Spain, http://www.antiun.com)
# Antonio Espinosa <antonioea@antiun.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, api
from openerp.addons.base_location_nuts.models.res_partner \
import dict_recursive_update
class ResPartner(models.Model):
_inherit = 'res.partner'
@api.multi
def onchange_state(self, state_id):
result = super(ResPartner, self).onchange_state(state_id)
state = self.env['res.country.state'].browse(state_id)
if state.country_id.code == 'ES':
region_id = False
substate_id = False
nuts_state = self.env['res.partner.nuts'].search(
[('level', '=', 4),
('state_id', '=', state.id)])
if nuts_state:
substate = nuts_state[0].parent_id
if substate:
substate_id = substate.id
region = substate.parent_id
if region:
region_id = region.id
changes = {
'domain': {
'substate': [('country_id', '=', 'ES'),
('level', '=', 3)],
'region': [('country_id', '=', 'ES'),
('level', '=', 2)],
},
'value': {
'substate': substate_id,
'region': region_id,
}
}
dict_recursive_update(result, changes)
return result
| Python | 0.000001 |
0fdb93fb73142315fe404b9a161ef19af0d920cd | Add simple test for config builder | tests/test_bawlerd.py | tests/test_bawlerd.py | import io
import os
from textwrap import dedent
from pg_bawler import bawlerd
class TestBawlerdConfig:
def test_build_config_location_list(self):
assert not bawlerd.conf.build_config_location_list(locations=())
user_conf = os.path.join(
os.path.expanduser('~'),
bawlerd.conf.DEFAULT_CONFIG_FILENAME)
system_conf = os.path.join(
'/etc/pg_bawler',
bawlerd.conf.DEFAULT_CONFIG_FILENAME)
assert user_conf in bawlerd.conf.build_config_location_list()
assert system_conf in bawlerd.conf.build_config_location_list()
def test__load_file(self):
config = bawlerd.conf._load_file(io.StringIO(dedent("""\
logging:
formatters:
standard:
format: \"%(asctime)s %(levelname)s] %(name)s: %(message)s\"
handlers:
default:
level: "INFO"
formatter: standard
class: logging.StreamHandler
loggers:
"":
handlers: ["default"]
level: INFO
propagate: True
""")))
assert 'logging' in config
def test_read_config_files(self):
config_base = os.path.join(
os.path.abspath(os.path.dirname(__file__)), 'configs')
locations = [
os.path.join(config_base, 'etc'),
os.path.join(config_base, 'home'),
]
config = bawlerd.conf.read_config_files(
bawlerd.conf.build_config_location_list(locations=locations))
assert config['common']['listen_timeout'] == 40
assert 'logging' in config
| import io
import os
from textwrap import dedent
from pg_bawler import bawlerd
class TestBawlerdConfig:
def test_build_config_location_list(self):
assert not bawlerd.conf.build_config_location_list(locations=())
user_conf = os.path.join(
os.path.expanduser('~'),
bawlerd.conf.DEFAULT_CONFIG_FILENAME)
system_conf = os.path.join(
'/etc/pg_bawler',
bawlerd.conf.DEFAULT_CONFIG_FILENAME)
assert user_conf in bawlerd.conf.build_config_location_list()
assert system_conf in bawlerd.conf.build_config_location_list()
def test__load_file(self):
config = bawlerd.conf._load_file(io.StringIO(dedent("""\
logging:
formatters:
standard:
format: \"%(asctime)s %(levelname)s] %(name)s: %(message)s\"
handlers:
default:
level: "INFO"
formatter: standard
class: logging.StreamHandler
loggers:
"":
handlers: ["default"]
level: INFO
propagate: True
""")))
assert 'logging' in config
| Python | 0 |
e62b62107ef44fec414a3a932a3e3e7bad211f34 | add django-extensions for easier local view debugging | coffeestats/coffeestats/settings/local.py | coffeestats/coffeestats/settings/local.py | # -*- python -*-
# pymode:lint_ignore=W0401,E501
"""Development settings and globals."""
from __future__ import absolute_import
from .base import * # noqa
# ######### DEBUG CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = True
# ######### END DEBUG CONFIGURATION
# ######### EMAIL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# ######### END EMAIL CONFIGURATION
# ######### CACHE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
# ######### END CACHE CONFIGURATION
# ######### TOOLBAR CONFIGURATION
# See: http://django-debug-toolbar.readthedocs.org/en/latest/installation.html#explicit-setup # noqa
INSTALLED_APPS += (
'debug_toolbar',
'django_extensions',
)
MIDDLEWARE_CLASSES += (
'debug_toolbar.middleware.DebugToolbarMiddleware',
)
DEBUG_TOOLBAR_PATCH_SETTINGS = False
# http://django-debug-toolbar.readthedocs.org/en/latest/installation.html
INTERNAL_IPS = ('127.0.0.1', '10.0.2.2')
# ######### END TOOLBAR CONFIGURATION
| # -*- python -*-
# pymode:lint_ignore=W0401,E501
"""Development settings and globals."""
from __future__ import absolute_import
from .base import * # noqa
# ######### DEBUG CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = True
# ######### END DEBUG CONFIGURATION
# ######### EMAIL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# ######### END EMAIL CONFIGURATION
# ######### CACHE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
# ######### END CACHE CONFIGURATION
# ######### TOOLBAR CONFIGURATION
# See: http://django-debug-toolbar.readthedocs.org/en/latest/installation.html#explicit-setup # noqa
INSTALLED_APPS += (
'debug_toolbar',
)
MIDDLEWARE_CLASSES += (
'debug_toolbar.middleware.DebugToolbarMiddleware',
)
DEBUG_TOOLBAR_PATCH_SETTINGS = False
# http://django-debug-toolbar.readthedocs.org/en/latest/installation.html
INTERNAL_IPS = ('127.0.0.1', '10.0.2.2')
# ######### END TOOLBAR CONFIGURATION
| Python | 0 |
0589fe156e710a97fd08001142ec05dea4bc2b65 | Use other key for exercise strings for react | wger/core/management/commands/extract-i18n.py | wger/core/management/commands/extract-i18n.py | # -*- coding: utf-8 *-*
# This file is part of wger Workout Manager.
#
# wger Workout Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wger Workout Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# Django
from django.core.management.base import BaseCommand
# wger
from wger.core.models import RepetitionUnit
from wger.exercises.models import (
Equipment,
ExerciseCategory,
Muscle,
)
class Command(BaseCommand):
"""
Helper command to extract translatable content from the database such as
categories, muscles or equipment names and write it to files, so they can
be extracted and translated on weblate. This is a bit hacky and ugly, but
these strings *very* rarely change.
"""
help = 'Write the translatable strings from the database to a file'
def handle(self, **options):
# Replace whitespace with underscores and make lowercase
def cleanup_name(name: str) -> str:
return name.replace(' ', '_').lower()
# Collect all translatable items
data = [i for i in ExerciseCategory.objects.all()] \
+ [i for i in Equipment.objects.all()] \
+ [i for i in Muscle.objects.all()] \
+ [i for i in RepetitionUnit.objects.all()]
# Django - write to .tpl file
with open('wger/i18n.tpl', 'w') as f:
out = '{% load i18n %}\n'
for i in data:
out += f'{{% translate "{i}" %}}\n'
f.write(out)
self.stdout.write(self.style.SUCCESS(f'Wrote content to i18n.tpl!'))
# React - write to .tsx file (copy the file into the react repo)
with open('wger/i18n.tsx', 'w') as f:
out = '''
import { useTranslation } from "react-i18next";
export const DummyComponent = () => {
const [t, i18n] = useTranslation();'''
for i in data:
out += f't("exercises.{cleanup_name(i.__str__())}");\n'
out += '''
return (<p></p>);
};'''
f.write(out)
self.stdout.write(self.style.SUCCESS(f'Wrote content to i18n.tsx!'))
# Flutter - write to .dart file (copy the file into the flutter repo)
# TO BE IMPLEMENTED...
| # -*- coding: utf-8 *-*
# This file is part of wger Workout Manager.
#
# wger Workout Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wger Workout Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# Django
from django.core.management.base import BaseCommand
# wger
from wger.core.models import RepetitionUnit
from wger.exercises.models import (
Equipment,
ExerciseCategory,
Muscle,
)
class Command(BaseCommand):
"""
Helper command to extract translatable content from the database such as
categories, muscles or equipment names and write it to files, so they can
be extracted and translated on weblate. This is a bit hacky and ugly, but
these strings *very* rarely change.
"""
help = 'Write the translatable strings from the database to a file'
def handle(self, **options):
# Collect all translatable items
data = [i for i in ExerciseCategory.objects.all()] \
+ [i for i in Equipment.objects.all()] \
+ [i for i in Muscle.objects.all()] \
+ [i for i in RepetitionUnit.objects.all()]
# Django - write to .tpl file
with open('wger/i18n.tpl', 'w') as f:
out = '{% load i18n %}\n'
for i in data:
out += f'{{% translate "{i}" %}}\n'
f.write(out)
self.stdout.write(self.style.SUCCESS(f'Wrote content to i18n.tpl!'))
# React - write to .tsx file (copy the file into the react repo)
with open('wger/i18n.tsx', 'w') as f:
out = '''
import { useTranslation } from "react-i18next";
export const DummyComponent = () => {
const [t, i18n] = useTranslation();'''
for i in data:
out += f't("{i}");\n'
out += '''
return (<p></p>);
};'''
f.write(out)
self.stdout.write(self.style.SUCCESS(f'Wrote content to i18n.tsx!'))
# Flutter - write to .dart file (copy the file into the flutter repo)
# TO BE IMPLEMENTED...
| Python | 0.000001 |
bc75dbaecfac0b9afff324c54e6c022b35419f28 | set debug to false | project/web/app/website/settings.py | project/web/app/website/settings.py | """
Django settings for website project.
Generated by 'django-admin startproject' using Django 1.10.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os, logging
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ejxy^_(!sf_)ps@#2dr*q+x2jkuv0rre3dlm$orh%1*pvj1_jz'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
LOG = logging.basicConfig(filename='/tmp/myapp.log', level=logging.DEBUG)
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'customers',
'discounts',
'utils',
'products',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'website.urls'
STATIC_ROOT = '/home/docker/code/app/static'
STATIC_URL = '/static/'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'website.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
TEST = False
if not TEST:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'postgres',
'USER': 'postgres',
'HOST': 'db',
'PORT': 5432,
}
}
else:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
| """
Django settings for website project.
Generated by 'django-admin startproject' using Django 1.10.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os, logging
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ejxy^_(!sf_)ps@#2dr*q+x2jkuv0rre3dlm$orh%1*pvj1_jz'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
LOG = logging.basicConfig(filename='/tmp/myapp.log', level=logging.DEBUG)
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'customers',
'discounts',
'utils',
'products',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'website.urls'
STATIC_ROOT = '/home/docker/code/app/static'
STATIC_URL = '/static/'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'website.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
TEST = False
if not TEST:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'postgres',
'USER': 'postgres',
'HOST': 'db',
'PORT': 5432,
}
}
else:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
| Python | 0.99987 |
a835dbfbaa2c70329c08d4b8429d49315dc6d651 | Remove dangerous safestring declaration | openstack_dashboard/dashboards/identity/mappings/tables.py | openstack_dashboard/dashboards/identity/mappings/tables.py | # Copyright (C) 2015 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from horizon import tables
from openstack_dashboard import api
class CreateMappingLink(tables.LinkAction):
name = "create"
verbose_name = _("Create Mapping")
url = "horizon:identity:mappings:create"
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (("identity", "identity:create_mapping"),)
class EditMappingLink(tables.LinkAction):
name = "edit"
verbose_name = _("Edit")
url = "horizon:identity:mappings:update"
classes = ("ajax-modal",)
icon = "pencil"
policy_rules = (("identity", "identity:update_mapping"),)
class DeleteMappingsAction(tables.DeleteAction):
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete Mapping",
u"Delete Mappings",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Deleted Mapping",
u"Deleted Mappings",
count
)
policy_rules = (("identity", "identity:delete_mapping"),)
def delete(self, request, obj_id):
api.keystone.mapping_delete(request, obj_id)
class MappingFilterAction(tables.FilterAction):
def filter(self, table, mappings, filter_string):
"""Naive case-insensitive search."""
q = filter_string.lower()
return [mapping for mapping in mappings
if q in mapping.ud.lower()]
def get_rules_as_json(mapping):
rules = getattr(mapping, 'rules', None)
if rules:
rules = json.dumps(rules, indent=4)
return rules
class MappingsTable(tables.DataTable):
id = tables.Column('id', verbose_name=_('Mapping ID'))
description = tables.Column(get_rules_as_json,
verbose_name=_('Rules'))
class Meta(object):
name = "idp_mappings"
verbose_name = _("Attribute Mappings")
row_actions = (EditMappingLink, DeleteMappingsAction)
table_actions = (MappingFilterAction, CreateMappingLink,
DeleteMappingsAction)
| # Copyright (C) 2015 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from django.utils import safestring
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from horizon import tables
from openstack_dashboard import api
class CreateMappingLink(tables.LinkAction):
name = "create"
verbose_name = _("Create Mapping")
url = "horizon:identity:mappings:create"
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (("identity", "identity:create_mapping"),)
class EditMappingLink(tables.LinkAction):
name = "edit"
verbose_name = _("Edit")
url = "horizon:identity:mappings:update"
classes = ("ajax-modal",)
icon = "pencil"
policy_rules = (("identity", "identity:update_mapping"),)
class DeleteMappingsAction(tables.DeleteAction):
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete Mapping",
u"Delete Mappings",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Deleted Mapping",
u"Deleted Mappings",
count
)
policy_rules = (("identity", "identity:delete_mapping"),)
def delete(self, request, obj_id):
api.keystone.mapping_delete(request, obj_id)
class MappingFilterAction(tables.FilterAction):
def filter(self, table, mappings, filter_string):
"""Naive case-insensitive search."""
q = filter_string.lower()
return [mapping for mapping in mappings
if q in mapping.ud.lower()]
def get_rules_as_json(mapping):
rules = getattr(mapping, 'rules', None)
if rules:
rules = json.dumps(rules, indent=4)
return safestring.mark_safe(rules)
class MappingsTable(tables.DataTable):
id = tables.Column('id', verbose_name=_('Mapping ID'))
description = tables.Column(get_rules_as_json,
verbose_name=_('Rules'))
class Meta(object):
name = "idp_mappings"
verbose_name = _("Attribute Mappings")
row_actions = (EditMappingLink, DeleteMappingsAction)
table_actions = (MappingFilterAction, CreateMappingLink,
DeleteMappingsAction)
| Python | 0.000002 |
67bcec666fb1dc1ca48e531fd49bbbc75c09f041 | fix the bug when the method it's call by the onchange (#140) | stock_voucher/models/stock_picking_voucher.py | stock_voucher/models/stock_picking_voucher.py | ##############################################################################
# For copyright and license notices, see __manifest__.py file in module root
# directory
##############################################################################
from odoo import fields, models, api, _
from odoo.exceptions import ValidationError
class StockPickingVoucher(models.Model):
_name = 'stock.picking.voucher'
_description = 'Stock Voucher Book'
# _rec_name = 'number'
# we keep this for report compatibility
number = fields.Char(
related='name',
)
# because m2m tags widget send only values to name field
name = fields.Char(
'Number',
copy=False,
required=True,
oldname='number',
)
book_id = fields.Many2one(
'stock.book',
'Voucher Book',
)
picking_id = fields.Many2one(
'stock.picking',
'Picking',
ondelete='cascade',
required=True,
index=True,
)
company_id = fields.Many2one(
'res.company',
'Company',
related='picking_id.company_id',
readonly=True,
)
# constraint de que el book y el picking deben ser de la misma company
_sql_constraints = [
('voucher_number_uniq', 'unique(name, book_id)',
_('The field "Number" must be unique per book.'))]
@api.constrains('name', 'picking_id')
@api.onchange('name', 'picking_id')
def check_voucher_number_unique(self):
"""
Check internal pickings with voucher number unique
"""
for rec in self.filtered(
lambda x: x.picking_id.picking_type_id.code == 'incoming'):
pick_type = rec.picking_id.picking_type_id
name = pick_type.voucher_number_validator_id.validate_value(
rec.name)
if name and name != rec.name:
rec.name = name
if pick_type.voucher_number_unique:
rec._check_voucher_number_unique()
@api.multi
def _check_voucher_number_unique(self):
self.ensure_one()
same_number_recs = self.search([
('picking_id.partner_id', '=',
self.picking_id.partner_id.id),
('name', '=', self.name),
]) - self
if same_number_recs:
raise ValidationError(_(
'Picking voucher number must be unique per '
'partner'))
| ##############################################################################
# For copyright and license notices, see __manifest__.py file in module root
# directory
##############################################################################
from odoo import fields, models, api, _
from odoo.exceptions import ValidationError
class StockPickingVoucher(models.Model):
_name = 'stock.picking.voucher'
_description = 'Stock Voucher Book'
# _rec_name = 'number'
# we keep this for report compatibility
number = fields.Char(
related='name',
)
# because m2m tags widget send only values to name field
name = fields.Char(
'Number',
copy=False,
required=True,
oldname='number',
)
book_id = fields.Many2one(
'stock.book',
'Voucher Book',
)
picking_id = fields.Many2one(
'stock.picking',
'Picking',
ondelete='cascade',
required=True,
index=True,
)
company_id = fields.Many2one(
'res.company',
'Company',
related='picking_id.company_id',
readonly=True,
)
# constraint de que el book y el picking deben ser de la misma company
_sql_constraints = [
('voucher_number_uniq', 'unique(name, book_id)',
_('The field "Number" must be unique per book.'))]
@api.constrains('name', 'picking_id')
@api.onchange('name', 'picking_id')
def check_voucher_number_unique(self):
"""
Check internal pickings with voucher number unique
"""
for rec in self.filtered(
lambda x: x.picking_id.picking_type_id.code == 'incoming'):
pick_type = rec.picking_id.picking_type_id
name = pick_type.voucher_number_validator_id.validate_value(
rec.name)
if name and name != rec.name:
rec.name = name
if pick_type.voucher_number_unique:
rec._check_voucher_number_unique()
@api.multi
def _check_voucher_number_unique(self):
self.ensure_one()
same_number_recs = self.search([
('picking_id.partner_id', '=',
self.picking_id.partner_id.id),
('name', '=', self.name),
('id', '!=', self.id),
])
if same_number_recs:
raise ValidationError(_(
'Picking voucher number must be unique per '
'partner'))
| Python | 0 |
4b0902e960f08f9ef99ce211b98e211b1685f68b | Update walletchangepass.py | contrib/wallettools/walletchangepass.py | contrib/wallettools/walletchangepass.py | from jsonrpc import ServiceProxy
access = ServiceProxy("http://127.0.0.1:6647")
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
| from jsonrpc import ServiceProxy
access = ServiceProxy("http://127.0.0.1:46393")
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2) | Python | 0.000001 |
af0a932e8097701179310501d75888d7d4617dac | verifica codice con Schema dello SdI per fatturapa | l10n_it_payment_reason/tests/test_reason.py | l10n_it_payment_reason/tests/test_reason.py | from odoo.exceptions import ValidationError
from odoo.tests.common import TransactionCase
from odoo.addons.l10n_it_account.tools.account_tools import fpa_schema_get_enum
class TestReasons(TransactionCase):
def setUp(self):
super(TestReasons, self).setUp()
self.reason_model = self.env["payment.reason"]
self.reason_b = self.env.ref("l10n_it_payment_reason.b")
def test_reasons(self):
with self.assertRaises(ValidationError):
self.reason_model.create({"code": "B", "name": "Test"})
name = self.reason_b.name_get()
self.assertEqual(
name,
[
(
self.reason_b.id,
"B - Utilizzazione economica, da parte dell'autore ...",
)
],
)
def test_compare_with_fpa_schema(self):
"""Check that the values we define in this module are
the same as those defined in FPA xsd"""
my_codes = self.reason_model.search([]).mapped("code")
# from fatturapa xml Schema
xsd_codes = [
code for code, descr in fpa_schema_get_enum("CausalePagamentoType")
]
# XXX hardcoded - obsolete code, that is still supported by Schema
xsd_codes.remove("Z")
self.assertCountEqual(my_codes, xsd_codes)
| from odoo.exceptions import ValidationError
from odoo.tests.common import TransactionCase
class TestReasons(TransactionCase):
def setUp(self):
super(TestReasons, self).setUp()
self.reason_model = self.env["payment.reason"]
self.reason_b = self.env.ref("l10n_it_payment_reason.b")
def test_reasons(self):
with self.assertRaises(ValidationError):
self.reason_model.create({"code": "B", "name": "Test"})
name = self.reason_b.name_get()
self.assertEqual(
name,
[
(
self.reason_b.id,
"B - Utilizzazione economica, da parte dell'autore ...",
)
],
)
| Python | 0 |
c59a2ce0a6b164e7af26d73a553349937642c370 | write frozen graph instead of just the graph def | write_pb_file.py | write_pb_file.py | #!/usr/bin/env python3
from gnt_model import model, error_rate, IMAGE_HEIGHT, IMAGE_WIDTH, PIXEL_DEPTH
import os
import sys
import tensorflow as tf
from tensorflow.python.platform import gfile
from tensorflow.python.framework import graph_util
def main():
if len(sys.argv) != 3:
print('Usage: {} checkpoint_path output_dir'.format(sys.argv[0]))
sys.exit()
_, checkpoint_path, output_dir = sys.argv
node_image_raw = tf.placeholder("float", shape=[None, 784], name="input")
node_normalized_image = tf.reshape(node_image_raw, [1, IMAGE_HEIGHT, IMAGE_WIDTH, 1]) / PIXEL_DEPTH - 0.5
node_logits = model(node_normalized_image)
node_predictions = tf.nn.softmax(node_logits, name="output")
saver = tf.train.Saver()
init_op = tf.group(tf.global_variables_initializer(),
tf.local_variables_initializer())
with tf.Session() as sess:
print('loading model')
sess.run(init_op)
saver.restore(sess, checkpoint_path)
pb_filename = os.path.join(output_dir, 'frozen_character_model_graph.pb')
graph_def = tf.get_default_graph().as_graph_def()
for node in graph_def.node:
node.device = ""
output_graph_def = graph_util.convert_variables_to_constants(
sess,
graph_def,
['output'])
print('writing {}'.format(pb_filename))
with gfile.GFile(pb_filename, "wb") as f:
f.write(output_graph_def.SerializeToString())
print("%d ops in the final graph." % len(output_graph_def.node))
if __name__ == '__main__':
main()
| #!/usr/bin/env python3
from gnt_model import model, error_rate, IMAGE_HEIGHT, IMAGE_WIDTH, PIXEL_DEPTH
import sys
import tensorflow as tf
def main():
if len(sys.argv) != 3:
print('Usage: {} modelpath outputdir'.format(sys.argv[0]))
sys.exit()
_, model_path, output_dir = sys.argv
node_image_raw = tf.placeholder("float", shape=[None, 784], name="input")
node_normalized_image = tf.reshape(node_image_raw, [1, IMAGE_HEIGHT, IMAGE_WIDTH, 1]) / PIXEL_DEPTH - 0.5
node_logits = model(node_normalized_image)
node_predictions = tf.nn.softmax(node_logits, name="output")
saver = tf.train.Saver()
init_op = tf.group(tf.global_variables_initializer(),
tf.local_variables_initializer())
with tf.Session() as sess:
print('loading model')
sess.run(init_op)
saver.restore(sess, model_path)
pb_filename = 'character_model_graph.pb.txt'
print('writing {}'.format(pb_filename))
graph_def = tf.get_default_graph().as_graph_def()
tf.train.write_graph(graph_def, output_dir, pb_filename, as_text=True)
if __name__ == '__main__':
main()
| Python | 0.000006 |
6ecccc9d5600f46e4810ab3fdf82d29268105abc | use watchdog instead of bare inotifyx for recursive file system monitoring | purkinje_pytest/testrunner.py | purkinje_pytest/testrunner.py | # -*- coding: utf-8 -*-
"""Automatic test execution"""
from __future__ import print_function
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
import logging
import time
logger = logging.getLogger(__file__)
#WATCH_MASK = inotify.IN_CLOSE_WRITE | inotify.IN_DELETE
# Extensions of files to be watched
EXTENSIONS = ['.py']
class Handler(FileSystemEventHandler):
"""Triggers test execution when project contents change
"""
def on_created(self, event):
self._trigger(event)
def on_deleted(self, event):
self._trigger(event)
def on_modified(self, event):
self._trigger(event)
def on_moved(self, event):
self._trigger(event)
def _filter(self, path):
"""Determine whether a file is relevant to test execution"""
return path.endswith('.py')
def _trigger(self, event):
if self._filter(event.src_path):
print('##### Trigger: {} ####'.format(event))
class TestRunner:
"""Watches project directory and executes test when relevant files
have been changed
TODO: ensure that ulimit is sufficient for number of
files to be monitored
"""
def __init__(self, dir):
self._dir = dir
#self._fd = inotify.init()
# self._wd = inotify.add_watch(self._fd,
# self._dir,
# WATCH_MASK)
self.event_handler = Handler()
self.observer = Observer()
def start(self):
"""Watch directory forever and execute test cases
"""
print('{}: watching directory "{}"'.format(self.__class__, self._dir))
self.observer.schedule(self.event_handler, self._dir, recursive=True)
self.observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
self.observer.stop()
def _handle_event(self, e):
print('Event: {}'.format(e))
def main():
fw = TestRunner('.')
fw.start()
| # -*- coding: utf-8 -*-
"""Automatic test execution"""
from __future__ import print_function
from watchdog.observers import Observer
from watchdog.events import LoggingEventHandler
import logging
import time
logger = logging.getLogger(__file__)
#WATCH_MASK = inotify.IN_CLOSE_WRITE | inotify.IN_DELETE
# Extensions of files to be watched
EXTENSIONS = ['.py']
class TestRunner:
"""Watches project directory and executes test when relevant files
have been changed
TODO: ensure that ulimit is sufficient for number of
files to be monitored
"""
def __init__(self, dir):
self._dir = dir
#self._fd = inotify.init()
# self._wd = inotify.add_watch(self._fd,
# self._dir,
# WATCH_MASK)
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
self.event_handler = LoggingEventHandler()
self.observer = Observer()
def start(self):
"""Watch directory forever and execute test cases
"""
print('{}: watching directory "{}"'.format(self.__class__, self._dir))
self.observer.schedule(self.event_handler, self._dir, recursive=True)
self.observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
self.observer.stop()
def _filter(self, events):
"""Select files that are relevant to test execution"""
print('Before filter: {}'.format(events))
for event in events:
n = event.name
if n.endswith('.py'):
yield event
def _handle_event(self, e):
print('Event: {}'.format(e))
def main():
fw = TestRunner('.')
fw.start()
| Python | 0 |
c052aaca11f8fdcd465ee986548db8141b27a197 | bump to 0.7.2 | hublib/__init__.py | hublib/__init__.py | from pint import UnitRegistry
ureg = UnitRegistry()
ureg.autoconvert_offset_to_baseunit = True
Q_ = ureg.Quantity
__version__ = "0.7.2"
| from pint import UnitRegistry
ureg = UnitRegistry()
ureg.autoconvert_offset_to_baseunit = True
Q_ = ureg.Quantity
__version__ = "0.7.1"
| Python | 0.000013 |
8a0a00a688d2a7edb6b03a505d4ededf72730886 | Remove useless import | zinnia/views/trackback.py | zinnia/views/trackback.py | """Views for Zinnia trackback"""
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.http import HttpResponsePermanentRedirect
from django.shortcuts import get_object_or_404
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from django.views.generic.base import TemplateView
import django_comments as comments
from zinnia.flags import TRACKBACK
from zinnia.flags import get_user_flagger
from zinnia.models.entry import Entry
from zinnia.signals import trackback_was_posted
from zinnia.spam_checker import check_is_spam
class EntryTrackback(TemplateView):
"""
View for handling trackbacks on the entries.
"""
content_type = 'text/xml'
template_name = 'zinnia/entry_trackback.xml'
@method_decorator(csrf_exempt)
def dispatch(self, *args, **kwargs):
"""
Decorate the view dispatcher with csrf_exempt.
"""
return super(EntryTrackback, self).dispatch(*args, **kwargs)
def get_object(self):
"""
Retrieve the Entry trackbacked.
"""
return get_object_or_404(Entry.published, pk=self.kwargs['pk'])
def get(self, request, *args, **kwargs):
"""
GET only do a permanent redirection to the Entry.
"""
entry = self.get_object()
return HttpResponsePermanentRedirect(entry.get_absolute_url())
def post(self, request, *args, **kwargs):
"""
Check if an URL is provided and if trackbacks
are enabled on the Entry.
If so the URL is registered one time as a trackback.
"""
url = request.POST.get('url')
if not url:
return self.get(request, *args, **kwargs)
entry = self.get_object()
site = Site.objects.get_current()
if not entry.trackbacks_are_open:
return self.render_to_response(
{'error': 'Trackback is not enabled for %s' % entry.title})
title = request.POST.get('title') or url
excerpt = request.POST.get('excerpt') or title
blog_name = request.POST.get('blog_name') or title
ip_address = request.META.get('REMOTE_ADDR', None)
trackback_klass = comments.get_model()
trackback_datas = {
'content_type': ContentType.objects.get_for_model(Entry),
'object_pk': entry.pk,
'site': site,
'user_url': url,
'user_name': blog_name,
'ip_address': ip_address,
'comment': excerpt
}
trackback = trackback_klass(**trackback_datas)
if check_is_spam(trackback, entry, request):
return self.render_to_response(
{'error': 'Trackback considered like spam'})
trackback_defaults = {'comment': trackback_datas.pop('comment')}
trackback, created = trackback_klass.objects.get_or_create(
defaults=trackback_defaults,
**trackback_datas)
if created:
trackback.flags.create(user=get_user_flagger(), flag=TRACKBACK)
trackback_was_posted.send(trackback.__class__,
trackback=trackback,
entry=entry)
else:
return self.render_to_response(
{'error': 'Trackback is already registered'})
return self.render_to_response({})
| """Views for Zinnia trackback"""
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.http import HttpResponsePermanentRedirect
from django.shortcuts import get_object_or_404
from django.utils import timezone
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from django.views.generic.base import TemplateView
import django_comments as comments
from zinnia.flags import TRACKBACK
from zinnia.flags import get_user_flagger
from zinnia.models.entry import Entry
from zinnia.signals import trackback_was_posted
from zinnia.spam_checker import check_is_spam
class EntryTrackback(TemplateView):
"""
View for handling trackbacks on the entries.
"""
content_type = 'text/xml'
template_name = 'zinnia/entry_trackback.xml'
@method_decorator(csrf_exempt)
def dispatch(self, *args, **kwargs):
"""
Decorate the view dispatcher with csrf_exempt.
"""
return super(EntryTrackback, self).dispatch(*args, **kwargs)
def get_object(self):
"""
Retrieve the Entry trackbacked.
"""
return get_object_or_404(Entry.published, pk=self.kwargs['pk'])
def get(self, request, *args, **kwargs):
"""
GET only do a permanent redirection to the Entry.
"""
entry = self.get_object()
return HttpResponsePermanentRedirect(entry.get_absolute_url())
def post(self, request, *args, **kwargs):
"""
Check if an URL is provided and if trackbacks
are enabled on the Entry.
If so the URL is registered one time as a trackback.
"""
url = request.POST.get('url')
if not url:
return self.get(request, *args, **kwargs)
entry = self.get_object()
site = Site.objects.get_current()
if not entry.trackbacks_are_open:
return self.render_to_response(
{'error': 'Trackback is not enabled for %s' % entry.title})
title = request.POST.get('title') or url
excerpt = request.POST.get('excerpt') or title
blog_name = request.POST.get('blog_name') or title
ip_address = request.META.get('REMOTE_ADDR', None)
trackback_klass = comments.get_model()
trackback_datas = {
'content_type': ContentType.objects.get_for_model(Entry),
'object_pk': entry.pk,
'site': site,
'user_url': url,
'user_name': blog_name,
'ip_address': ip_address,
'comment': excerpt
}
trackback = trackback_klass(**trackback_datas)
if check_is_spam(trackback, entry, request):
return self.render_to_response(
{'error': 'Trackback considered like spam'})
trackback_defaults = {'comment': trackback_datas.pop('comment')}
trackback, created = trackback_klass.objects.get_or_create(
defaults=trackback_defaults,
**trackback_datas)
if created:
trackback.flags.create(user=get_user_flagger(), flag=TRACKBACK)
trackback_was_posted.send(trackback.__class__,
trackback=trackback,
entry=entry)
else:
return self.render_to_response(
{'error': 'Trackback is already registered'})
return self.render_to_response({})
| Python | 0.000004 |
8959801a293fa33cb7625b524c6e9226af7253b2 | Use separate HITTypeIds for live vs. sandbox | recruiters.py | recruiters.py | """Recruiters of judicious humans."""
import logging
import os
import boto3
# Set up logging.
logger = logging.getLogger(__name__)
handler = logging.StreamHandler()
formatter = logging.Formatter(
'%(asctime)s [recruiter.1]: %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
class Recruiter(object):
"""Generic recruiter."""
def recruit(self):
raise NotImplementedError
class HotAirRecruiter(Recruiter):
"""Talks about recruiting, but does not recruit."""
def recruit(self):
logger.info("Recruiting a participant.")
class MTurkRecruiter(Recruiter):
"""Recruits from Amazon Mechanical Turk."""
def __init__(self):
self.mode = os.environ["JUDICIOUS_MTURK_MODE"]
if self.mode == "sandbox":
self._client = boto3.client(
service_name='mturk',
endpoint_url='https://mturk-requester-sandbox.us-east-1.amazonaws.com',
)
elif self.mode == "live":
self._client = boto3.client(
service_name='mturk',
region_name="us-east-1",
)
def _print_balance(self):
balance = self.client.get_account_balance()['AvailableBalance']
logger.info("Current MTurk balance is ${}.".format(balance))
def recruit(self):
if self.mode == "sandbox":
HITTypeId = os.environ["JUDICIOUS_MTURK_HIT_TYPE_ID_SANDBOX"]
elif self.mode == "live":
HITTypeId = os.environ["JUDICIOUS_MTURK_HIT_TYPE_ID_LIVE"]
response = self._client.create_hit_with_hit_type(
HITTypeId=HITTypeId,
MaxAssignments=1,
LifetimeInSeconds=int(os.environ["JUDICIOUS_MTURK_LIFETIME"]),
Question=open("external.xml", "r").read(),
)
logger.info("Created HIT with ID {}".format(response['HIT']['HITId']))
| """Recruiters of judicious humans."""
import logging
import os
import boto3
# Set up logging.
logger = logging.getLogger(__name__)
handler = logging.StreamHandler()
formatter = logging.Formatter(
'%(asctime)s [recruiter.1]: %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
class Recruiter(object):
"""Generic recruiter."""
def recruit(self):
raise NotImplementedError
class HotAirRecruiter(Recruiter):
"""Talks about recruiting, but does not recruit."""
def recruit(self):
logger.info("Recruiting a participant.")
class MTurkRecruiter(Recruiter):
"""Recruits from Amazon Mechanical Turk."""
def __init__(self):
self.mode = os.environ["JUDICIOUS_MTURK_MODE"]
if self.mode == "sandbox":
self._client = boto3.client(
service_name='mturk',
endpoint_url='https://mturk-requester-sandbox.us-east-1.amazonaws.com',
)
elif self.mode == "live":
self._client = boto3.client(
service_name='mturk',
region_name="us-east-1",
)
def _print_balance(self):
balance = self.client.get_account_balance()['AvailableBalance']
logger.info("Current MTurk balance is ${}.".format(balance))
def recruit(self):
response = self._client.create_hit_with_hit_type(
HITTypeId=os.environ["JUDICIOUS_MTURK_HIT_TYPE_ID"],
MaxAssignments=1,
LifetimeInSeconds=int(os.environ["JUDICIOUS_MTURK_LIFETIME"]),
Question=open("external.xml", "r").read(),
)
logger.info("Created HIT with ID {}".format(response['HIT']['HITId']))
| Python | 0.000001 |
3ca6affb630f0dea9b414f5405acae7a20f213d2 | add request apis | crawler/spiders/movies_spider.py | crawler/spiders/movies_spider.py | # -*- coding: utf-8 -*-
import scrapy
import json
from core.models import db, Movie
from crawler.spiders.subjects_spider import save_subject_detail
class MoviesSpider(scrapy.Spider):
name = 'MoviesSpider'
allowed_domains = ['douban.com']
start_urls = [
'https://frodo.douban.com/jsonp/subject_collection/movie_showing/items?start=0&count=100',
'https://frodo.douban.com/jsonp/subject_collection/movie_latest/items?start=0&count=100',
# 'https://api.douban.com/v2/movie/coming_soon',
# 'https://api.douban.com/v2/movie/in_theaters',
# 'https://api.douban.com/v2/movie/top250',
]
def parse(self, response):
res = json.loads(response.body)
for subject in res['subjects']:
save_subject_detail(subject['id'])
| # -*- coding: utf-8 -*-
import scrapy
import json
from core.models import db, Movie
from crawler.spiders.subjects_spider import save_subject_detail
class MoviesSpider(scrapy.Spider):
name = 'MoviesSpider'
allowed_domains = ['douban.com']
start_urls = [
'https://frodo.douban.com/jsonp/subject_collection/movie_showing/items?os=ios&callback=&start=0&count=30',
# 'https://api.douban.com/v2/movie/coming_soon',
# 'https://api.douban.com/v2/movie/in_theaters',
# 'https://api.douban.com/v2/movie/top250',
]
def parse(self, response):
res = json.loads(response.body)
for subject in res['subjects']:
save_subject_detail(subject['id'])
| Python | 0 |
ac7090293c4686b071ca314c37304b2eeddec766 | Fix isort problems | pretix_cartshare/migrations/0002_auto_20161008_1047.py | pretix_cartshare/migrations/0002_auto_20161008_1047.py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-10-08 10:47
from __future__ import unicode_literals
import pretix_cartshare.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pretix_cartshare', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='sharedcart',
name='cart_id',
field=models.CharField(default=pretix_cartshare.models.generate_cart_id, max_length=255, verbose_name='Cart ID'),
),
]
| # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-10-08 10:47
from __future__ import unicode_literals
from django.db import migrations, models
import pretix_cartshare.models
class Migration(migrations.Migration):
dependencies = [
('pretix_cartshare', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='sharedcart',
name='cart_id',
field=models.CharField(default=pretix_cartshare.models.generate_cart_id, max_length=255, verbose_name='Cart ID'),
),
]
| Python | 0.000002 |
6b6de38e9d1c77f4cc6edcfb0982ff6162875f53 | Update scraper tool | tools/va_scraper/feed.py | tools/va_scraper/feed.py | import urllib
import sys
import feedparser
import os
import errno
import socket
socket.setdefaulttimeout(10)
if len(sys.argv) != 3:
print('Feed parser and downloader v1.0')
print('Usage: feed.py [base url] [relative start atom feed url]')
quit()
base_url = sys.argv[1]
parse_url = base_url+sys.argv[2]
list_of_urls_to_download_structure=list()
def add_to_list(rel_url):
if rel_url.startswith('/'):
rel_url = rel_url[1:]
if rel_url.startswith(base_url):
list_of_urls_to_download_structure.append(rel_url)
else:
list_of_urls_to_download_structure.append(base_url+rel_url)
def make_sure_directory_exists(file_name_and_path):
try:
os.makedirs(os.path.abspath(os.path.dirname(file_name_and_path)))
except OSError, exc:
if exc.errno != errno.EEXIST:
raise
return file_name_and_path
def replace_in_file(file_name_and_path, text_to_search, text_to_replace_with):
tmp_file_name = file_name_and_path+'.tmp'
read_file = open(file_name_and_path, 'r')
write_file = open(tmp_file_name, 'w')
for line in read_file:
write_file.write(line.replace(text_to_search, text_to_replace_with))
read_file.close()
write_file.close()
os.remove(file_name_and_path)
os.rename(tmp_file_name, file_name_and_path)
def create_html_dummy_file(file_name):
write_file = open(file_name, 'w')
write_file.write('<html><body>Missing</body></html>')
write_file.close()
# Go throu atom feed and store all links
while parse_url:
print ('Processing ' + parse_url)
add_to_list(parse_url)
d = feedparser.parse(parse_url)
# for entry in d.entries:
# if hasattr(entry.content[0], 'src'):
# if entry.content[0].src:
# add_to_list(entry.content[0].src)
# if hasattr(entry, 'links'):
# for link in entry.links:
# if link.rel in ['alternate','enclosure']:
# add_to_list(link.href)
parse_url = ''
if hasattr(d.feed, 'links'):
for link in d.feed.links:
if hasattr(link, 'href'):
if link.rel == 'prev-archive':
if (link.href.startswith(base_url)):
parse_url = link.href
else:
parse_url = base_url+link.href
else:
print ('Missing links cannot continue traversing atom feed')
socket.setdefaulttimeout(10)
# Download all links in to usable structure
print ('Found %i item(s)' % len(list_of_urls_to_download_structure) )
for item in list_of_urls_to_download_structure:
print ('Downloading '+item)
try:
local_filename_and_path = item[len(base_url):]
local_filename = os.path.basename(local_filename_and_path)
urllib.urlretrieve(item, make_sure_directory_exists(local_filename))
if local_filename_and_path.startswith('atom'):
replace_in_file(local_filename, base_url+'atom/', '')
replace_in_file(local_filename, base_url+'rdf/', '')
replace_in_file(local_filename, base_url+'html/', '')
except IOError:
print ('IOError: failed to download %s' % item)
if local_filename.endswith('.html'):
create_html_dummy_file(local_filename)
| import urllib
import sys
import feedparser
import os
import errno
import socket
# set over all time out to 1 sec to prevent stalling
socket.setdefaulttimeout(1)
if len(sys.argv) != 3:
print('Feed parser and downloader v1.0')
print('Usage: feed.py [base url] [relative start atom feed url]')
quit()
base_url = sys.argv[1]
parse_url = base_url+sys.argv[2]
list_of_urls_to_download_structure=list()
def add_to_list(rel_url):
if rel_url.startswith('/'):
rel_url = rel_url[1:]
if rel_url.startswith(base_url):
list_of_urls_to_download_structure.append(rel_url)
else:
list_of_urls_to_download_structure.append(base_url+rel_url)
def make_sure_directory_exists(file_name_and_path):
try:
os.makedirs(os.path.abspath(os.path.dirname(file_name_and_path)))
except OSError, exc:
if exc.errno != errno.EEXIST:
raise
return file_name_and_path
# Go throu atom feed and store all links
while parse_url:
print ('Processing '+parse_url)
add_to_list(parse_url)
d = feedparser.parse(parse_url)
for entry in d.entries:
try:
if entry.content[0].src:
add_to_list(entry.content[0].src)
except AttributeError:
pass
try:
for link in entry.links:
if link.rel in ['alternate','enclosure']:
add_to_list(link.href)
except AttributeError:
pass
parse_url = ''
try:
for link in d.feed.links:
if link.rel == 'prev-archive':
parse_url = base_url+link.href
except AttributeError:
pass
# Download all links in to usable structure
print ('Found %i item(s)' % len(list_of_urls_to_download_structure) )
for item in list_of_urls_to_download_structure:
print ('Downloading '+item)
try:
urllib.urlretrieve(item, make_sure_directory_exists(item[len(base_url):]))
except IOError:
print ('IOError: failed to download %s' % item)
| Python | 0 |
bbb9ecacd59186f07f0120d154e625fabd5a87af | Set name on vpc | touchdown/aws/vpc/vpc.py | touchdown/aws/vpc/vpc.py | # Copyright 2014 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from botocore import session
from touchdown.core.resource import Resource
from touchdown.core.policy import Policy
from touchdown.core.action import Action
from touchdown.core.argument import String
from touchdown.core import errors
class VPCMixin(object):
def __init__(self, *args, **kwargs):
super(VPCMixin, self).__init__(*args, **kwargs)
self.session = session.Session()
# self.session.set_credentials(aws_access_key_id, aws_secret_access_key)
self.service = self.session.get_service("ec2")
self.endpoint = self.service.get_endpoint("eu-west-1")
class VPC(Resource):
""" A DNS zone hosted at Amazon Route53 """
resource_name = "vpc"
subresources = [
]
name = String()
cidr_block = String()
class AddVPC(Action):
description = "Add virtual private cloud'%(name)s'"
def run(self):
operation = self.policy.service.get_operation("CreateVpc")
response, data = operation.call(
self.policy.endpoint,
CidrBlock=self.resource.cidr_block,
)
if response.status_code != 200:
raise errors.Error("Unable to create VPC")
# FIXME: Create and invoke CreateTags to set the name here.
class CreateTags(Action):
description = "Set tags on resource '%(comment)s'"
def __init__(self, policy, resources, tags):
super(CreateTags, self).__init__(policy)
self.resources = resources
self.tags = tags
def run(self):
operation = self.policy.service.get_operation("CreateTags")
response, data = operation.call(
self.policy.endpoint,
Resources=self.resources,
Tags=[{"Key": k, "Value": v} for k, v in self.tags.items()],
)
if response.status_code != 200:
raise errors.Error("Failed to update hosted zone comment")
class Apply(Policy, VPCMixin):
name = "apply"
resource = VPC
default = True
def get_vpc(self):
operation = self.service.get_operation("DescribeVpcs")
response, data = operation.call(self.endpoint)
for vpc in data['Vpcs']:
if vpc['CidrBlock'] == self.resource.cidr_block:
return vpc
def get_actions(self, runner):
zone = self.get_vpc()
if not zone:
yield AddVPC(self)
return
tags = dict((v["Key"], v["Value"]) for v in zone.get('Tags', []))
if tags.get('name', '') != self.resource.name:
yield CreateTags(
self,
resources=[zone['VpcId']],
tags={"name": self.resource.name}
)
| # Copyright 2014 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from botocore import session
from touchdown.core.resource import Resource
from touchdown.core.policy import Policy
from touchdown.core.action import Action
from touchdown.core.argument import String
from touchdown.core import errors
class VPCMixin(object):
def __init__(self, *args, **kwargs):
super(VPCMixin, self).__init__(*args, **kwargs)
self.session = session.Session()
# self.session.set_credentials(aws_access_key_id, aws_secret_access_key)
self.service = self.session.get_service("ec2")
self.endpoint = self.service.get_endpoint("eu-west-1")
class VPC(Resource):
""" A DNS zone hosted at Amazon Route53 """
resource_name = "vpc"
subresources = [
]
name = String()
cidr_block = String()
class AddVPC(Action):
description = "Add virtual private cloud'%(name)s'"
def run(self):
operation = self.policy.service.get_operation("CreateVpc")
response, data = operation.call(
self.policy.endpoint,
CidrBlock=self.resource.cidr_block,
)
if response.status_code != 200:
raise errors.Error("Unable to create VPC")
"""
class UpdateVPCName(Action):
description = "Change zone comment to '%(comment)s'"
def __init__(self, policy, zone_id):
super(UpdateHostedZoneComment, self).__init__(policy)
self.zone_id = zone_id
def run(self):
operation = self.policy.service.get_operation("UpdateHostedZoneComment")
response, data = operation.call(
self.policy.endpoint,
Id=self.zone_id,
Comment=self.resource.comment,
)
if response.status_code != 200:
raise errors.Error("Failed to update hosted zone comment")
"""
class Apply(Policy, VPCMixin):
name = "apply"
resource = VPC
default = True
def get_vpc(self):
operation = self.service.get_operation("DescribeVpcs")
response, data = operation.call(self.endpoint)
for vpc in data['Vpcs']:
if vpc['CidrBlock'] == self.resource.cidr_block:
return vpc
def get_actions(self, runner):
zone = self.get_vpc()
if not zone:
yield AddVPC(self)
| Python | 0.000001 |
1b7289dd8b72cb67fe5c369e99b60e5cd0e85958 | Fix neovim support | ftplugin/markdown/follow_markdown_links.py | ftplugin/markdown/follow_markdown_links.py | import os
import re
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
import vim
DEFAULT_EXTENSION = 'md'
MAX_LINE_LEN = 1024
def _extract_link_under_cursor():
_, col = vim.current.window.cursor
line = vim.current.line
# skip long lines to stop hogging CPU in vim
if len(line) >= MAX_LINE_LEN:
return
# find the markdown link substring from line
start_pos = line[:col].rfind("[")
if start_pos < 0: return
end_pos = line[col:].rfind(")")
if end_pos < 0: return
end_pos += (col + 1)
link = line[start_pos:end_pos]
return link
def _is_local_link(link):
link = urlparse(link)
return not link.netloc
def _resolve_link(link):
buf_path = os.path.dirname(vim.current.buffer.name)
return os.path.join(buf_path, link)
def _ensure_extension(link):
name = os.path.basename(link)
if '.' not in name:
return link + '.' + DEFAULT_EXTENSION
return link
def follow_link():
link = _extract_link_under_cursor()
if not link: return
# extract link text and link url
link = re.findall(r'^\[([^]]*)\]\(([^)]*)\)$', link)
if not link: return
# if not local link then stop
text, link = link[0]
if not _is_local_link(link): return
# Support [Text]() cases; Assume Text as link
# Also assume default extension
if not link: link = text
link = _ensure_extension(link)
# Resolve link (if relative) with relation
# to current file in buffer
link = _resolve_link(link)
# Open if exists
if os.path.exists(link):
return vim.command('e %s' % link)
# Directory path does not exist. Ask user to create it.
dirpath = os.path.dirname(link)
if not os.path.exists(dirpath):
confirm_fn = Function('confirm')
msg = '"%s" does not exist. create?' % dirpath
result = confirm_fn(msg, "&Yes\n&No")
if result != 1: return
os.makedirs(dirpath)
# Open as new file
return vim.command('e %s' % link)
| import re
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
from vim import *
DEFAULT_EXTENSION = 'md'
MAX_LINE_LEN = 1024
def _extract_link_under_cursor():
_, col = current.window.cursor
line = current.line
# skip long lines to stop hogging CPU in vim
if len(line) >= MAX_LINE_LEN:
return
# find the markdown link substring from line
start_pos = line[:col].rfind("[")
if start_pos < 0: return
end_pos = line[col:].rfind(")")
if end_pos < 0: return
end_pos += (col + 1)
link = line[start_pos:end_pos]
return link
def _is_local_link(link):
link = urlparse(link)
return not link.netloc
def _resolve_link(link):
buf_path = os.path.dirname(current.buffer.name)
return os.path.join(buf_path, link)
def _ensure_extension(link):
name = os.path.basename(link)
if '.' not in name:
return link + '.' + DEFAULT_EXTENSION
return link
def follow_link():
link = _extract_link_under_cursor()
if not link: return
# extract link text and link url
link = re.findall(r'^\[([^]]*)\]\(([^)]*)\)$', link)
if not link: return
# if not local link then stop
text, link = link[0]
if not _is_local_link(link): return
# Support [Text]() cases; Assume Text as link
# Also assume default extension
if not link: link = text
link = _ensure_extension(link)
# Resolve link (if relative) with relation
# to current file in buffer
link = _resolve_link(link)
# Open if exists
if os.path.exists(link):
return command('e %s' % link)
# Directory path does not exist. Ask user to create it.
dirpath = os.path.dirname(link)
if not os.path.exists(dirpath):
confirm_fn = Function('confirm')
msg = '"%s" does not exist. create?' % dirpath
result = confirm_fn(msg, "&Yes\n&No")
if result != 1: return
os.makedirs(dirpath)
# Open as new file
return command('e %s' % link)
| Python | 0 |
44532322f4a183bd535de22374d149a6e2f8176a | Fix pep8 errors | RandoAmisSecours/views/reporting.py | RandoAmisSecours/views/reporting.py | # -*- coding: utf-8 -*-
# vim: set ts=4
# Copyright 2014 Rémi Duraffort
# This file is part of RandoAmisSecours.
#
# RandoAmisSecours is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RandoAmisSecours is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with RandoAmisSecours. If not, see <http://www.gnu.org/licenses/>
from __future__ import unicode_literals
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth.models import User
from django.utils.timezone import datetime, utc
from RandoAmisSecours.models import Outing, DRAFT, CONFIRMED, FINISHED
from datetime import timedelta
@staff_member_required
def index(request):
user_count = User.objects.count()
return render_to_response('RandoAmisSecours/reporting/index.html',
{'user_count': user_count},
context_instance=RequestContext(request))
@staff_member_required
def outings(request):
return render_to_response('RandoAmisSecours/reporting/outings.html',
context_instance=RequestContext(request))
@staff_member_required
def users(request):
now = datetime.utcnow().replace(tzinfo=utc)
users_list = User.objects.all()
joining_dates = [0] * 366
last_logins = [0] * 366
for user in users_list:
days_delta = (now - user.date_joined).days
if days_delta <= 365:
joining_dates[365 - days_delta] += 1
days_delta = (now - user.last_login).days
if days_delta <= 365:
last_logins[365 - days_delta] += 1
return render_to_response('RandoAmisSecours/reporting/users.html',
{'joining_dates': joining_dates,
'last_logins': last_logins},
context_instance=RequestContext(request))
| # -*- coding: utf-8 -*-
# vim: set ts=4
# Copyright 2014 Rémi Duraffort
# This file is part of RandoAmisSecours.
#
# RandoAmisSecours is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RandoAmisSecours is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with RandoAmisSecours. If not, see <http://www.gnu.org/licenses/>
from __future__ import unicode_literals
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth.models import User
from django.utils.timezone import datetime, utc
from RandoAmisSecours.models import Outing, DRAFT, CONFIRMED, FINISHED
from datetime import timedelta
@staff_member_required
def index(request):
user_count = User.objects.count()
return render_to_response('RandoAmisSecours/reporting/index.html',
{'user_count': user_count},
context_instance=RequestContext(request))
@staff_member_required
def outings(request):
return render_to_response('RandoAmisSecours/reporting/outings.html',
context_instance=RequestContext(request))
@staff_member_required
def users(request):
now = datetime.utcnow().replace(tzinfo=utc)
users_list = User.objects.all()
joining_dates = [0] * 366
last_logins = [0] * 366
for user in users_list:
days_delta = (now - user.date_joined).days
if days_delta <= 365:
joining_dates[365 - days_delta] += 1
days_delta = (now - user.last_login).days
if days_delta <= 365:
last_logins[365 - days_delta] += 1
return render_to_response('RandoAmisSecours/reporting/users.html',
{'joining_dates': joining_dates,
'last_logins': last_logins},
context_instance=RequestContext(request))
| Python | 0.000217 |
b8e7b9d9316e84fec4c8524eab6839e82b8534c1 | Fix typo in add_subset | glue_vispy_viewers/volume/volume_viewer.py | glue_vispy_viewers/volume/volume_viewer.py | from ..common.vispy_data_viewer import BaseVispyViewer
from .layer_artist import VolumeLayerArtist
from .layer_style_widget import VolumeLayerStyleWidget
class VispyVolumeViewer(BaseVispyViewer):
LABEL = "3D Volume Rendering"
_layer_style_widget_cls = VolumeLayerStyleWidget
def add_data(self, data):
if data in self._layer_artist_container:
return True
layer_artist = VolumeLayerArtist(data, vispy_viewer=self._vispy_widget)
if len(self._layer_artist_container) == 0:
self._options_widget.set_limits(*layer_artist.bbox)
self._layer_artist_container.append(layer_artist)
return True
def add_subset(self, subset):
if subset in self._layer_artist_container:
return
if subset.to_mask().ndim != 3:
return
layer_artist = VolumeLayerArtist(subset, vispy_viewer=self._vispy_widget)
self._layer_artist_container.append(layer_artist)
def _add_subset(self, message):
self.add_subset(message.subset)
def _update_attributes(self, index=None, layer_artist=None):
pass
| from ..common.vispy_data_viewer import BaseVispyViewer
from .layer_artist import VolumeLayerArtist
from .layer_style_widget import VolumeLayerStyleWidget
class VispyVolumeViewer(BaseVispyViewer):
LABEL = "3D Volume Rendering"
_layer_style_widget_cls = VolumeLayerStyleWidget
def add_data(self, data):
if data in self._layer_artist_container:
return True
layer_artist = VolumeLayerArtist(data, vispy_viewer=self._vispy_widget)
if len(self._layer_artist_container) == 0:
self._options_widget.set_limits(*layer_artist.bbox)
self._layer_artist_container.append(layer_artist)
return True
def add_subset(self, subset):
if subset in self._layer_artist_container:
return
if subset.to_mask().ndim != 3:
return
layer_artist = VolumeLayerArtist(message.subset, vispy_viewer=self._vispy_widget)
self._layer_artist_container.append(layer_artist)
def _add_subset(self, message):
self.add_subset(message.subset)
def _update_attributes(self, index=None, layer_artist=None):
pass
| Python | 0.000093 |
dcffc99e64fa6f90b39dfa8bcb79e441e728831e | Updated the max length of a log message to be 1024 characters | google/cloud/forseti/common/util/logger.py | google/cloud/forseti/common/util/logger.py | # Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A basic util that wraps logging.
Setup logging for Forseti Security. Logs to console and syslog.
"""
import logging
import logging.handlers
DEFAULT_LOG_FMT = ('%(asctime)s %(levelname)s '
'%(name)s(%(funcName)s): %(message).1024s')
SYSLOG_LOG_FMT = ('[forseti-security] %(levelname)s '
'%(name)s(%(funcName)s): %(message).1024s')
LOGGERS = {}
LOGLEVELS = {
'debug': logging.DEBUG,
'info' : logging.INFO,
'warning' : logging.WARN,
'error' : logging.ERROR,
}
LOGLEVEL = logging.INFO
def get_logger(module_name):
"""Setup the logger.
Args:
module_name (str): The name of the mdule to describe the log entry.
Returns:
logger: An instance of the configured logger.
"""
# TODO: Move this into a configuration file.
console_handler = logging.StreamHandler()
console_handler.setFormatter(logging.Formatter(DEFAULT_LOG_FMT))
syslog_handler = logging.handlers.SysLogHandler()
syslog_handler.setFormatter(logging.Formatter(SYSLOG_LOG_FMT))
logger_instance = logging.getLogger(module_name)
logger_instance.addHandler(syslog_handler)
logger_instance.setLevel(LOGLEVEL)
LOGGERS[module_name] = logger_instance
return logger_instance
def _map_logger(func):
"""Map function to current loggers.
Args:
func (function): Function to call on every logger.
"""
for logger in LOGGERS.itervalues():
func(logger)
def set_logger_level(level):
"""Modify log level of existing loggers as well as the default
for new loggers.
Args:
level (int): The log level to set the loggers to.
"""
# pylint: disable=global-statement
global LOGLEVEL
LOGLEVEL = level
_map_logger(lambda logger: logger.setLevel(level))
def set_logger_level_from_config(level_name):
"""Set the logger level from a config value.
Args:
level_name (str): The log level name. The accepted values are
in the LOGLEVELS variable.
"""
set_logger_level(LOGLEVELS.get(level_name, LOGLEVEL))
| # Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A basic util that wraps logging.
Setup logging for Forseti Security. Logs to console and syslog.
"""
import logging
import logging.handlers
DEFAULT_LOG_FMT = ('%(asctime)s %(levelname)s '
'%(name)s(%(funcName)s): %(message)s')
SYSLOG_LOG_FMT = ('[forseti-security] %(levelname)s '
'%(name)s(%(funcName)s): %(message)s')
LOGGERS = {}
LOGLEVELS = {
'debug': logging.DEBUG,
'info' : logging.INFO,
'warning' : logging.WARN,
'error' : logging.ERROR,
}
LOGLEVEL = logging.INFO
def get_logger(module_name):
"""Setup the logger.
Args:
module_name (str): The name of the mdule to describe the log entry.
Returns:
logger: An instance of the configured logger.
"""
# TODO: Move this into a configuration file.
console_handler = logging.StreamHandler()
console_handler.setFormatter(logging.Formatter(DEFAULT_LOG_FMT))
syslog_handler = logging.handlers.SysLogHandler()
syslog_handler.setFormatter(logging.Formatter(SYSLOG_LOG_FMT))
logger_instance = logging.getLogger(module_name)
logger_instance.addHandler(syslog_handler)
logger_instance.setLevel(LOGLEVEL)
LOGGERS[module_name] = logger_instance
return logger_instance
def _map_logger(func):
"""Map function to current loggers.
Args:
func (function): Function to call on every logger.
"""
for logger in LOGGERS.itervalues():
func(logger)
def set_logger_level(level):
"""Modify log level of existing loggers as well as the default
for new loggers.
Args:
level (int): The log level to set the loggers to.
"""
# pylint: disable=global-statement
global LOGLEVEL
LOGLEVEL = level
_map_logger(lambda logger: logger.setLevel(level))
def set_logger_level_from_config(level_name):
"""Set the logger level from a config value.
Args:
level_name (str): The log level name. The accepted values are
in the LOGLEVELS variable.
"""
set_logger_level(LOGLEVELS.get(level_name, LOGLEVEL))
| Python | 0.999991 |
872e008b3986b18a7c01cd47e91a7ef39e21006b | Remove unused parameter in test (#5423) | cirq-core/cirq/contrib/paulistring/pauli_string_dag_test.py | cirq-core/cirq/contrib/paulistring/pauli_string_dag_test.py | # Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cirq
from cirq.contrib.paulistring import convert_and_separate_circuit, pauli_string_dag_from_circuit
def test_pauli_string_dag_from_circuit():
q0, q1, q2 = cirq.LineQubit.range(3)
c_orig = cirq.testing.nonoptimal_toffoli_circuit(q0, q1, q2)
c_left, _ = convert_and_separate_circuit(c_orig)
c_left_dag = pauli_string_dag_from_circuit(c_left)
c_left_reordered = c_left_dag.to_circuit()
cirq.testing.assert_allclose_up_to_global_phase(
c_left.unitary(), c_left_reordered.unitary(), atol=1e-7
)
| # Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import cirq
from cirq.contrib.paulistring import convert_and_separate_circuit, pauli_string_dag_from_circuit
@pytest.mark.parametrize('repetition', range(6))
def test_pauli_string_dag_from_circuit(repetition):
q0, q1, q2 = cirq.LineQubit.range(3)
c_orig = cirq.testing.nonoptimal_toffoli_circuit(q0, q1, q2)
c_left, _ = convert_and_separate_circuit(c_orig)
c_left_dag = pauli_string_dag_from_circuit(c_left)
c_left_reordered = c_left_dag.to_circuit()
cirq.testing.assert_allclose_up_to_global_phase(
c_left.unitary(), c_left_reordered.unitary(), atol=1e-7
)
| Python | 0 |
7917515dc5580bbd56b477435a46552d3a5353af | drop other pattern ops indexes | corehq/warehouse/migrations/0033_form_fact_user_id_index.py | corehq/warehouse/migrations/0033_form_fact_user_id_index.py | from django.db import migrations, models
FORM_STAGING_USER_INDEX_NAME = 'warehouse_f_user_id_785d18_idx'
COLUMNS = ['user_id']
CREATE_INDEX_SQL = "CREATE INDEX CONCURRENTLY IF NOT EXISTS {} ON {} ({})"
DROP_INDEX_SQL = "DROP INDEX CONCURRENTLY IF EXISTS {}"
class Migration(migrations.Migration):
atomic = False
dependencies = [
('warehouse', '0032_auto_20190917_1542'),
]
operations = [
migrations.RunSQL(
sql=CREATE_INDEX_SQL.format(
FORM_STAGING_USER_INDEX_NAME,
'warehouse_formstagingtable',
','.join(COLUMNS)
),
reverse_sql=DROP_INDEX_SQL.format(FORM_STAGING_USER_INDEX_NAME),
state_operations=[
migrations.AddIndex(
model_name='formstagingtable',
index=models.Index(fields=COLUMNS, name=FORM_STAGING_USER_INDEX_NAME),
),
]
),
migrations.RunSQL(
sql=DROP_INDEX_SQL.format('warehouse_formstagingtable_received_on_6a73ba8d'),
reverse_sql=migrations.RunSQL.noop,
state_operations=[
migrations.AlterField(
model_name='formstagingtable',
name='received_on',
field=models.DateTimeField(),
),
]
),
migrations.RunSQL(
DROP_INDEX_SQL.format('warehouse_formstagingtable_form_id_246fcaf3_like'),
migrations.RunSQL.noop
),
migrations.RunSQL(
DROP_INDEX_SQL.format('warehouse_formfact_form_id_1bb74f90_like'),
migrations.RunSQL.noop
),
# "warehouse_formstagingtable_timezone_idx" btree (timezone('UTC'::text, GREATEST(received_on, deleted_on, edited_on)))
migrations.RunSQL(
DROP_INDEX_SQL.format('warehouse_formstagingtable_timezone_idx'),
migrations.RunSQL.noop
),
migrations.RunSQL(
DROP_INDEX_SQL.format('warehouse_domaindim_domain_id_b1c3504b_like'),
migrations.RunSQL.noop
),
migrations.RunSQL(
DROP_INDEX_SQL.format('warehouse_groupdim_group_id_b5f6f7bd_like'),
migrations.RunSQL.noop
),
migrations.RunSQL(
DROP_INDEX_SQL.format('warehouse_locationdim_location_id_bb42cee7_like'),
migrations.RunSQL.noop
),
migrations.RunSQL(
DROP_INDEX_SQL.format('warehouse_userdim_user_id_701f9e28_like'),
migrations.RunSQL.noop
),
migrations.RunSQL(
DROP_INDEX_SQL.format('warehouse_appstatusformstaging_domain_5997210f_like'),
migrations.RunSQL.noop
),
migrations.RunSQL(
DROP_INDEX_SQL.format('warehouse_appstatusforms_submission_build_version_0a62241a_like'),
migrations.RunSQL.noop
),
migrations.RunSQL(
DROP_INDEX_SQL.format('warehouse_appstatusformstaging_commcare_version_cfb94daf_like'),
migrations.RunSQL.noop
),
migrations.RunSQL(
DROP_INDEX_SQL.format('warehouse_appstatussynclogstaging_domain_2ab81363_like'),
migrations.RunSQL.noop
),
migrations.RunSQL(
DROP_INDEX_SQL.format('warehouse_applicationstatusfact_domain_18a67251_like'),
migrations.RunSQL.noop
)
]
| from django.db import migrations, models
FORM_STAGING_USER_INDEX_NAME = 'warehouse_f_user_id_785d18_idx'
COLUMNS = ['user_id']
CREATE_INDEX_SQL = "CREATE INDEX CONCURRENTLY IF NOT EXISTS {} ON {} ({})"
DROP_INDEX_SQL = "DROP INDEX CONCURRENTLY IF EXISTS {}"
class Migration(migrations.Migration):
atomic = False
dependencies = [
('warehouse', '0032_auto_20190917_1542'),
]
operations = [
migrations.RunSQL(
sql=CREATE_INDEX_SQL.format(
FORM_STAGING_USER_INDEX_NAME,
'warehouse_formstagingtable',
','.join(COLUMNS)
),
reverse_sql=DROP_INDEX_SQL.format(FORM_STAGING_USER_INDEX_NAME),
state_operations=[
migrations.AddIndex(
model_name='formstagingtable',
index=models.Index(fields=COLUMNS, name=FORM_STAGING_USER_INDEX_NAME),
),
]
),
migrations.RunSQL(
sql=DROP_INDEX_SQL.format('warehouse_formstagingtable_received_on_6a73ba8d'),
reverse_sql=migrations.RunSQL.noop,
state_operations=[
migrations.AlterField(
model_name='formstagingtable',
name='received_on',
field=models.DateTimeField(),
),
]
),
migrations.RunSQL(
DROP_INDEX_SQL.format('warehouse_formstagingtable_form_id_246fcaf3_like'),
migrations.RunSQL.noop
),
migrations.RunSQL(
DROP_INDEX_SQL.format('warehouse_formfact_form_id_1bb74f90_like'),
migrations.RunSQL.noop
),
# "warehouse_formstagingtable_timezone_idx" btree (timezone('UTC'::text, GREATEST(received_on, deleted_on, edited_on)))
migrations.RunSQL(
DROP_INDEX_SQL.format('warehouse_formstagingtable_timezone_idx'),
migrations.RunSQL.noop
)
]
| Python | 0 |
d7a665a3947e04c4689db67f35827db2cc3a6842 | Update randrect.pyde | sketches/randrect/randrect.pyde | sketches/randrect/randrect.pyde | from random import randint, choice
a = 220
colors = [color(155, 46, 105, a), color(217, 55, 80, a), color(226, 114, 79, a),
color(243, 220, 123, a), color(78, 147, 151, a)]
def setup():
size(600, 600)
rectMode(CENTER)
strokeWeight(5)
# noStroke()
background(255)
def draw():
for _ in range(200):
fill(choice(colors))
rect(randint(0, width), randint(0, height), randint(10, width - 200),
randint(10, int((height/2) - 100)))
noLoop()
| from random import randint, choice
a = 220
colors = [color(155, 46, 105, a), color(217, 55, 80, a), color(226, 114, 79, a),
color(243, 220, 123, a), color(78, 147, 151, a)]
def setup():
size(940, 300)
rectMode(CENTER)
strokeWeight(2)
# noStroke()
background(255)
def draw():
for _ in range(200):
fill(choice(colors))
rect(randint(0, width), randint(0, height), randint(10, width - 200),
randint(10, int((height/2) - 100)))
noLoop()
| Python | 0.000001 |
06d281160d5ce60f9c8dea5c75d8234e70f63642 | Fix loading script to actually save options for answers. | estudios_socioeconomicos/load.py | estudios_socioeconomicos/load.py | import pickle
from estudios_socioeconomicos.models import Seccion, Subseccion, Pregunta, OpcionRespuesta
def parse(name):
""" utility script to parse the study.
"""
preguntas = {}
while True:
seccion = input('Ingrese el nombre de la seccion: ')
if seccion == 'n':
break
if seccion not in preguntas:
preguntas[seccion] = {}
while True:
subseccion = input('Ingrese el nombre de la subseccion dentro de %s: ' % seccion)
if subseccion == 'n':
break
curr = 1
if subseccion not in preguntas[seccion]:
preguntas[seccion][subseccion] = []
while True:
p = input('Ingrese el nombre de la pregunta: ')
if p == 'n':
break
opt = input('Respuestas: ')
opt = opt.split(',')
rel_integrante = input('related? (y/n): ')
preguntas[seccion][subseccion].append({
'texto': p,
'numero': curr,
'opciones': list(map(lambda x: x.strip(), opt)) if len(opt) > 1 else [],
'relacionado_a_integrante': rel_integrante == 'y'
})
curr += 1
print(preguntas)
pickle.dump(preguntas, open(name, 'wb'))
def load_data(name='estudios_socioeconomicos/preguntas.pkl'):
""" Load the questions and sections for the study.
To execute: import this function after running
python manage.py shell
and just call it.
"""
preguntas = pickle.load(open(name, 'rb'))
nums = {
'Generales del Solicitante': 1,
'Datos y Relación Familiar de Todos los Integrantes de la Vivienda': 2,
'Situación Económica': 3,
'Vivienda y Entorno Social': 4,
'Investigación Laboral': 6,
'Personalidad': 7,
'Otros Aspectos': 8
}
for sec in preguntas.keys():
seccion = Seccion.objects.get_or_create(nombre=sec, numero=nums[sec])[0]
for i, sub in enumerate(preguntas[sec].keys()):
subseccion = Subseccion.objects.get_or_create(
seccion=seccion,
nombre=sub,
numero=i)[0]
for p in preguntas[sec][sub]:
pregunta = Pregunta.objects.get_or_create(
subseccion=subseccion,
texto=p['texto'],
descripcion=p['descripcion'],
orden=p['numero'],
)[0]
for opt in p['opciones']:
OpcionRespuesta.objects.get_or_create(
pregunta=pregunta,
texto=opt)
| import pickle
from estudios_socioeconomicos.models import Seccion, Subseccion, Pregunta, OpcionRespuesta
def parse(name):
""" utility script to parse the study.
"""
preguntas = {}
while True:
seccion = input('Ingrese el nombre de la seccion: ')
if seccion == 'n':
break
if seccion not in preguntas:
preguntas[seccion] = {}
while True:
subseccion = input('Ingrese el nombre de la subseccion dentro de %s: ' % seccion)
if subseccion == 'n':
break
curr = 1
if subseccion not in preguntas[seccion]:
preguntas[seccion][subseccion] = []
while True:
p = input('Ingrese el nombre de la pregunta: ')
if p == 'n':
break
opt = input('Respuestas: ')
opt = opt.split(',')
rel_integrante = input('related? (y/n): ')
preguntas[seccion][subseccion].append({
'texto': p,
'numero': curr,
'opciones': list(map(lambda x: x.strip(), opt)) if len(opt) > 1 else [],
'relacionado_a_integrante': rel_integrante == 'y'
})
curr += 1
print(preguntas)
pickle.dump(preguntas, open(name, 'wb'))
def load_data(name='estudios_socioeconomicos/preguntas.pkl'):
""" Load the questions and sections for the study.
To execute: import this function after running
python manage.py shell
and just call it.
"""
preguntas = pickle.load(open(name, 'rb'))
nums = {
'Generales del Solicitante': 1,
'Datos y Relación Familiar de Todos los Integrantes de la Vivienda': 2,
'Situación Económica': 3,
'Vivienda y Entorno Social': 4,
'Investigación Laboral': 6,
'Personalidad': 7,
'Otros Aspectos': 8
}
for sec in preguntas.keys():
seccion = Seccion.objects.create(nombre=sec, numero=nums[sec])
for i, sub in enumerate(preguntas[sec].keys()):
subseccion = Subseccion.objects.create(
seccion=seccion,
nombre=sub,
numero=i)
for p in preguntas[sec][sub]:
pregunta = Pregunta.objects.create(
subseccion=subseccion,
texto=p['texto'],
descripcion=p['descripcion'],
orden=p['numero'],
)
map(lambda o: OpcionRespuesta.objects.create(
pregunta=pregunta, texto=o), p['opciones'])
| Python | 0 |
4ff146a7e81fe4b065a551e29f42ef9b2a973823 | document title edit | servee_document/views.py | servee_document/views.py | import json
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from django.contrib.auth.decorators import login_required
from .models import Document
@csrf_exempt
@require_POST
@login_required
def upload_documents(request):
documents = []
for f in request.FILES.getlist("file"):
obj = Document.objects.create(document=f)
documents.append({"filelink": obj.document.url})
return HttpResponse(json.dumps(documents), mimetype="application/json")
@login_required
def recent_documents(request):
documents = [
{"thumb": obj.document.url, "document": obj.document.url, "title": obj.title}
for obj in Document.objects.all().order_by("-uploaded")[:20]
]
return HttpResponse(json.dumps(documents), mimetype="application/json")
| import json
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from django.contrib.auth.decorators import login_required
from .models import Document
@csrf_exempt
@require_POST
@login_required
def upload_documents(request):
documents = []
for f in request.FILES.getlist("file"):
obj = Document.objects.create(document=f)
documents.append({"filelink": obj.document.url})
return HttpResponse(json.dumps(documents), mimetype="application/json")
@login_required
def recent_documents(request):
documents = [
{"thumb": obj.document.url, "document": obj.document.url, "title": obj.document.name}
for obj in Document.objects.all().order_by("-uploaded")[:20]
]
return HttpResponse(json.dumps(documents), mimetype="application/json")
| Python | 0.000003 |
4aad6aa1b50f8b0c86c49867bc75081b03b03086 | remove unused import | repository/shimclient.py | repository/shimclient.py | #
# Copyright (c) 2005 rpath, Inc.
#
# This program is distributed under the terms of the Common Public License,
# version 1.0. A copy of this license should have been distributed with this
# source file in a file called LICENSE. If it is not present, the license
# is always available at http://www.opensource.org/licenses/cpl.php.
#
# This program is distributed in the hope that it will be useful, but
# without any waranty; without even the implied warranty of merchantability
# or fitness for a particular purpose. See the Common Public License for
# full details.
#
import netclient
class ShimNetClient(netclient.NetworkRepositoryClient):
"""
A subclass of NetworkRepositoryClient which can take a NetworkRepositoryServer
instance (plus a few other pieces of information) and expose the netclient
interface without the overhead of XMLRPC.
"""
def __init__(self, server, protocol, port, authToken, repMap):
netclient.NetworkRepositoryClient.__init__(self, repMap)
self.server = ShimServerProxy(server, protocol, port, authToken)
class _ShimMethod(netclient._Method):
def __init__(self, server, protocol, port, authToken, name):
self._server = server
self._authToken = authToken
self._name = name
self._protocol = protocol
self._port = port
def __repr__(self):
return "<server._ShimMethod(%r)>" % (self._ShimMethod__name)
def __call__(self, *args):
isException, result = self._server.callWrapper(
self._protocol, self._port,
self._name, self._authToken, args)
if not isException:
return result
else:
self.handleError(result)
class ShimServerProxy(netclient.ServerProxy):
def __init__(self, server, protocol, port, authToken):
self._authToken = authToken
self._server = server
self._protocol = protocol
self._port = port
def __getattr__(self, name):
return _ShimMethod(self._server,
self._protocol, self._port,
self._authToken, name)
| #
# Copyright (c) 2005 rpath, Inc.
#
# This program is distributed under the terms of the Common Public License,
# version 1.0. A copy of this license should have been distributed with this
# source file in a file called LICENSE. If it is not present, the license
# is always available at http://www.opensource.org/licenses/cpl.php.
#
# This program is distributed in the hope that it will be useful, but
# without any waranty; without even the implied warranty of merchantability
# or fitness for a particular purpose. See the Common Public License for
# full details.
#
import netclient
import netrepos
class ShimNetClient(netclient.NetworkRepositoryClient):
"""
A subclass of NetworkRepositoryClient which can take a NetworkRepositoryServer
instance (plus a few other pieces of information) and expose the netclient
interface without the overhead of XMLRPC.
"""
def __init__(self, server, protocol, port, authToken, repMap):
netclient.NetworkRepositoryClient.__init__(self, repMap)
self.server = ShimServerProxy(server, protocol, port, authToken)
class _ShimMethod(netclient._Method):
def __init__(self, server, protocol, port, authToken, name):
self._server = server
self._authToken = authToken
self._name = name
self._protocol = protocol
self._port = port
def __repr__(self):
return "<server._ShimMethod(%r)>" % (self._ShimMethod__name)
def __call__(self, *args):
isException, result = self._server.callWrapper(
self._protocol, self._port,
self._name, self._authToken, args)
if not isException:
return result
else:
self.handleError(result)
class ShimServerProxy(netclient.ServerProxy):
def __init__(self, server, protocol, port, authToken):
self._authToken = authToken
self._server = server
self._protocol = protocol
self._port = port
def __getattr__(self, name):
return _ShimMethod(self._server,
self._protocol, self._port,
self._authToken, name)
| Python | 0.000001 |
c36282a41c248ad3d2405a0461195c679ef5327c | disable socket timeout, set heartbeat to 10 from 30 | hiispider/amqp/amqp.py | hiispider/amqp/amqp.py | import specs
from twisted.internet.protocol import ClientCreator
from twisted.internet import reactor
from txamqp.client import TwistedDelegate
from txamqp.protocol import AMQClient
import txamqp.spec
def createClient(amqp_host, amqp_vhost, amqp_port=5672):
amqp_spec = txamqp.spec.loadString(specs.v0_8)
amqp_delegate = TwistedDelegate()
client = ClientCreator(reactor,
AMQClient,
delegate=amqp_delegate,
vhost=amqp_vhost,
spec=amqp_spec,
heartbeat=10).connectTCP(amqp_host, amqp_port, timeout=0)
return client
| import specs
from twisted.internet.protocol import ClientCreator
from twisted.internet import reactor
from txamqp.client import TwistedDelegate
from txamqp.protocol import AMQClient
import txamqp.spec
def createClient(amqp_host, amqp_vhost, amqp_port=5672):
amqp_spec = txamqp.spec.loadString(specs.v0_8)
amqp_delegate = TwistedDelegate()
client = ClientCreator(reactor,
AMQClient,
delegate=amqp_delegate,
vhost=amqp_vhost,
spec=amqp_spec,
heartbeat=30).connectTCP(amqp_host, amqp_port)
return client | Python | 0.000001 |
4ef2c3988616736800b4e4470b598449b06790cd | Refactor some score calculation to make using the competition scorer easier | svm.py | svm.py | import numpy as np
from sklearn import svm, preprocessing
from feature_generation import FeatureGenerator
from FeatureData import FeatureData
class SVMModel(object):
def __init__(self):
self._stance_map = {'unrelated': 0, 'discuss': 2, 'agree': 3, 'disagree': 4}
self._use_features= [
# 'refuting',
'ngrams',
# 'polarity',
'named'
]
def get_data(self, body_file, stance_file):
feature_data = FeatureData(body_file, stance_file)
X_train = FeatureGenerator.get_features_from_file(self._use_features)
y_train = np.asarray([self._stance_map[stance['Stance']] for stance in feature_data.stances])
# Scale features to range[0, 1] to prevent larger features from dominating smaller ones
min_max_scaler = preprocessing.MinMaxScaler()
X_train = min_max_scaler.fit_transform(X_train)
return {'X':X_train, 'y':y_train}
def related_unrelated(self, y):
return [x > 0 for x in y]
def get_trained_classifier(self, X_train, y_train):
"""Trains the svm classifier and returns the trained classifier to be used for prediction on test data. Note
that stances in test data will need to be translated to the numbers shown in self._stance_map."""
svm_classifier = svm.SVC(decision_function_shape='ovr', cache_size=1000)
svm_classifier.fit(X_train, y_train)
return svm_classifier
def test_classifier(self, svm_classifier, X_test, y_test):
predicted = []
for i, stance in enumerate(y_test):
predicted.append(svm_classifier.predict([X_test[i]])[0])
return predicted
def precision(self, actual, predicted):
pairs = zip(actual, predicted)
truePositive = np.count_nonzero([x[1] > 0 for x in pairs if x[0] > 0])
falsePositive = np.count_nonzero([x[1] > 0 for x in pairs if x[0] == 0])
return float(truePositive) / (truePositive + falsePositive + 1)
def recal(self, actual, predicted):
pairs = zip(actual, predicted)
truePositive = np.count_nonzero([x[1] > 0 for x in pairs if x[0] > 0])
falseNegative = np.count_nonzero([x[1] == 0 for x in pairs if x[0] > 0])
return float(truePositive) / (truePositive + falseNegative + 1)
def accuracy(self, actual, predicted):
pairs = zip(actual, predicted)
accurate = np.count_nonzero([x[1] == x[0] for x in pairs])
return float(accurate)/len(pairs)
if __name__ == '__main__':
model = SVMModel()
data = model.get_data('data/train_bodies.csv', 'data/train_stances.csv')
testNum = 1000
X_test = data['X'][-testNum:]
X_train = data['X'][:-testNum]
Only_R_UR = True
if Only_R_UR:
y_test = model.related_unrelated(data['y'][-testNum:])
y_train = model.related_unrelated(data['y'][:-testNum])
else:
y_test = data['y'][-testNum:]
y_train = data['y'][:-testNum]
classifier = model.get_trained_classifier(X_train, y_train)
predicted = model.test_classifier(classifier, X_test, y_test)
print str(model._use_features)
print "Precision %f" % model.precision(y_test, predicted)
print "Recal %f" % model.recal(y_test, predicted)
print "Accuracy %f" % model.accuracy(y_test, predicted)
| import numpy as np
from sklearn import svm, preprocessing
from feature_generation import FeatureGenerator
from FeatureData import FeatureData
class SVMModel(object):
def __init__(self):
self._stance_map = {'unrelated': 0, 'discuss': 2, 'agree': 3, 'disagree': 4}
self._use_features= [
# 'refuting',
'ngrams',
# 'polarity',
'named'
]
def get_data(self, body_file, stance_file):
feature_data = FeatureData(body_file, stance_file)
X_train = FeatureGenerator.get_features_from_file(self._use_features)
y_train = np.asarray([self._stance_map[stance['Stance']] for stance in feature_data.stances])
# Scale features to range[0, 1] to prevent larger features from dominating smaller ones
min_max_scaler = preprocessing.MinMaxScaler()
X_train = min_max_scaler.fit_transform(X_train)
return {'X':X_train, 'y':y_train}
def related_unrelated(self, y):
return [x > 0 for x in y]
def get_trained_classifier(self, X_train, y_train):
"""Trains the svm classifier and returns the trained classifier to be used for prediction on test data. Note
that stances in test data will need to be translated to the numbers shown in self._stance_map."""
svm_classifier = svm.SVC(decision_function_shape='ovr', cache_size=1000)
svm_classifier.fit(X_train, y_train)
return svm_classifier
def test_classifier(self, svm_classifier, X_test, y_test):
predicted = []
for i, stance in enumerate(y_test):
predicted.append(svm_classifier.predict([X_test[i]])[0])
print str(self._use_features)
print "Precision %f" % self.precision(y_test, predicted)
print "Recal %f" % self.recal(y_test, predicted)
print "Accuracy %f" % self.accuracy(y_test, predicted)
def precision(self, actual, predicted):
pairs = zip(actual, predicted)
truePositive = np.count_nonzero([x[1] > 0 for x in pairs if x[0] > 0])
falsePositive = np.count_nonzero([x[1] > 0 for x in pairs if x[0] == 0])
return float(truePositive) / (truePositive + falsePositive + 1)
def recal(self, actual, predicted):
pairs = zip(actual, predicted)
truePositive = np.count_nonzero([x[1] > 0 for x in pairs if x[0] > 0])
falseNegative = np.count_nonzero([x[1] == 0 for x in pairs if x[0] > 0])
return float(truePositive) / (truePositive + falseNegative + 1)
def accuracy(self, actual, predicted):
pairs = zip(actual, predicted)
accurate = np.count_nonzero([x[1] == x[0] for x in pairs])
return float(accurate)/len(pairs)
if __name__ == '__main__':
model = SVMModel()
data = model.get_data('data/train_bodies.csv', 'data/train_stances.csv')
testNum = 1000
X_test = data['X'][-testNum:]
X_train = data['X'][:-testNum]
Only_R_UR = True
if Only_R_UR:
y_test = model.related_unrelated(data['y'][-testNum:])
y_train = model.related_unrelated(data['y'][:-testNum])
else:
y_test = data['y'][-testNum:]
y_train = data['y'][:-testNum]
classifier = model.get_trained_classifier(X_train, y_train)
model.test_classifier(classifier, X_test, y_test)
| Python | 0.000015 |
1198d398fc8051142c700991c1360750b0857dc4 | Update mp3test.py | home/Markus/mp3test.py | home/Markus/mp3test.py | # this is a test script
from java.lang import String
from org.myrobotlab.service import Speech
from org.myrobotlab.service import Sphinx
from org.myrobotlab.service import Runtime
import random
mouth = Runtime.createAndStart("mouth","Speech")
music = 1
# add python as a listener of the "stopped" event from audioFile
mouth.audioFile.addListener("stopped", python.name, "stopped")
def play():
number = str(random.randint(1, 8))
# usually you need to escape backslash
mouth.audioFile.playFile("C:\\Users\\Markus\\Music\\Robynsfavoriter\\music" + str(number) + ".mp3", False)
print number
mouth.speak("playing song number" + str(number))
# stopped method is called when at the end of an audio file
def stopped():
if music == 1:
print("I have started playing")
global music
music = 2
elif music == 2:
global music
music = 1
play()
play()
| # this is a test script
# i have a folder with the mp3 files named from music1 to music8.
# it random choses the files . no problem
# but i want to change the sleep(120) so the next starts when the previous is finished
from java.lang import String
from org.myrobotlab.service import Speech
from org.myrobotlab.service import Sphinx
from org.myrobotlab.service import Runtime
import random
mouth = Runtime.createAndStart("mouth","Speech")
music = 1
# add python as a listener of the "stopped" event from audioFile
mouth.audioFile.addListener("stopped", python.name, "stopped")
def play():
number = str(random.randint(1, 8))
# usually you need to escape backslash
mouth.audioFile.playFile("C:\\Users\\Markus\\Music\\Robynsfavoriter\\music" + str(number) + ".mp3", False)
print number
mouth.speak("playing song number" + str(number))
# stopped method is called when at the end of an audio file
def stopped():
if music == 1:
print("I have started playing")
global music
music = 2
elif music == 2:
global music
music = 1
play()
play()
| Python | 0.000001 |
53b346ff3d816cc2e637dd19003ff06505023929 | Test InvalidUCRData more explicitly | corehq/apps/userreports/tests/test_save_errors.py | corehq/apps/userreports/tests/test_save_errors.py | from __future__ import absolute_import
from __future__ import unicode_literals
import uuid
from alembic.operations import Operations
from alembic.runtime.migration import MigrationContext
from django.test import TestCase, override_settings
from corehq.apps.userreports.app_manager.helpers import clean_table_name
from corehq.apps.userreports.const import UCR_SQL_BACKEND
from corehq.apps.userreports.exceptions import TableNotFoundWarning, MissingColumnWarning
from corehq.apps.userreports.models import DataSourceConfiguration, InvalidUCRData
from corehq.apps.userreports.util import get_indicator_adapter
from six.moves import range
def get_sample_config(domain=None):
return DataSourceConfiguration(
domain=domain or 'domain',
display_name='foo',
referenced_doc_type='CommCareCase',
table_id=clean_table_name('domain', str(uuid.uuid4().hex)),
configured_indicators=[{
"type": "expression",
"expression": {
"type": "property_name",
"property_name": 'name'
},
"column_id": 'name',
"display_name": 'name',
"datatype": "string"
}],
)
class SaveErrorsTest(TestCase):
def setUp(self):
self.config = get_sample_config()
def tearDown(self):
self.config = get_sample_config()
self._get_adapter().drop_table()
def _get_adapter(self):
return get_indicator_adapter(self.config, raise_errors=True)
def test_raise_error_for_missing_table(self):
adapter = self._get_adapter()
adapter.drop_table()
doc = {
"_id": '123',
"domain": "domain",
"doc_type": "CommCareCase",
"name": 'bob'
}
with self.assertRaises(TableNotFoundWarning):
adapter.best_effort_save(doc)
def test_missing_column(self):
adapter = self._get_adapter()
adapter.build_table()
with adapter.engine.begin() as connection:
context = MigrationContext.configure(connection)
op = Operations(context)
op.drop_column(adapter.get_table().name, 'name')
doc = {
"_id": '123',
"domain": "domain",
"doc_type": "CommCareCase",
"name": 'bob'
}
with self.assertRaises(MissingColumnWarning):
adapter.best_effort_save(doc)
def test_non_nullable_column(self):
self.config.configured_indicators[0]['is_nullable'] = False
self.config._id = 'docs id'
adapter = self._get_adapter()
adapter.build_table()
doc = {
"_id": '123',
"domain": "domain",
"doc_type": "CommCareCase",
"name": None
}
adapter.best_effort_save(doc)
invalid = InvalidUCRData.objects.all()
self.assertEqual(len(invalid), 1)
self.assertEqual(invalid[0].validation_name, 'not_null_violation')
self.assertEqual(invalid[0].doc_id, '123')
class AdapterBulkSaveTest(TestCase):
def setUp(self):
self.domain = 'adapter_bulk_save'
self.config = get_sample_config(domain=self.domain)
self.config.save()
self.adapter = get_indicator_adapter(self.config, raise_errors=True)
def tearDown(self):
self.config.delete()
self.adapter.clear_table()
def test_bulk_save(self):
docs = []
for i in range(10):
docs.append({
"_id": str(i),
"domain": self.domain,
"doc_type": "CommCareCase",
"name": 'doc_name_' + str(i)
})
self.adapter.build_table()
self.adapter.bulk_save(docs)
self.assertEqual(self.adapter.get_query_object().count(), 10)
self.adapter.bulk_delete([doc['_id'] for doc in docs])
self.assertEqual(self.adapter.get_query_object().count(), 0)
def test_save_rows_empty(self):
self.adapter.build_table()
self.adapter.save_rows([])
| from __future__ import absolute_import
from __future__ import unicode_literals
import uuid
from alembic.operations import Operations
from alembic.runtime.migration import MigrationContext
from django.test import TestCase, override_settings
from corehq.apps.userreports.app_manager.helpers import clean_table_name
from corehq.apps.userreports.const import UCR_SQL_BACKEND
from corehq.apps.userreports.exceptions import TableNotFoundWarning, MissingColumnWarning
from corehq.apps.userreports.models import DataSourceConfiguration, InvalidUCRData
from corehq.apps.userreports.util import get_indicator_adapter
from six.moves import range
def get_sample_config(domain=None):
return DataSourceConfiguration(
domain=domain or 'domain',
display_name='foo',
referenced_doc_type='CommCareCase',
table_id=clean_table_name('domain', str(uuid.uuid4().hex)),
configured_indicators=[{
"type": "expression",
"expression": {
"type": "property_name",
"property_name": 'name'
},
"column_id": 'name',
"display_name": 'name',
"datatype": "string"
}],
)
class SaveErrorsTest(TestCase):
def setUp(self):
self.config = get_sample_config()
def tearDown(self):
self.config = get_sample_config()
self._get_adapter().drop_table()
def _get_adapter(self):
return get_indicator_adapter(self.config, raise_errors=True)
def test_raise_error_for_missing_table(self):
adapter = self._get_adapter()
adapter.drop_table()
doc = {
"_id": '123',
"domain": "domain",
"doc_type": "CommCareCase",
"name": 'bob'
}
with self.assertRaises(TableNotFoundWarning):
adapter.best_effort_save(doc)
def test_missing_column(self):
adapter = self._get_adapter()
adapter.build_table()
with adapter.engine.begin() as connection:
context = MigrationContext.configure(connection)
op = Operations(context)
op.drop_column(adapter.get_table().name, 'name')
doc = {
"_id": '123',
"domain": "domain",
"doc_type": "CommCareCase",
"name": 'bob'
}
with self.assertRaises(MissingColumnWarning):
adapter.best_effort_save(doc)
def test_non_nullable_column(self):
self.config.configured_indicators[0]['is_nullable'] = False
self.config._id = 'docs id'
adapter = self._get_adapter()
adapter.build_table()
doc = {
"_id": '123',
"domain": "domain",
"doc_type": "CommCareCase",
"name": None
}
adapter.best_effort_save(doc)
self.assertEqual(InvalidUCRData.objects.count(), 1)
class AdapterBulkSaveTest(TestCase):
def setUp(self):
self.domain = 'adapter_bulk_save'
self.config = get_sample_config(domain=self.domain)
self.config.save()
self.adapter = get_indicator_adapter(self.config, raise_errors=True)
def tearDown(self):
self.config.delete()
self.adapter.clear_table()
def test_bulk_save(self):
docs = []
for i in range(10):
docs.append({
"_id": str(i),
"domain": self.domain,
"doc_type": "CommCareCase",
"name": 'doc_name_' + str(i)
})
self.adapter.build_table()
self.adapter.bulk_save(docs)
self.assertEqual(self.adapter.get_query_object().count(), 10)
self.adapter.bulk_delete([doc['_id'] for doc in docs])
self.assertEqual(self.adapter.get_query_object().count(), 0)
def test_save_rows_empty(self):
self.adapter.build_table()
self.adapter.save_rows([])
| Python | 0 |
7388de0439913a8a33ac47a3cec14546e2860737 | Add code example with loggable quantities in _CustomAction | hoomd/custom_action.py | hoomd/custom_action.py | from abc import ABC, abstractmethod
from hoomd.parameterdicts import ParameterDict
from hoomd.operation import _HOOMDGetSetAttrBase
class _CustomAction(ABC):
"""Base class for all Python ``Action``s.
This class must be the parent class for all Python ``Action``s. This class
requires all subclasses to implement the act method which performs the
Python object's task whether that be updating the system, writing output, or
analyzing some property of the system.
To use subclasses of this class, the object must be passed as an argument
for the `hoomd.python_action._CustomOperation` constructor.
If the pressure, rotational kinetic energy, or external field virial is
needed for a subclass, the flags attribute of the class needs to be set with
the appropriate flags from `hoomd.util.ParticleDataFlags`.
.. code-block:: python
from hoomd.python_action import _CustomAction
from hoomd.util import ParticleDataFlags
class ExampleActionWithFlag(_CustomAction):
flags = [ParticleDataFlags.ROTATIONAL_KINETIC_ENERGY,
ParticleDataFlags.PRESSURE_TENSOR,
ParticleDataFlags.EXTERNAL_FIELD_VIRIAL]
def act(self, timestep):
pass
For advertising loggable quantities through the
`hoomd.python_action._CustomOperation` object, the class attribute
``log_quantities`` can be used. The dictionary expects string keys with the
name of the loggable and `hooomd.logger.LoggerQuantity` objects as the
values.
.. code-block:: python
from hoomd.python_action import _CustomAction
from hoomd.logger import LoggerQuantity
class ExampleActionWithFlag(_CustomAction):
def __init__(self):
self.log_quantities = {
'loggable': LoggerQuantity('scalar_loggable',
self.__class__,
flag='scalar')}
def loggable(self):
return 42
def act(self, timestep):
pass
"""
flags = []
log_quantities = {}
def __init__(self):
pass
def attach(self, simulation):
self._state = simulation.state
def detach(self):
if hasattr(self, '_state'):
del self._state
@abstractmethod
def act(self, timestep):
pass
class _InternalCustomAction(_CustomAction, _HOOMDGetSetAttrBase):
"""An internal class for Python ``Action``s.
Gives additional support in using HOOMD constructs like ``ParameterDict``s
and ``TypeParameters``.
"""
pass
| from abc import ABC, abstractmethod
from hoomd.parameterdicts import ParameterDict
from hoomd.operation import _HOOMDGetSetAttrBase
class _CustomAction(ABC):
"""Base class for all Python ``Action``s.
This class must be the parent class for all Python ``Action``s. This class
requires all subclasses to implement the act method which performs the
Python object's task whether that be updating the system, writing output, or
analyzing some property of the system.
To use subclasses of this class, the object must be passed as an argument
for the `hoomd.python_action._CustomOperation` constructor.
If the pressure, rotational kinetic energy, or external field virial is
needed for a subclass, the flags attribute of the class needs to be set with
the appropriate flags from `hoomd.util.ParticleDataFlags`.
.. code-block:: python
from hoomd.python_action import _CustomAction
from hoomd.util import ParticleDataFlags
class ExampleActionWithFlag(_CustomAction):
flags = [ParticleDataFlags.ROTATIONAL_KINETIC_ENERGY,
ParticleDataFlags.PRESSURE_TENSOR,
ParticleDataFlags.EXTERNAL_FIELD_VIRIAL]
def act(self, timestep):
pass
For advertising loggable quantities through the
`hoomd.python_action._CustomOperation` object, the class attribute
``log_quantities`` can be used. The dictionary expects string keys with the
name of the loggable and `hooomd.logger.LoggerQuantity` objects as the
values.
"""
flags = []
log_quantities = {}
def __init__(self):
pass
def attach(self, simulation):
self._state = simulation.state
def detach(self):
if hasattr(self, '_state'):
del self._state
@abstractmethod
def act(self, timestep):
pass
class _InternalCustomAction(_CustomAction, _HOOMDGetSetAttrBase):
"""An internal class for Python ``Action``s.
Gives additional support in using HOOMD constructs like ``ParameterDict``s
and ``TypeParameters``.
"""
pass
| Python | 0 |
94ecbdc67dd72c671862aea29fd5525ea92650d8 | Update model.py | hurricane/model.py | hurricane/model.py |
import sys
import os
import urllib2
import datetime
import time
import psycopg2
import pandas
from subprocess import call, Popen
# pull the 6 hr forecast track forecast from NHC
#os.system("wget http://www.srh.noaa.gov/ridge2/Precip/qpehourlyshape/latest/last_1_hours.tar.gz -O last_1_hours.tar.gz")
#os.system("mv last_1_hours.tar.gz last_1_hours.tar")
#os.system("tar xvf last_1_hours.tar")
#last_1hr_shp = './latest/last_1_hours.shp'
#last_hr_shp2pgsql = 'ogr2ogr -f "PostgreSQL" PG:"user=postgres dbname=hamlet password=password" {} -t_srs EPSG:4326 -nln last_1hr_qpe -overwrite'.format(last_1hr_shp)
#print last_hr_shp2pgsql
#call(last_hr_shp2pgsql, shell = True)
conn_string = "dbname='hamlethurricane' user=postgres port='5432' host='127.0.0.1' password='password'"
print "Connecting to database..."
try:
conn = psycopg2.connect(conn_string)
except Exception as e:
print str(e)
sys.exit()
print "Connected!\n"
dataframe_cur = conn.cursor()
dataframe_cur.execute("""Select * from hurricane_irene""")
data = dataframe_cur.fetchall()
colnames = [desc[0] for desc in dataframe_cur.description]
dataframe = pandas.DataFrame(data)
dataframe.columns = colnames
print data
print dataframe
conn.commit()
num_feat = len(data)
for i in range(len(data))
os.system('pgsql2shp -f {} -u postgres dbname=hamlet password=password" {} -t_srs EPSG:4326 -nln last_1hr_qpe -overwrite ').
#drop_cur.close()
# hurricane_cur = conn.cursor()
# hurricane_cur.execute("""
# create table roads_flooded_bunco as
# select
# a.gid,
# street_nam,
# sum(b.globvalue),
# a.geom
# from conterlines_poly as a
# inner join last_1hr_qpe as b
# on st_dwithin(a.geom::geometry(MULTIpolygon, 4326), b.wkb_geometry::geometry(point, 4326), 0.025)
# group by a.gid, a.street_nam, a.geom;""")
# conn.commit()
| Python | 0.000001 | |
c78cb26ff07712027b3ae340d6209482e8708641 | implement single-length tuple | vlermv/transformers/raw.py | vlermv/transformers/raw.py | import posixpath
error_msg = '''The index must be a string.'''
def to_path(key):
if isinstance(key, tuple) and len(key) == 1:
key = key[0]
if hasattr(key, 'strip') and hasattr(key, 'split'):
return tuple(key.strip('/').split('/'))
else:
raise TypeError('Key must be string-like or a tuple of length one.')
def from_path(path):
return posixpath.join(*path)
| import posixpath
error_msg = '''The index must be a string.'''
def to_path(key):
return tuple(key.strip('/').split('/'))
def from_path(path):
return posixpath.join(*path)
| Python | 0.999999 |
8cc4816556d43fde3f51b75d985fbf2e15299302 | Add version and a help text | subvenv/core.py | subvenv/core.py | #!/usr/bin/env python
# coding=utf-8
import argparse
import json
import logging
import os
import sys
import click
from collections import namedtuple
log = logging.getLogger(__name__)
HELP_COMMANDS = dict(help_option_names=['-h', '--help'])
class VirtualenvError(Exception):
pass
def get_virtualenv():
path = os.getenv('VIRTUAL_ENV')
if not path:
raise VirtualenvError(
'Trying to get virtualenv data while not in a virtualenv'
)
name = os.path.basename(path)
interpreter = os.path.join(path, 'bin', 'python')
Virtualenv = namedtuple('Virtualenv', ['name', 'path', 'interpreter'])
return Virtualenv(name, path, interpreter)
def post_mkproject(args=None):
"""
Create a Sublime text project file on virtualenvwrapper project
creation.
"""
try:
venv = get_virtualenv()
except VirtualenvError:
sys.exit('You need to be inside a virtualenv for using subvenv.')
project_path_file = os.path.join(venv.path, '.project')
try:
with open(project_path_file, 'r') as f:
project_folder = f.readline().rstrip('\r\n')
except IOError:
sys.exit('Virtualenv project not found.\n')
create_sublime_project_file(project_folder, venv.name, venv.interpreter)
def create_sublime_project_file(project_folder, project_name, interpreter):
"""
Create a Sublime Text project file in the given project folder.
Args:
project_folder (str): path to project folder
project_name (str): name of the project
interpreter (str): path to the Python interpreter used for
the project
"""
sublime_file_name = "{}.sublime-project".format(project_name)
settings_text = {
"folders": [
{
"follow_symlinks": True,
"path": project_folder,
},
],
"settings": {
"python_interpreter": interpreter,
},
}
target_path = (os.path.join(project_folder, sublime_file_name))
try:
with open(target_path, 'w') as f:
f.write(json.dumps(settings_text, sort_keys=True, indent=4))
except IOError:
sys.exit(
'Cannot create file.\n\
Attempted path: {}'.format(project_folder)
)
@click.group(context_settings=HELP_COMMANDS)
def cli():
"""
Subvenv is a tool for creating virtualenv-friendly Sublime Text
project files.
It can be used as a standalone or as a plugin for Virtualenwrapper.
See https://github.com/Railslide/subvenv for more information.
"""
pass
@cli.command()
@click.option(
'--folder',
type=click.Path(),
help='Target folder for file creation.'
)
def make_project(folder=None):
"""
Create a Sublime project file for the current virtual environment.
If no target folder is specified, the file will be created in
the current working directory.
"""
if not folder:
folder = os.getcwd()
folder = os.path.abspath(folder)
try:
venv = get_virtualenv()
except VirtualenvError:
sys.exit('You need to be inside a virtualenv for using subvenv.')
create_sublime_project_file(folder, venv.name, venv.interpreter)
def main():
description = (
'Subvenv is a tool for creating virtualenv-friendly Sublime Text '
'project files.\nIt can be used as a standalone or as a plugin for '
'Virtualenwrapper. \n\nSee https://github.com/Railslide/subvenv '
'for more information.'
)
parser = argparse.ArgumentParser(
description=description,
formatter_class=argparse.RawTextHelpFormatter
)
parser.add_argument(
'make_project',
help="create a Sublime Text project file in the given project folder"
)
parser.add_argument(
"-v", "--version",
help="print version information",
action="store_true"
)
parser.parse_args()
if __name__ == '__main__':
main()
# cli()
| #!/usr/bin/env python
# coding=utf-8
import json
import logging
import os
import sys
import click
from collections import namedtuple
log = logging.getLogger(__name__)
HELP_COMMANDS = dict(help_option_names=['-h', '--help'])
class VirtualenvError(Exception):
pass
def get_virtualenv():
path = os.getenv('VIRTUAL_ENV')
if not path:
raise VirtualenvError(
'Trying to get virtualenv data while not in a virtualenv'
)
name = os.path.basename(path)
interpreter = os.path.join(path, 'bin', 'python')
Virtualenv = namedtuple('Virtualenv', ['name', 'path', 'interpreter'])
return Virtualenv(name, path, interpreter)
def post_mkproject(args=None):
"""
Create a Sublime text project file on virtualenvwrapper project
creation.
"""
try:
venv = get_virtualenv()
except VirtualenvError:
sys.exit('You need to be inside a virtualenv for using subvenv.')
project_path_file = os.path.join(venv.path, '.project')
try:
with open(project_path_file, 'r') as f:
project_folder = f.readline().rstrip('\r\n')
except IOError:
sys.exit('Virtualenv project not found.\n')
create_sublime_project_file(project_folder, venv.name, venv.interpreter)
def create_sublime_project_file(project_folder, project_name, interpreter):
"""
Create a Sublime Text project file in the given project folder.
Args:
project_folder (str): path to project folder
project_name (str): name of the project
interpreter (str): path to the Python interpreter used for
the project
"""
sublime_file_name = "{}.sublime-project".format(project_name)
settings_text = {
"folders": [
{
"follow_symlinks": True,
"path": project_folder,
},
],
"settings": {
"python_interpreter": interpreter,
},
}
target_path = (os.path.join(project_folder, sublime_file_name))
try:
with open(target_path, 'w') as f:
f.write(json.dumps(settings_text, sort_keys=True, indent=4))
except IOError:
sys.exit(
'Cannot create file.\n\
Attempted path: {}'.format(project_folder)
)
@click.group(context_settings=HELP_COMMANDS)
def cli():
"""
Subvenv is a tool for creating virtualenv-friendly Sublime Text
project files.
It can be used as a standalone or as a plugin for Virtualenwrapper.
See https://github.com/Railslide/subvenv for more information.
"""
pass
@cli.command()
@click.option(
'--folder',
type=click.Path(),
help='Target folder for file creation.'
)
def make_project(folder=None):
"""
Create a Sublime project file for the current virtual environment.
If no target folder is specified, the file will be created in
the current working directory.
"""
if not folder:
folder = os.getcwd()
folder = os.path.abspath(folder)
try:
venv = get_virtualenv()
except VirtualenvError:
sys.exit('You need to be inside a virtualenv for using subvenv.')
create_sublime_project_file(folder, venv.name, venv.interpreter)
if __name__ == '__main__':
cli()
| Python | 0.000001 |
edb6a027aae1656d07112c0dcb455d6891a44992 | split around nan test is passing | test_data_processor.py | test_data_processor.py | import DataProcessor as dp
import numpy as np
def test_unsize_vector():
n = 3
a = np.ones(n)
b = np.append(a, np.array([np.nan, np.nan]))
c = dp.unsize_vector(a, n)
assert (a == c).all()
def test_time_vector():
numSamples = 100
sampleRate = 50
time = dp.time_vector(numSamples, sampleRate)
assert (time == np.linspace(0., 2. - 1. / 50., num=100)).all()
def test_split_around_nan():
# build an array of length 25 with some nan values
a = np.ones(25) * np.nan
b = np.arange(25)
for i in b:
if i not in [0, 5, 20, 24]:
a[i] = b[i]
# run the function and test the results
indices, arrays = dp.split_around_nan(a)
assert len(indices) == 7
assert indices[0] == (0, 1)
assert indices[1] == (1, 5)
assert indices[2] == (5, 6)
assert indices[3] == (6, 20)
assert indices[4] == (20, 21)
assert indices[5] == (21, 24)
assert indices[6] == (24, 25)
# build an array of length 25 with some nan values
a = np.ones(25) * np.nan
b = np.arange(25)
for i in b:
if i not in [5, 20]:
a[i] = b[i]
# run the function and test the results
indices, arrays = dp.split_around_nan(a)
assert len(indices) == 5
assert indices[0] == (0, 5)
assert indices[1] == (5, 6)
assert indices[2] == (6, 20)
assert indices[3] == (20, 21)
assert indices[4] == (21, 25)
a = np.array([np.nan, 1, 2, 3, np.nan, np.nan, 6, 7, np.nan])
# run the function and test the results
indices, arrays = dp.split_around_nan(a)
assert len(indices) == 6
assert indices[0] == (0, 1)
assert indices[1] == (1, 4)
assert indices[2] == (4, 5)
assert indices[3] == (5, 6)
assert indices[4] == (6, 8)
assert indices[5] == (8, 9)
| import DataProcessor as dp
import numpy as np
def test_unsize_vector():
n = 3
a = np.ones(n)
b = np.append(a, np.array([np.nan, np.nan]))
c = dp.unsize_vector(a, n)
assert (a == c).all()
def test_time_vector():
numSamples = 100
sampleRate = 50
time = dp.time_vector(numSamples, sampleRate)
assert (time == np.linspace(0., 2. - 1. / 50., num=100)).all()
def test_split_around_nan():
# build an array of length 25 with some nan values
a = np.ones(25) * np.nan
b = np.arange(25)
for i in b:
if i not in [0, 5, 20, 24]:
a[i] = b[i]
# run the function and test the results
indices, arrays = dp.split_around_nan(a)
assert indices[0] == (1, 5)
assert indices[1] == (6, 20)
assert indices[2] == (21, 24)
# build an array of length 25 with some nan values
a = np.ones(25) * np.nan
b = np.arange(25)
for i in b:
if i not in [5, 20]:
a[i] = b[i]
# run the function and test the results
indices, arrays = dp.split_around_nan(a)
assert indices[0] == (0, 5)
assert indices[1] == (6, 20)
assert indices[2] == (21, 25)
| Python | 0.000296 |
e5c3ece6ebad2b7ab524e074fd982e7fc11497b1 | set better filter | watcher/tweakerswatcher.py | watcher/tweakerswatcher.py | import requests
import json
import os.path
from watcher.watcher import Watcher
class TweakersWatcher(Watcher):
watcher_name = 'Tweakers Pricewatch'
filename = 'site_tweakers.txt'
def parse_site(self):
url = 'https://tweakers.net/xmlhttp/xmlHttp.php?application=tweakbase&type=filter&action=deals&dayOffset=1&minRelativePriceDrop=0.4&maxRelativePriceDrop=1&minAbsolutePriceDrop=30&maxAbsolutePriceDrop=&minCurrentPrice=0&maxCurrentPrice=&minPrices=3&minViews=0&of=absolutePriceDrop&od=desc&output=json'
request = requests.get(url)
json_object = json.loads(request.text)
return json_object['data']['html']
def check_price_error(self):
url = 'https://tweakers.net/pricewatch/deals/#filter:q1ZKSaz0T0srTi1RsjLUUcpNrAhKzUksySxLDSjKTE51KcovgEhk5jkmFefnlJYgSxgZgGWcS4uKUvNKwBJKVhAxMKcYpheLoQZ6ZmCpsMzUcqA6g1oA'
message_text = 'Mogelijke prijsfout, check: {0}'.format(url)
html = self.parse_site()
if not os.path.isfile(self.filename):
self.write_to_file(self.filename, html)
exit(0)
else:
with open(self.filename, 'r') as f:
file_content = f.read()
if file_content != html:
self.send_telegram(self.watcher_name, message_text)
self.write_to_file(self.filename, html)
| import requests
import json
import os.path
from watcher.watcher import Watcher
class TweakersWatcher(Watcher):
watcher_name = 'Tweakers Pricewatch'
filename = 'site_tweakers.txt'
def parse_site(self):
url = 'https://tweakers.net/xmlhttp/xmlHttp.php?application=tweakbase&type=filter&action=deals&fromHash=1&currFilters=q1ZKSaz0T0srTi1RsjLUUcpNrAhKzUksySxLDSjKTE51KcovgEhk5jkmFefnlJYgSxgZgGWcS4uKUvNKwBJKVhAxMKcYpheLoQZ6ZmCpsMzUcqA6g1oA&output=json';
request = requests.get(url)
json_object = json.loads(request.text)
return json_object['data']['html']
def check_price_error(self):
url = 'https://tweakers.net/pricewatch/deals/#filter:q1ZKSaz0T0srTi1RsjLUUcpNrAhKzUksySxLDSjKTE51KcovgEhk5jkmFefnlJYgSxgZgGWcS4uKUvNKwBJKVhAxMKcYpheLoQZ6ZmCpsMzUcqA6g1oA'
message_text = 'Mogelijke prijsfout, check: {0}'.format(url)
html = self.parse_site()
if not os.path.isfile(self.filename):
self.write_to_file(self.filename, html)
exit(0)
else:
with open(self.filename, 'r') as f:
file_content = f.read()
if file_content != html:
self.send_telegram(self.watcher_name, message_text)
self.write_to_file(self.filename, html)
| Python | 0.000002 |
6f641efb983c88427b0ec7955792a48ecbc0e0ea | add checking to avoiding update status issue of event_cases | web/controller/strategy.py | web/controller/strategy.py | # -*- coding:utf-8 -*-
__author__ = 'Ulric Qin'
from web import app
from flask import request, jsonify
from web.model.strategy import Strategy
from frame import config
from fe_api import post2FeUpdateEventCase
import logging
log = logging.getLogger(__name__)
@app.route('/strategy/update', methods=['POST'])
def strategy_update_post():
sid = request.form['sid'].strip()
metric = request.form['metric'].strip()
tags = request.form['tags'].strip()
max_step = request.form['max_step'].strip()
priority = request.form['priority'].strip()
note = request.form['note'].strip()
func = request.form['func'].strip()
op = request.form['op'].strip()
right_value = request.form['right_value'].strip()
run_begin = request.form['run_begin'].strip()
run_end = request.form['run_end'].strip()
tpl_id = request.form['tpl_id'].strip()
data = {'id': sid}
alarmAdUrl = config.JSONCFG['shortcut']['falconUIC'] + "/api/v1/alarmadjust/whenstrategyupdated"
if not metric:
return jsonify(msg='metric is blank')
if not note:
return jsonify(msg='note is blank')
if metric == 'net.port.listen' and '=' not in tags:
return jsonify(msg='if metric is net.port.listen, tags should like port=22')
st = Strategy.get(sid)
need_reset = False
if st.func != func or st.right_value != right_value or st.op != op:
need_reset = True
elif st.metric != metric or st.tags != tags:
log.info("g 2")
need_reset = True
log.debug("need_reset: " + str(need_reset))
log.debug(str(st.to_json()))
if sid:
# update
Strategy.update_dict(
{
'metric': metric,
'tags': tags,
'max_step': max_step,
'priority': priority,
'func': func,
'op': op,
'right_value': right_value,
'note': note,
'run_begin': run_begin,
'run_end': run_end
},
'id=%s',
[sid]
)
if need_reset:
respCode = post2FeUpdateEventCase(alarmAdUrl, data)
if respCode != 200:
log.error(alarmAdUrl + " got " + str(respCode) + " with " + str(data))
return jsonify(msg='')
# insert
Strategy.insert(
{
'metric': metric,
'tags': tags,
'max_step': max_step,
'priority': priority,
'func': func,
'op': op,
'right_value': right_value,
'note': note,
'run_begin': run_begin,
'run_end': run_end,
'tpl_id': tpl_id
}
)
respCode = post2FeUpdateEventCase(alarmAdUrl, data)
if respCode != 200:
log.error(alarmAdUrl + " got " + str(respCode) + " with " + str(data))
return jsonify(msg='')
@app.route('/strategy/<sid>')
def strategy_get(sid):
sid = int(sid)
s = Strategy.get(sid)
if not s:
return jsonify(msg='no such strategy')
return jsonify(msg='', data=s.to_json())
@app.route('/strategy/delete/<sid>')
def strategy_delete_get(sid):
sid = int(sid)
s = Strategy.get(sid)
data = {'id': sid}
alarmAdUrl = config.JSONCFG['shortcut']['falconUIC'] + "/api/v1/alarmadjust/whenstrategydeleted"
if not s:
return jsonify(msg='no such strategy')
Strategy.delete_one(sid)
respCode = post2FeUpdateEventCase(alarmAdUrl, data)
if respCode != 200:
log.error(alarmAdUrl + " got " + str(respCode) + " with " + str(data))
return jsonify(msg='')
| # -*- coding:utf-8 -*-
__author__ = 'Ulric Qin'
from web import app
from flask import request, jsonify
from web.model.strategy import Strategy
from frame import config
from fe_api import post2FeUpdateEventCase
import logging
log = logging.getLogger(__name__)
@app.route('/strategy/update', methods=['POST'])
def strategy_update_post():
sid = request.form['sid'].strip()
metric = request.form['metric'].strip()
tags = request.form['tags'].strip()
max_step = request.form['max_step'].strip()
priority = request.form['priority'].strip()
note = request.form['note'].strip()
func = request.form['func'].strip()
op = request.form['op'].strip()
right_value = request.form['right_value'].strip()
run_begin = request.form['run_begin'].strip()
run_end = request.form['run_end'].strip()
tpl_id = request.form['tpl_id'].strip()
data = {'id': sid}
alarmAdUrl = config.JSONCFG['shortcut']['falconUIC'] + "/api/v1/alarmadjust/whenstrategyupdated"
if not metric:
return jsonify(msg='metric is blank')
if not note:
return jsonify(msg='note is blank')
if metric == 'net.port.listen' and '=' not in tags:
return jsonify(msg='if metric is net.port.listen, tags should like port=22')
if sid:
# update
Strategy.update_dict(
{
'metric': metric,
'tags': tags,
'max_step': max_step,
'priority': priority,
'func': func,
'op': op,
'right_value': right_value,
'note': note,
'run_begin': run_begin,
'run_end': run_end
},
'id=%s',
[sid]
)
respCode = post2FeUpdateEventCase(alarmAdUrl, data)
if respCode != 200:
log.error(alarmAdUrl + " got " + str(respCode) + " with " + str(data))
return jsonify(msg='')
# insert
Strategy.insert(
{
'metric': metric,
'tags': tags,
'max_step': max_step,
'priority': priority,
'func': func,
'op': op,
'right_value': right_value,
'note': note,
'run_begin': run_begin,
'run_end': run_end,
'tpl_id': tpl_id
}
)
respCode = post2FeUpdateEventCase(alarmAdUrl, data)
if respCode != 200:
log.error(alarmAdUrl + " got " + str(respCode) + " with " + str(data))
return jsonify(msg='')
@app.route('/strategy/<sid>')
def strategy_get(sid):
sid = int(sid)
s = Strategy.get(sid)
if not s:
return jsonify(msg='no such strategy')
return jsonify(msg='', data=s.to_json())
@app.route('/strategy/delete/<sid>')
def strategy_delete_get(sid):
sid = int(sid)
s = Strategy.get(sid)
data = {'id': sid}
alarmAdUrl = config.JSONCFG['shortcut']['falconUIC'] + "/api/v1/alarmadjust/whenstrategydeleted"
if not s:
return jsonify(msg='no such strategy')
Strategy.delete_one(sid)
respCode = post2FeUpdateEventCase(alarmAdUrl, data)
if respCode != 200:
log.error(alarmAdUrl + " got " + str(respCode) + " with " + str(data))
return jsonify(msg='')
| Python | 0 |
2790a7c5f7f90eaafa543651a6cf273c9f40e6e1 | tidy engine | team1/engine.py | team1/engine.py | #! /usr/bin/env python
import os
import sys
import json
import difflib
#{ 'document': ['terms', 'terms'],
# 'document2': ['1','2','3'],
#}
def simple_search_engine(index, query):
return [doc_name for doc_name, terms in index.iteritems() if query in terms]
def weighted_search_engine(index, query):
results = {}
search_terms = query.split()
for doc_name, terms in index.iteritems():
matches = 0
for search_term in search_terms:
if search_term in terms:
matches += 1
rating = float(matches) / len(search_terms)
results[doc_name] = rating
return results
def guessy_weighted_search_engine(index, query):
results = {}
search_terms = query.split()
for doc_name, terms in index.iteritems():
matches = 0
for search_term in search_terms:
search_termys = difflib.get_close_matches(search_term, terms)
for search_termy in search_termys:
if search_termy in terms:
matches += 1
rating = float(matches) / len(search_terms)
results[doc_name] = rating
return results
def display_search_results(engine_name, results):
print(engine_name)
if not results:
print(' <No reults>')
return
for i, result in enumerate(results, 1):
print('%d. %s' % (i, result))
def sort_by_score(result_dict):
sorted_pairs = sorted(
result_dict.items(),
key=lambda res_score: res_score[1],
reverse=True
)
return [result for result, score in sorted_pairs if score > 0]
def load_index(filename):
if os.path.exists(filename):
return json.load(open(filename))
else:
print("Please run:\n\tpython get_documents.py > index.txt")
sys.exit()
if __name__ == '__main__':
index = load_index('index.txt')
search_query = raw_input('Search terms: ')
# New line for readability
print('')
display_search_results(
'Simple search engine returns:',
simple_search_engine(index=index, query=search_query)
)
print('')
weighted_results = weighted_search_engine(index=index, query=search_query)
display_search_results(
'Weighted search engine returns:',
sort_by_score(weighted_results)
)
print('')
guessy_weighted_results = guessy_weighted_search_engine(index=index, query=search_query)
display_search_results(
'Guessy weighted search engine returns:',
sort_by_score(guessy_weighted_results)
)
| #! /usr/bin/env python
import os
import sys
import json
import difflib
#{ 'document': ['terms', 'terms'],
# 'document2': ['1','2','3'],
#}
def simple_search_engine(index, query):
return [doc_name for doc_name, terms in index.iteritems() if query in terms]
def weighted_search_engine(index, query):
results = {}
search_terms = query.split()
for doc_name, terms in index.iteritems():
matches = 0
for search_term in search_terms:
if search_term in terms:
matches += 1
rating = float(matches) / len(search_terms)
results[doc_name] = rating
return results
def guessy_weighted_search_engine(index, query):
results = {}
search_terms = query.split()
for doc_name, terms in index.iteritems():
matches = 0
for search_term in search_terms:
search_termys = difflib.get_close_matches(search_term, terms)
for search_termy in search_termys:
if search_termy in terms:
matches += 1
rating = float(matches) / len(search_terms)
results[doc_name] = rating
return results
def display_search_results(engine_name, results):
print(engine_name)
if not results:
print(' <No reults>')
return
for i, result in enumerate(results, 1):
print('%d. %s' % (i, result))
def sort_by_score(result_dict):
return [result
for result, score in
sorted(result_dict.items(), key=lambda res_score: res_score[1])
if score > 0
]
def load_index(filename):
if os.path.exists(filename):
return json.load(open(filename))
else:
print("Please run:\n\tpython get_documents.py > index.txt")
sys.exit()
if __name__ == '__main__':
index = load_index('index.txt')
search_query = raw_input('Search terms: ')
print('')
display_search_results(
'Simple search engine returns:',
simple_search_engine(index=index, query=search_query)
)
# New line for readability
print('')
weighted_results = weighted_search_engine(index=index, query=search_query)
display_search_results(
'Weighted search engine returns:',
sort_by_score(weighted_results)
)
print('')
guessy_weighted_results = guessy_weighted_search_engine(index=index, query=search_query)
display_search_results(
'Guessy weighted search engine returns:',
sort_by_score(guessy_weighted_results)
)
| Python | 0.000001 |
c0566ab5f4dabdf0c366d6b6a32cbd8ca3fb4a75 | use idle_add callback for animation | test/animate.py | test/animate.py | import matplotlib
matplotlib.use('GTKAgg')
import pygrib, time ,gobject
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.basemap import Basemap
# animation example.
grbs = pygrib.open('../sampledata/safrica.grib2')
# grab all "brightness temp" grib messages.
btemps = [grb for grb in grbs if grb['name']=='Brightness temperature']
lats, lons = grb.latlons()
projd = grb.projparams
grbs.close()
# create a map projection for the domain, plot 1st image on it.
m =\
Basemap(projection=projd['proj'],lat_ts=projd['lat_ts'],lon_0=projd['lon_0'],\
lat_0=projd['lat_0'],rsphere=(projd['a'],projd['b']),\
llcrnrlat=lats[0,0],urcrnrlat=lats[-1,-1],\
llcrnrlon=lons[0,0],urcrnrlon=lons[-1,-1],resolution='i')
plt.figure(figsize=(8,7))
m.drawcoastlines()
m.drawcountries()
grb = btemps[0]
im = m.imshow(grb['values'],interpolation='nearest',vmin=230,vmax=310)
plt.colorbar(orientation='horizontal')
m.drawparallels(np.arange(-80,10,10),labels=[1,0,0,0])
m.drawmeridians(np.arange(-80,81,20),labels=[0,0,0,1])
txt = plt.title(grb,fontsize=8)
manager = plt.get_current_fig_manager()
def updatefig(*args):
global cnt, loop, delay
grb = btemps[cnt]
im.set_data(grb['values'])
txt.set_text(repr(grb))
manager.canvas.draw()
if cnt==0: time.sleep(delay)
cnt = cnt+1
if cnt==len(btemps):
loop = loop + 1
print 'done loop = ',loop
if loop == loops:
print 'all done - close plot window to exit'
return False
else:
cnt = 0
return True
else:
return True
cnt = 0
delay = 5
loops = 4
loop = 0
gobject.idle_add(updatefig)
plt.show()
| import matplotlib
matplotlib.use('GTKAgg')
import pygrib
import matplotlib.pyplot as plt
import numpy as np
import time
from mpl_toolkits.basemap import Basemap
# animation example.
grbs = pygrib.open('../sampledata/safrica.grib2')
# grab all "brightness temp" grib messages.
btemps = [grb for grb in grbs if grb['name']=='Brightness temperature']
lats, lons = grb.latlons()
projd = grb.projparams
grbs.close()
print projd
# create a map projection for the domain, plot 1st image on it.
m =\
Basemap(projection=projd['proj'],lat_ts=projd['lat_ts'],lon_0=projd['lon_0'],\
lat_0=projd['lat_0'],rsphere=(projd['a'],projd['b']),\
llcrnrlat=lats[0,0],urcrnrlat=lats[-1,-1],\
llcrnrlon=lons[0,0],urcrnrlon=lons[-1,-1],resolution='i')
plt.ion() # set interactive mode on
plt.figure(figsize=(8,7))
m.drawcoastlines()
m.drawcountries()
grb = btemps[0]
im = m.imshow(grb['values'],interpolation='nearest',vmin=230,vmax=310)
plt.colorbar(orientation='horizontal')
m.drawparallels(np.arange(-80,10,10),labels=[1,0,0,0])
m.drawmeridians(np.arange(-80,81,20),labels=[0,0,0,1])
plt.title(grb,fontsize=8)
plt.draw()
# loop 4 times, plot all images sequentially.
for loop in range(4):
time.sleep(5)
for grb in btemps:
print grb
im.set_data(grb['values'])
plt.title(grb,fontsize=8)
plt.draw()
time.sleep(5)
| Python | 0.000006 |
65f5695b90054f73d7119f0c50be51f61de777fa | Print the time of checking status at github. | tardis/tests/tests_slow/runner.py | tardis/tests/tests_slow/runner.py | import argparse
import datetime
import json
import os
import time
import requests
from tardis import __githash__ as tardis_githash
parser = argparse.ArgumentParser(description="Run slow integration tests")
parser.add_argument("--yaml", dest="yaml_filepath",
help="Path to YAML config file for integration tests.")
parser.add_argument("--atomic-dataset", dest="atomic_dataset",
help="Path to atomic dataset.")
test_command = (
"python setup.py test --test-path=tardis/tests/tests_slow/test_integration.py "
"--args=\"-rs --integration-tests={0} --atomic-dataset={1} --remote-data\""
)
if __name__ == "__main__":
args = parser.parse_args()
while True:
gh_request = requests.get(
"https://api.github.com/repos/tardis-sn/tardis/branches/master"
)
gh_master_head_data = json.loads(gh_request.content)
gh_tardis_githash = gh_master_head_data['commit']['sha']
if gh_tardis_githash != tardis_githash:
os.system("git pull origin master")
os.system(test_command.format(args.yaml_filepath,
args.atomic_dataset))
else:
checked = datetime.datetime.now()
print "Up-to-date. Checked on {0} {1}".format(
checked.strftime("%d-%b-%Y"), checked.strftime("%H:%M:%S")
)
time.sleep(600)
| import argparse
import json
import os
import time
import requests
from tardis import __githash__ as tardis_githash
parser = argparse.ArgumentParser(description="Run slow integration tests")
parser.add_argument("--yaml", dest="yaml_filepath",
help="Path to YAML config file for integration tests.")
parser.add_argument("--atomic-dataset", dest="atomic_dataset",
help="Path to atomic dataset.")
test_command = (
"python setup.py test --test-path=tardis/tests/tests_slow/test_integration.py "
"--args=\"-rs --integration-tests={0} --atomic-dataset={1} --remote-data\""
)
if __name__ == "__main__":
args = parser.parse_args()
while True:
gh_request = requests.get(
"https://api.github.com/repos/tardis-sn/tardis/branches/master"
)
gh_master_head_data = json.loads(gh_request.content)
gh_tardis_githash = gh_master_head_data['commit']['sha']
if gh_tardis_githash != tardis_githash:
os.system("git pull origin master")
os.system(test_command.format(args.yaml_filepath,
args.atomic_dataset))
else:
time.sleep(600)
| Python | 0.000022 |
3bc6ab85f4fc2e3dba0e7c9d16a28fa370558021 | remove useless comment | tests/images.py | tests/images.py | # -*- coding: utf-8 -*-
"""\
This is a python port of "Goose" orignialy licensed to Gravity.com
under one or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership.
Python port was written by Xavier Grangier for Recrutae
Gravity.com licenses this file
to you under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import json
import hashlib
from goose import Goose
from goose.configuration import Configuration
from goose.utils import FileHelper
from base import BaseMockTests, MockResponse
from extractors import TestExtractionBase
CURRENT_PATH = os.path.dirname(os.path.abspath(__file__))
class MockResponseImage(MockResponse):
def image_content(self, req):
md5_hash = hashlib.md5(req.get_full_url()).hexdigest()
current_test = self.cls._get_current_testname()
path = os.path.join(CURRENT_PATH, "data", "images", current_test, md5_hash)
path = os.path.abspath(path)
f = open(path, 'rb')
content = f.read()
f.close()
return content
def html_content(self, req):
current_test = self.cls._get_current_testname()
path = os.path.join(CURRENT_PATH, "data", "images", current_test, "%s.html" % current_test)
path = os.path.abspath(path)
return FileHelper.loadResourceFile(path)
def content(self, req):
if self.cls.data['url'] == req.get_full_url():
return self.html_content(req)
return self.image_content(req)
class ImageTests(TestExtractionBase):
"""\
Base Mock test case
"""
callback = MockResponseImage
def loadData(self):
"""\
"""
suite, module, cls, func = self.id().split('.')
path = os.path.join(CURRENT_PATH, "data", module, func, "%s.json" % func)
path = os.path.abspath(path)
content = FileHelper.loadResourceFile(path)
self.data = json.loads(content)
def getArticle(self):
"""\
"""
# load test case data
self.loadData()
# basic configuration
# no image fetching
config = self.getConfig()
config.enable_image_fetching = True
# run goose
g = Goose(config=config)
return self.extract(g)
def test_basic_image(self):
article = self.getArticle()
fields = ['top_image']
self.runArticleAssertions(article=article, fields=fields)
| # -*- coding: utf-8 -*-
"""\
This is a python port of "Goose" orignialy licensed to Gravity.com
under one or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership.
Python port was written by Xavier Grangier for Recrutae
Gravity.com licenses this file
to you under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import json
import hashlib
from goose import Goose
from goose.configuration import Configuration
from goose.utils import FileHelper
from base import BaseMockTests, MockResponse
from extractors import TestExtractionBase
CURRENT_PATH = os.path.dirname(os.path.abspath(__file__))
class MockResponseImage(MockResponse):
def image_content(self, req):
md5_hash = hashlib.md5(req.get_full_url()).hexdigest()
current_test = self.cls._get_current_testname()
path = os.path.join(CURRENT_PATH, "data", "images", current_test, md5_hash)
path = os.path.abspath(path)
f = open(path, 'rb')
content = f.read()
f.close()
return content
def html_content(self, req):
current_test = self.cls._get_current_testname()
path = os.path.join(CURRENT_PATH, "data", "images", current_test, "%s.html" % current_test)
path = os.path.abspath(path)
return FileHelper.loadResourceFile(path)
def content(self, req):
if self.cls.data['url'] == req.get_full_url():
return self.html_content(req)
return self.image_content(req)
class ImageTests(TestExtractionBase):
"""\
Base Mock test case
"""
callback = MockResponseImage
def loadData(self):
"""\
"""
suite, module, cls, func = self.id().split('.')
path = os.path.join(CURRENT_PATH, "data", module, func, "%s.json" % func)
path = os.path.abspath(path)
content = FileHelper.loadResourceFile(path)
self.data = json.loads(content)
def getArticle(self):
"""\
"""
# load test case data
self.loadData()
# basic configuration
# no image fetching
config = self.getConfig()
config.enable_image_fetching = True
# run goose
g = Goose(config=config)
return self.extract(g)
def test_basic_image(self):
article = self.getArticle()
fields = ['top_image']
self.runArticleAssertions(article=article, fields=fields)
# def test_2(self):
# article = self.getArticle()
| Python | 0 |
08a65747d608fcc530adf6291a95104d4348eae6 | apply RatingTargetMixin to test model | tests/models.py | tests/models.py | from __future__ import unicode_literals
from django.db import models
from generic_ratings.model_mixins import RatingTargetMixin
class TextSnippet(RatingTargetMixin, models.Model):
text = models.TextField()
user = models.ForeignKey('auth.User')
| from __future__ import unicode_literals
from django.db import models
class TextSnippet(models.Model):
text = models.TextField()
user = models.ForeignKey('auth.User')
| Python | 0 |
bff7989e5ce0f5cde4176ee0652578100201c90b | Update about.py | tincan/about.py | tincan/about.py | # Copyright 2014 Rustici Software
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tincan.serializable_base import SerializableBase
from tincan.version import Version
from tincan.extensions import Extensions
class About(SerializableBase):
"""Stores info about this installation of `tincan`.
:param version: The versions supported. This attribute is required.
:type version: list of unicode
:param extensions: Custom user data. This attribute is optional.
:type extensions: :class:`tincan.Extensions`
"""
_props_req = [
'version',
]
_props = [
'extensions',
]
_props.extend(_props_req)
@property
def version(self):
"""Version for About
:setter: Sets the version. If None is provided, defaults to
`[tincan.Version.latest]`. If a string is provided,
makes a 1-element list containing the string.
:setter type: list | tuple | str | unicode | None
:rtype: list
"""
return self._version
@version.setter
def version(self, value):
def check_version(v):
"""Checks a single version string for validity. Raises
if invalid.
:param v: the version string to check
:type v: list of str or unicode | tuple of str or unicode
:raises ValueError
"""
if v in Version.supported:
return
# Construct the error message
if isinstance(value, (list, tuple)):
value_str = repr(v) + ' in ' + repr(value)
else:
value_str = repr(v)
msg = (
"Tried to set property 'version' in a 'tincan.%s' object "
"with an invalid value: %s\n"
"Allowed versions are: %s" %
(
self.__class__.__name__,
value_str,
', '.join(map(repr, Version.supported)),
)
)
raise ValueError(msg)
if value is None:
self._version = [Version.latest]
elif isinstance(value, basestring):
check_version(value)
self._version = [value]
elif isinstance(value, (list, tuple)):
for v in value:
check_version(v)
self._version = list(value)
else:
raise TypeError(
"Property 'version' in a 'tincan.%s' object must be set with a "
"list, tuple, str, unicode or None. Tried to set it with: %s" %
(
self.__class__.__name__,
repr(value),
))
@property
def extensions(self):
"""Extensions for About
:setter: Tries to convert to Extensions. If None is provided,
sets to an empty `tincan.Extensions` dict.
:setter type: :class:`tincan.Extensions` | dict | None
:rtype: :class:`tincan.Extensions`
"""
return self._extensions
@extensions.setter
def extensions(self, value):
if isinstance(value, Extensions):
self._extensions = value
elif value is None:
self._extensions = Extensions()
else:
try:
self._extensions = Extensions(value)
except Exception as e:
msg = (
"Property 'extensions' in a 'tincan.%s' object must be set with a "
"tincan.Extensions, dict, or None.\n\n" %
self.__class__.__name__,
)
msg += e.message
raise TypeError(msg)
@extensions.deleter
def extensions(self):
del self._extensions
| # Copyright 2014 Rustici Software
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tincan.serializable_base import SerializableBase
from tincan.version import Version
from tincan.extensions import Extensions
class About(SerializableBase):
"""Stores info about this installation of `tincan`.
:param version: The versions supported. This attribute is required.
:type version: list of unicode
:param extensions: Custom user data. This attribute is optional.
:type extensions: :class:`tincan.Extensions`
"""
_props_req = [
'version',
]
_props = [
'extensions',
]
_props.extend(_props_req)
@property
def version(self):
"""Version for About
:setter: Sets the version. If None is provided, defaults to
`[tincan.Version.latest]`. If a string is provided,
makes a 1-element list containing the string.
:setter type: list | tuple | str | unicode | None
:rtype: list
"""
return self._version
@version.setter
def version(self, value):
def check_version(v):
"""Checks a single version string for validity. Raises
if invalid.
:param v: the version string to check
:type v: list of str or unicode | tuple of str or unicode
:raises ValueError
"""
if v in Version.supported:
return
# Construct the error message
if isinstance(value, (list, tuple)):
value_str = repr(v) + ' in ' + repr(value)
else:
value_str = repr(v)
msg = (
"Tried to set property 'version' in a 'tincan.%s' object "
"with an invalid value: %s\n"
"Allowed versions are: %s" %
(
self.__class__.__name__,
value_str,
', '.join(map(repr, Version.supported)),
)
)
raise ValueError(msg)
if value is None:
self._version = [Version.latest]
elif isinstance(value, basestring):
check_version(value)
self._version = [value]
elif isinstance(value, (list, tuple)):
for v in value:
check_version(v)
self._version = list(value)
else:
raise TypeError(
"Property 'version' in a 'tincan.%s' object must be set with a "
"list, tuple, str, unicode or None. Tried to set it with: %s" %
(
self.__class__.__name__,
repr(value),
))
@property
def extensions(self):
"""Extensions for About
:setter: Tries to convert to Extensions. If None is provided,
sets to an empty `tincan.Extensions` dict.
:setter type: :class:`tincan.Extensions` | dict | None
:rtype: :class:`tincan.Extensions`
"""
return self._extensions
@extensions.setter
def extensions(self, value):
if isinstance(value, Extensions):
self._extensions = value
elif value is None:
self._extensions = Extensions()
else:
try:
self._extensions = Extensions(value)
except Exception as e:
msg = (
"Property 'extensions' in a 'tincan.%s' object must be set with a "
"tincan.Extensions, dict, or None.\n\n" %
self.__class__.__name__,
)
msg += e.message
raise TypeError(msg)
@extensions.deleter
def extensions(self):
del self._extensions
| Python | 0 |
5935a9f0ec35774f95b32465134d88d3e087fd1b | Use newer sphinx if available. | tools/sphinx.py | tools/sphinx.py | # Simple Sphinx tool and builder.
import os
from SCons.Script import *
# Build sphinx documentation:
def _action_sphinx(target, source, env):
sourcedir = os.path.dirname(source[0].path)
outdir = os.path.dirname(target[0].path)
app = "%s %s %s %s" % (env['SPHINX_BUILD'], env['SPHINX_OPTS'],
sourcedir, outdir)
ret = env.Execute([app, 'tools/munge-sphinx-perl.pl'])
if not ret:
print "Build finished. The HTML pages are in " + outdir
return ret
def generate(env):
"""Add builders and construction variables for the sphinx tool."""
import SCons.Builder
builder = SCons.Builder.Builder(action=_action_sphinx)
# Use Unix 'install' rather than env.InstallAs(), due to scons bug #1751
install = SCons.Builder.Builder(action="install -d ${TARGET.dir} && " + \
"install -d ${TARGET.dir}/_static && " + \
"install -d ${TARGET.dir}/_sources && " + \
"install -d ${TARGET.dir}/modules && " + \
"install -d ${TARGET.dir}/_sources/modules && " + \
"install ${SOURCE.dir}/*.html ${TARGET.dir} && " + \
"install ${SOURCE.dir}/*.js ${TARGET.dir} && " + \
"install ${SOURCE.dir}/modules/*.html " + \
"${TARGET.dir}/modules && " + \
"install ${SOURCE.dir}/_sources/*.txt " + \
"${TARGET.dir}/_sources && " + \
"install ${SOURCE.dir}/_sources/modules/* " + \
"${TARGET.dir}/_sources/modules && " + \
"install ${SOURCE.dir}/_static/* ${TARGET.dir}/_static")
env.Append(BUILDERS = {'Sphinx': builder, 'SphinxInstall':install})
if os.path.exists('/usr/bin/sphinx-1.0-build'):
env.AppendUnique(SPHINX_BUILD='/usr/bin/sphinx-1.0-build')
else:
env.AppendUnique(SPHINX_BUILD='/usr/bin/sphinx-build')
env.AppendUnique(SPHINX_OPTS='-a -E -b html')
def exists(env):
"""Make sure sphinx tools exist."""
return env.Detect("sphinx")
| # Simple Sphinx tool and builder.
import os
from SCons.Script import *
# Build sphinx documentation:
def _action_sphinx(target, source, env):
sourcedir = os.path.dirname(source[0].path)
outdir = os.path.dirname(target[0].path)
app = "%s %s %s %s" % (env['SPHINX_BUILD'], env['SPHINX_OPTS'],
sourcedir, outdir)
ret = env.Execute([app, 'tools/munge-sphinx-perl.pl'])
if not ret:
print "Build finished. The HTML pages are in " + outdir
return ret
def generate(env):
"""Add builders and construction variables for the sphinx tool."""
import SCons.Builder
builder = SCons.Builder.Builder(action=_action_sphinx)
# Use Unix 'install' rather than env.InstallAs(), due to scons bug #1751
install = SCons.Builder.Builder(action="install -d ${TARGET.dir} && " + \
"install -d ${TARGET.dir}/_static && " + \
"install -d ${TARGET.dir}/_sources && " + \
"install -d ${TARGET.dir}/modules && " + \
"install -d ${TARGET.dir}/_sources/modules && " + \
"install ${SOURCE.dir}/*.html ${TARGET.dir} && " + \
"install ${SOURCE.dir}/*.js ${TARGET.dir} && " + \
"install ${SOURCE.dir}/modules/*.html " + \
"${TARGET.dir}/modules && " + \
"install ${SOURCE.dir}/_sources/*.txt " + \
"${TARGET.dir}/_sources && " + \
"install ${SOURCE.dir}/_sources/modules/* " + \
"${TARGET.dir}/_sources/modules && " + \
"install ${SOURCE.dir}/_static/* ${TARGET.dir}/_static")
env.Append(BUILDERS = {'Sphinx': builder, 'SphinxInstall':install})
env.AppendUnique(SPHINX_BUILD='/usr/bin/sphinx-build')
env.AppendUnique(SPHINX_OPTS='-a -E -b html')
def exists(env):
"""Make sure sphinx tools exist."""
return env.Detect("sphinx")
| Python | 0 |
19c59d3d80b2cf64880d428d0e762d4e262385a5 | Handle stale PostgreSQL (or others) more gracefully. Closes #3394. Thanks to flfr at stibo.com for the patch. | trac/db/pool.py | trac/db/pool.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2005 Edgewall Software
# Copyright (C) 2005 Christopher Lenz <cmlenz@gmx.de>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.com/license.html.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://projects.edgewall.com/trac/.
#
# Author: Christopher Lenz <cmlenz@gmx.de>
try:
import threading
except ImportError:
import dummy_threading as threading
threading._get_ident = lambda: 0
import time
from trac.db.util import ConnectionWrapper
class TimeoutError(Exception):
"""Exception raised by the connection pool when no connection has become
available after a given timeout."""
class PooledConnection(ConnectionWrapper):
"""A database connection that can be pooled. When closed, it gets returned
to the pool.
"""
def __init__(self, pool, cnx):
ConnectionWrapper.__init__(self, cnx)
self._pool = pool
def close(self):
if self.cnx:
self._pool._return_cnx(self.cnx)
self.cnx = None
def __del__(self):
self.close()
class ConnectionPool(object):
"""A very simple connection pool implementation."""
def __init__(self, maxsize, connector, **kwargs):
self._dormant = [] # inactive connections in pool
self._active = {} # active connections by thread ID
self._available = threading.Condition(threading.Lock())
self._maxsize = maxsize # maximum pool size
self._cursize = 0 # current pool size, includes active connections
self._connector = connector
self._kwargs = kwargs
def get_cnx(self, timeout=None):
start = time.time()
self._available.acquire()
try:
tid = threading._get_ident()
if tid in self._active:
self._active[tid][0] += 1
return PooledConnection(self, self._active[tid][1])
while True:
if self._dormant:
cnx = self._dormant.pop()
try:
cnx.cursor() # check whether the connection is stale
break
except Exception:
cnx.close()
elif self._maxsize and self._cursize < self._maxsize:
cnx = self._connector.get_connection(**self._kwargs)
self._cursize += 1
break
else:
if timeout:
self._available.wait(timeout)
if (time.time() - start) >= timeout:
raise TimeoutError, 'Unable to get database ' \
'connection within %d seconds' \
% timeout
else:
self._available.wait()
self._active[tid] = [1, cnx]
return PooledConnection(self, cnx)
finally:
self._available.release()
def _return_cnx(self, cnx):
self._available.acquire()
try:
tid = threading._get_ident()
if tid in self._active:
num, cnx_ = self._active.get(tid)
assert cnx is cnx_
if num > 1:
self._active[tid][0] = num - 1
else:
del self._active[tid]
if cnx not in self._dormant:
cnx.rollback()
if cnx.poolable:
self._dormant.append(cnx)
else:
self._cursize -= 1
self._available.notify()
finally:
self._available.release()
def shutdown(self):
self._available.acquire()
try:
for cnx in self._dormant:
cnx.cnx.close()
finally:
self._available.release()
| # -*- coding: utf-8 -*-
#
# Copyright (C) 2005 Edgewall Software
# Copyright (C) 2005 Christopher Lenz <cmlenz@gmx.de>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.com/license.html.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://projects.edgewall.com/trac/.
#
# Author: Christopher Lenz <cmlenz@gmx.de>
try:
import threading
except ImportError:
import dummy_threading as threading
threading._get_ident = lambda: 0
import time
from trac.db.util import ConnectionWrapper
class TimeoutError(Exception):
"""Exception raised by the connection pool when no connection has become
available after a given timeout."""
class PooledConnection(ConnectionWrapper):
"""A database connection that can be pooled. When closed, it gets returned
to the pool.
"""
def __init__(self, pool, cnx):
ConnectionWrapper.__init__(self, cnx)
self._pool = pool
def close(self):
if self.cnx:
self._pool._return_cnx(self.cnx)
self.cnx = None
def __del__(self):
self.close()
class ConnectionPool(object):
"""A very simple connection pool implementation."""
def __init__(self, maxsize, connector, **kwargs):
self._dormant = [] # inactive connections in pool
self._active = {} # active connections by thread ID
self._available = threading.Condition(threading.Lock())
self._maxsize = maxsize # maximum pool size
self._cursize = 0 # current pool size, includes active connections
self._connector = connector
self._kwargs = kwargs
def get_cnx(self, timeout=None):
start = time.time()
self._available.acquire()
try:
tid = threading._get_ident()
if tid in self._active:
self._active[tid][0] += 1
return PooledConnection(self, self._active[tid][1])
while True:
if self._dormant:
cnx = self._dormant.pop()
break
elif self._maxsize and self._cursize < self._maxsize:
cnx = self._connector.get_connection(**self._kwargs)
self._cursize += 1
break
else:
if timeout:
self._available.wait(timeout)
if (time.time() - start) >= timeout:
raise TimeoutError, 'Unable to get database ' \
'connection within %d seconds' \
% timeout
else:
self._available.wait()
self._active[tid] = [1, cnx]
return PooledConnection(self, cnx)
finally:
self._available.release()
def _return_cnx(self, cnx):
self._available.acquire()
try:
tid = threading._get_ident()
if tid in self._active:
num, cnx_ = self._active.get(tid)
assert cnx is cnx_
if num > 1:
self._active[tid][0] = num - 1
else:
del self._active[tid]
if cnx not in self._dormant:
cnx.rollback()
if cnx.poolable:
self._dormant.append(cnx)
else:
self._cursize -= 1
self._available.notify()
finally:
self._available.release()
def shutdown(self):
self._available.acquire()
try:
for cnx in self._dormant:
cnx.cnx.close()
finally:
self._available.release()
| Python | 0.000001 |
0f02210e4f57f0023a2be6b11059bc5754f4d467 | Update version.py | ttim/version.py | ttim/version.py | __version__='0.4.1'
#__build__='4.0.0.0'
| __version__='0.4.0'
#__build__='4.0.0.0'
| Python | 0 |
91a25c6933015dc09fdf63c6bca75dfaf6115c47 | fix test | backend/src/tests/backend/plugins/misc/test_filter_validators.py | backend/src/tests/backend/plugins/misc/test_filter_validators.py | # This file is part of the GOsa framework.
#
# http://gosa-project.org
#
# Copyright:
# (C) 2016 GONICUS GmbH, Germany, http://www.gonicus.de
#
# See the LICENSE file in the project's top-level directory for details.
import unittest
from gosa.backend.plugins.misc.filter_validators import *
class FilterValidatorTests(unittest.TestCase):
def test_IsValidHostName(self):
filter = IsValidHostName()
(res, errors) = filter.process(None, None, ["www.gonicus.de"])
assert res == True
assert len(errors) == 0
(res, errors) = filter.process(None, None, ["1www.gonicus.de"])
assert res == False
assert len(errors) == 1
@unittest.mock.patch.object(PluginRegistry, 'getInstance')
def test_IsExistingDN(self, mockedRegistry):
# mockup ObjectIndex.search
mockedRegistry.return_value.search.return_value = []
# start the tests
filter = IsExistingDN()
props = { 'test': {
'value': ['test']
}}
(res, errors) = filter.process(props, 'test', ["test1"])
assert res is False
assert len(errors) == 1
mockedRegistry.return_value.search.return_value = [1]
(res, errors) = filter.process(props, 'test', ["test1"])
assert res is True
assert len(errors) == 0
@unittest.mock.patch.object(PluginRegistry, 'getInstance')
def test_IsExistingDnOfType(self, mockedRegistry):
# mockup ObjectIndex.search
mockedRegistry.return_value.search.return_value = []
# start the tests
filter = IsExistingDnOfType()
(res, errors) = filter.process(None, None, ["test"], "type")
assert res == False
assert len(errors) == 1
mockedRegistry.return_value.search.return_value = [1]
(res, errors) = filter.process(None, None, ["test"], "type")
assert res == True
assert len(errors) == 0
@unittest.mock.patch.object(PluginRegistry, 'getInstance')
def test_ObjectWithPropertyExists(self, mocked_registry):
mocked_registry.return_value.search.return_value = []
# start the tests
filter = ObjectWithPropertyExists()
(res, errors) = filter.process(None, None, ["test"], "type", "attr")
assert res is False
assert len(errors) == 1
mocked_registry.return_value.search.return_value = [1]
(res, errors) = filter.process(None, None, ["test"], "type", "attr")
assert res is True
assert len(errors) == 0
| # This file is part of the GOsa framework.
#
# http://gosa-project.org
#
# Copyright:
# (C) 2016 GONICUS GmbH, Germany, http://www.gonicus.de
#
# See the LICENSE file in the project's top-level directory for details.
import unittest
from gosa.backend.plugins.misc.filter_validators import *
class FilterValidatorTests(unittest.TestCase):
def test_IsValidHostName(self):
filter = IsValidHostName()
(res, errors) = filter.process(None, None, ["www.gonicus.de"])
assert res == True
assert len(errors) == 0
(res, errors) = filter.process(None, None, ["1www.gonicus.de"])
assert res == False
assert len(errors) == 1
@unittest.mock.patch.object(PluginRegistry, 'getInstance')
def test_IsExistingDN(self, mockedRegistry):
# mockup ObjectIndex.search
mockedRegistry.return_value.search.return_value = []
# start the tests
filter = IsExistingDN()
props = { 'test': {
'value': ['test']
}}
(res, errors) = filter.process(props, 'test', ["test"])
assert res is False
assert len(errors) == 1
mockedRegistry.return_value.search.return_value = [1]
(res, errors) = filter.process(props, 'test', ["test"])
assert res is True
assert len(errors) == 0
@unittest.mock.patch.object(PluginRegistry, 'getInstance')
def test_IsExistingDnOfType(self, mockedRegistry):
# mockup ObjectIndex.search
mockedRegistry.return_value.search.return_value = []
# start the tests
filter = IsExistingDnOfType()
(res, errors) = filter.process(None, None, ["test"], "type")
assert res == False
assert len(errors) == 1
mockedRegistry.return_value.search.return_value = [1]
(res, errors) = filter.process(None, None, ["test"], "type")
assert res == True
assert len(errors) == 0
@unittest.mock.patch.object(PluginRegistry, 'getInstance')
def test_ObjectWithPropertyExists(self, mocked_registry):
mocked_registry.return_value.search.return_value = []
# start the tests
filter = ObjectWithPropertyExists()
(res, errors) = filter.process(None, None, ["test"], "type", "attr")
assert res is False
assert len(errors) == 1
mocked_registry.return_value.search.return_value = [1]
(res, errors) = filter.process(None, None, ["test"], "type", "attr")
assert res is True
assert len(errors) == 0
| Python | 0.000002 |
1df785d6f337fa2dc9e42c808fab79a5a2827258 | remove line to run tests | hs_modflow_modelinstance/tests/__init__.py | hs_modflow_modelinstance/tests/__init__.py | from test_modflow_modelinstance_metadata import * | Python | 0.000002 | |
40a98808da485edeaa87bb30f0e0749401c500da | update documentation configuration | _doc/sphinxdoc/source/conf3/conf.py | _doc/sphinxdoc/source/conf3/conf.py | import sys
import os
import datetime
import re
import sphinx_bootstrap_theme
source_path = os.path.normpath(
os.path.join(
os.path.abspath(
os.path.split(__file__)[0]),
".."))
try:
from conf_base import *
except ImportError:
sys.path.append(source_path)
from conf_base import *
html_theme = 'bootstrap'
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
templates_path = [os.path.join(source_path, 'phdoc_static3')]
html_static_path = templates_path
if not os.path.exists(templates_path[0]):
raise FileNotFoundError(templates_path[0])
html_logo = "project_ico_small.png"
html_sidebars = {}
if html_theme == "bootstrap":
html_theme_options = {
'navbar_title': "home",
'navbar_site_name': "Site",
'navbar_links': [
("XD", "http://www.xavierdupre.fr", True),
("blog", "blog/main_0000.html", True),
("index", "genindex"),
],
'navbar_sidebarrel': False,
'navbar_pagenav': True,
'navbar_pagenav_name': "Page",
'globaltoc_depth': 3,
'globaltoc_includehidden': "true",
'navbar_class': "navbar navbar-inverse",
'navbar_fixed_top': "true",
'source_link_position': "footer",
'bootswatch_theme': "yeti",
# united = weird colors, sandstone=green, simplex=red, paper=trop bleu
# lumen: OK
# to try, yeti, flatly, paper
'bootstrap_version': "3",
}
blog_root = "http://www.xavierdupre.fr/app/ensae_teaching_cs/helpsphinx3/"
blog_background = False
html_context = {
'css_files': get_default_stylesheet() + ['_static/my-styles.css', '_static/gallery.css'],
}
| import sys
import os
import datetime
import re
import sphinx_bootstrap_theme
source_path = os.path.normpath(
os.path.join(
os.path.abspath(
os.path.split(__file__)[0]),
".."))
try:
from conf_base import *
except ImportError:
sys.path.append(source_path)
from conf_base import *
html_theme = 'bootstrap'
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
templates_path = [os.path.join(source_path, 'phdoc_static3')]
html_static_path = templates_path
if not os.path.exists(templates_path[0]):
raise FileNotFoundError(templates_path[0])
html_logo = "project_ico_small.png"
html_sidebars = {}
if html_theme == "bootstrap":
html_theme_options = {
'navbar_title': "home",
'navbar_site_name': "Site",
'navbar_links': [
("XD", "http://www.xavierdupre.fr", True),
("blog", "blog/main_0000.html", True),
("index", "genindex"),
],
'navbar_sidebarrel': False,
'navbar_pagenav': True,
'navbar_pagenav_name': "Page",
'globaltoc_depth': 3,
'globaltoc_includehidden': "true",
'navbar_class': "navbar navbar-inverse",
'navbar_fixed_top': "true",
'source_link_position': "nav",
'bootswatch_theme': "yeti",
# united = weird colors, sandstone=green, simplex=red, paper=trop bleu
# lumen: OK
# to try, yeti, flatly, paper
'bootstrap_version': "3",
}
blog_root = "http://www.xavierdupre.fr/app/ensae_teaching_cs/helpsphinx3/"
blog_background = False
html_context = {
'css_files': get_default_stylesheet() + ['_static/my-styles.css', '_static/gallery.css'],
}
| Python | 0.000001 |
32f0270ee3049e1a624d86f7f0a68bb4ea55c5f1 | Correct IrActionsActWindow.read overloading | smile_base/models/ir_actions.py | smile_base/models/ir_actions.py | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010 Smile (<http://www.smile.fr>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import api, models, SUPERUSER_ID, tools
from openerp.tools.safe_eval import safe_eval as eval
from ..tools import unquote
class IrActionsActWindow(models.Model):
_inherit = 'ir.actions.act_window'
@api.one
def _update_context(self):
eval_dict = {
'active_id': unquote("active_id"),
'active_ids': unquote("active_ids"),
'active_model': unquote("active_model"),
'uid': unquote("uid"),
'user': unquote("user"),
'context': self._context,
}
try:
context = eval(self.context or '{}', eval_dict) or {}
if 'act_window_id' not in context:
context['act_window_id'] = self.id
self.context = tools.ustr(context)
except:
pass
@api.model
def create(self, vals):
act_window = super(IrActionsActWindow, self).create(vals)
act_window._update_context()
return act_window
@api.multi
def write(self, vals):
res = super(IrActionsActWindow, self).write(vals)
self._update_context()
return res
def read(self, cr, uid, ids, fields=None, context=None, load='_classic_read'):
ids_int = isinstance(ids, (int, long))
if ids_int:
ids = [ids]
results = super(IrActionsActWindow, self).read(cr, uid, ids, fields, context, load)
# Evaluate context value with user
localdict = {
'active_model': unquote('active_model'),
'active_id': unquote('active_id'),
'active_ids': unquote('active_ids'),
'uid': unquote('uid'),
'context': unquote('context'),
'user': self.pool.get('res.users').browse(cr, SUPERUSER_ID, uid, context),
}
for res in results:
if 'context' in res:
try:
with tools.mute_logger("openerp.tools.safe_eval"):
res['context'] = tools.ustr(eval(res['context'], localdict))
except:
continue
return results[0] if ids_int else results
| # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010 Smile (<http://www.smile.fr>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import api, models, tools
from openerp.tools.safe_eval import safe_eval as eval
from openerp.addons.base.ir.ir_actions import ir_actions_act_window
from ..tools import unquote
class IrActionsActWindow(models.Model):
_inherit = 'ir.actions.act_window'
@api.one
def _update_context(self):
eval_dict = {
'active_id': unquote("active_id"),
'active_ids': unquote("active_ids"),
'active_model': unquote("active_model"),
'uid': unquote("uid"),
'user': unquote("user"),
'context': self._context,
}
try:
context = eval(self.context or '{}', eval_dict) or {}
if 'act_window_id' not in context:
context['act_window_id'] = self.id
self.context = tools.ustr(context)
except:
pass
@api.model
def create(self, vals):
act_window = super(IrActionsActWindow, self).create(vals)
act_window._update_context()
return act_window
@api.multi
def write(self, vals):
res = super(IrActionsActWindow, self).write(vals)
self._update_context()
return res
@api.multi
def read(self, fields=None, load='_classic_read'):
results = super(ir_actions_act_window, self).read(fields, load)
# Evaluate context value with user
localdict = {
'active_model': unquote('active_model'),
'active_id': unquote('active_id'),
'active_ids': unquote('active_ids'),
'uid': unquote('uid'),
'context': unquote('context'),
'user': self.env.user,
}
for res in results:
if 'context' in res:
try:
with tools.mute_logger("openerp.tools.safe_eval"):
res['context'] = tools.ustr(eval(res['context'], localdict))
except:
continue
# Evaluate help
if not fields or 'help' in fields:
cr, uid, context = self.env.args
eval_dict = {
'active_model': context.get('active_model'),
'active_id': context.get('active_id'),
'active_ids': context.get('active_ids'),
'uid': uid,
}
for res in results:
model = res.get('res_model')
if model and self.pool.get(model):
try:
with tools.mute_logger("openerp.tools.safe_eval"):
eval_context = eval(res['context'] or "{}", eval_dict) or {}
except:
continue
custom_context = dict(context, **eval_context)
res['help'] = self.pool[model].get_empty_list_help(cr, uid, res.get('help', ""), context=custom_context)
return results
ir_actions_act_window.read = read
| Python | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.