commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
7700e8b9a5a62fce875156482170c4fbc4cae902 | Update shortcut template | swjblog/polls/views.py | swjblog/polls/views.py | from django.http import HttpResponse
from django.template import loader
from django.shortcuts import get_object_or_404, render
# Create your views here.
from .models import Question
# def index(request):
# latest_question_list = Question.objects.order_by('-pub_date')[:5]
# template = loader.get_template('polls/index.html')
# context = {
# 'latest_question_list': latest_question_list,
# }
# return HttpResponse(template.render(context, request))
# shortcut
def index(request):
latest_question_list = Question.objects.order_by('-pub_date')[:5]
context = {'latest_question_list': latest_question_list}
return render(request, 'polls/index.html', context)
def detail(request, question_id):
question = get_object_or_404(Question, pk=question_id)
return render(request, 'polls/detail.html', {'question': question})
def results(request, question_id):
response = "You're looking at the results of question %s."
return HttpResponse(response % question_id)
def vote(request, question_id):
return HttpResponse("You're voting on question %s." % question_id) | from django.shortcuts import render
from django.http import HttpResponse
from django.template import loader
# Create your views here.
from .models import Question
def index(request):
latest_question_list = Question.objects.order_by('-pub_date')[:5]
template = loader.get_template('polls/index.html')
context = {
'latest_question_list': latest_question_list,
}
return HttpResponse(template.render(context, request))
def detail(request, question_id):
return HttpResponse("You're looking at question %s." % question_id)
def results(request, question_id):
response = "You're looking at the results of question %s."
return HttpResponse(response % question_id)
def vote(request, question_id):
return HttpResponse("You're voting on question %s." % question_id) | Python | 0.000001 |
0e780569b8d40f3b9599df4f7d4a457f23b3f54f | Make uploader work | stoneridge_uploader.py | stoneridge_uploader.py | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
import glob
import os
import requests
import stoneridge
class StoneRidgeUploader(object):
"""Takes the upload files created by the collator and uploads them to the
graph server
"""
def __init__(self):
self.url = stoneridge.get_config('upload', 'url')
def run(self):
file_pattern = os.path.join(stoneridge.outdir, 'upload_*.json')
upload_files = glob.glob(file_pattern)
files = {os.path.basename(fname): open(fname, 'rb')
for fname in upload_files}
requests.post(self.url, files=files)
for f in files.values():
f.close()
@stoneridge.main
def main():
parser = stoneridge.ArgumentParser()
args = parser.parse_args()
uploader = StoneRidgeUploader()
uploader.run()
| #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
import glob
import os
import human_curl as requests
import stoneridge
class StoneRidgeUploader(object):
"""Takes the upload files created by the collator and uploads them to the
graph server
"""
def __init__(self):
self.url = stoneridge.get_config('upload', 'url')
def run(self):
file_pattern = os.path.join(stoneridge.outdir, 'upload_*.json')
upload_files = glob.glob(file_pattern)
for upload in upload_files:
fname = os.path.basename(upload)
with file(upload, 'rb') as f:
requests.post(self.url, files=((fname, f),))
@stoneridge.main
def main():
parser = stoneridge.ArgumentParser()
args = parser.parse_args()
uploader = StoneRidgeUploader()
uploader.run()
| Python | 0.000035 |
00216fef47b24c7c4d371cb350db7305d85e7d7b | fix link to numpy func | cupy/random/__init__.py | cupy/random/__init__.py | import numpy as _numpy
def bytes(length):
"""Returns random bytes.
.. note:: This function is just a wrapper for :obj:`numpy.random.bytes`.
The resulting bytes are generated on the host (NumPy), not GPU.
.. seealso:: :meth:`numpy.random.bytes
<numpy.random.mtrand.RandomState.bytes>`
"""
# TODO(kmaehashi): should it be provided in CuPy?
return _numpy.random.bytes(length)
# import class and function
from cupy.random._distributions import beta # NOQA
from cupy.random._distributions import binomial # NOQA
from cupy.random._distributions import chisquare # NOQA
from cupy.random._distributions import dirichlet # NOQA
from cupy.random._distributions import exponential # NOQA
from cupy.random._distributions import f # NOQA
from cupy.random._distributions import gamma # NOQA
from cupy.random._distributions import geometric # NOQA
from cupy.random._distributions import gumbel # NOQA
from cupy.random._distributions import hypergeometric # NOQA
from cupy.random._distributions import laplace # NOQA
from cupy.random._distributions import logistic # NOQA
from cupy.random._distributions import lognormal # NOQA
from cupy.random._distributions import logseries # NOQA
from cupy.random._distributions import multivariate_normal # NOQA
from cupy.random._distributions import negative_binomial # NOQA
from cupy.random._distributions import noncentral_chisquare # NOQA
from cupy.random._distributions import noncentral_f # NOQA
from cupy.random._distributions import normal # NOQA
from cupy.random._distributions import pareto # NOQA
from cupy.random._distributions import poisson # NOQA
from cupy.random._distributions import power # NOQA
from cupy.random._distributions import rayleigh # NOQA
from cupy.random._distributions import standard_cauchy # NOQA
from cupy.random._distributions import standard_exponential # NOQA
from cupy.random._distributions import standard_gamma # NOQA
from cupy.random._distributions import standard_normal # NOQA
from cupy.random._distributions import standard_t # NOQA
from cupy.random._distributions import triangular # NOQA
from cupy.random._distributions import uniform # NOQA
from cupy.random._distributions import vonmises # NOQA
from cupy.random._distributions import wald # NOQA
from cupy.random._distributions import weibull # NOQA
from cupy.random._distributions import zipf # NOQA
from cupy.random._generator import get_random_state # NOQA
from cupy.random._generator import RandomState # NOQA
from cupy.random._generator import reset_states # NOQA
from cupy.random._generator import seed # NOQA
from cupy.random._generator import set_random_state # NOQA
from cupy.random._permutations import permutation # NOQA
from cupy.random._permutations import shuffle # NOQA
from cupy.random._sample import choice # NOQA
from cupy.random._sample import multinomial # NOQA
from cupy.random._sample import rand # NOQA
from cupy.random._sample import randint # NOQA
from cupy.random._sample import randn # NOQA
from cupy.random._sample import random_integers # NOQA
from cupy.random._sample import random_sample # NOQA
from cupy.random._sample import random_sample as random # NOQA
from cupy.random._sample import random_sample as ranf # NOQA
from cupy.random._sample import random_sample as sample # NOQA
| import numpy as _numpy
def bytes(length):
"""Returns random bytes.
.. note:: This function is just a wrapper for :meth:`numpy.random.bytes`.
The resulting bytes are generated on the host (NumPy), not GPU.
.. seealso:: :meth:`numpy.random.bytes
<numpy.random.mtrand.RandomState.bytes>`
"""
# TODO(kmaehashi): should it be provided in CuPy?
return _numpy.random.bytes(length)
# import class and function
from cupy.random._distributions import beta # NOQA
from cupy.random._distributions import binomial # NOQA
from cupy.random._distributions import chisquare # NOQA
from cupy.random._distributions import dirichlet # NOQA
from cupy.random._distributions import exponential # NOQA
from cupy.random._distributions import f # NOQA
from cupy.random._distributions import gamma # NOQA
from cupy.random._distributions import geometric # NOQA
from cupy.random._distributions import gumbel # NOQA
from cupy.random._distributions import hypergeometric # NOQA
from cupy.random._distributions import laplace # NOQA
from cupy.random._distributions import logistic # NOQA
from cupy.random._distributions import lognormal # NOQA
from cupy.random._distributions import logseries # NOQA
from cupy.random._distributions import multivariate_normal # NOQA
from cupy.random._distributions import negative_binomial # NOQA
from cupy.random._distributions import noncentral_chisquare # NOQA
from cupy.random._distributions import noncentral_f # NOQA
from cupy.random._distributions import normal # NOQA
from cupy.random._distributions import pareto # NOQA
from cupy.random._distributions import poisson # NOQA
from cupy.random._distributions import power # NOQA
from cupy.random._distributions import rayleigh # NOQA
from cupy.random._distributions import standard_cauchy # NOQA
from cupy.random._distributions import standard_exponential # NOQA
from cupy.random._distributions import standard_gamma # NOQA
from cupy.random._distributions import standard_normal # NOQA
from cupy.random._distributions import standard_t # NOQA
from cupy.random._distributions import triangular # NOQA
from cupy.random._distributions import uniform # NOQA
from cupy.random._distributions import vonmises # NOQA
from cupy.random._distributions import wald # NOQA
from cupy.random._distributions import weibull # NOQA
from cupy.random._distributions import zipf # NOQA
from cupy.random._generator import get_random_state # NOQA
from cupy.random._generator import RandomState # NOQA
from cupy.random._generator import reset_states # NOQA
from cupy.random._generator import seed # NOQA
from cupy.random._generator import set_random_state # NOQA
from cupy.random._permutations import permutation # NOQA
from cupy.random._permutations import shuffle # NOQA
from cupy.random._sample import choice # NOQA
from cupy.random._sample import multinomial # NOQA
from cupy.random._sample import rand # NOQA
from cupy.random._sample import randint # NOQA
from cupy.random._sample import randn # NOQA
from cupy.random._sample import random_integers # NOQA
from cupy.random._sample import random_sample # NOQA
from cupy.random._sample import random_sample as random # NOQA
from cupy.random._sample import random_sample as ranf # NOQA
from cupy.random._sample import random_sample as sample # NOQA
| Python | 0 |
5b1790664ad5268a1d1764b81d1fa7e8fea5aabe | Bump version number. | stormtracks/version.py | stormtracks/version.py | VERSION = (0, 5, 0, 6, 'alpha')
def get_version(form='short'):
if form == 'short':
return '.'.join([str(v) for v in VERSION[:4]])
elif form == 'long':
return '.'.join([str(v) for v in VERSION][:4]) + '-' + VERSION[4]
else:
raise ValueError('unrecognised form specifier: {0}'.format(form))
__version__ = get_version()
if __name__ == '__main__':
print(get_version())
| VERSION = (0, 5, 0, 5, 'alpha')
def get_version(form='short'):
if form == 'short':
return '.'.join([str(v) for v in VERSION[:4]])
elif form == 'long':
return '.'.join([str(v) for v in VERSION][:4]) + '-' + VERSION[4]
else:
raise ValueError('unrecognised form specifier: {0}'.format(form))
__version__ = get_version()
if __name__ == '__main__':
print(get_version())
| Python | 0 |
8b84b2ae83977e091ee33ce86e30bcc7cc5c08a2 | Allow apostrophe and forbid colon in thread names | chandl/util.py | chandl/util.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
import sys
import hashlib
import unidecode
import six
import requests
import chandl
def bytes_fmt(num, suffix='B'):
"""
Turn a number of bytes into a more friendly representation, e.g. 2.5MiB.
:param num: The number of bytes to convert.
:param suffix: The unit suffix (defaults to 'B').
:return: The human-readable equivalent of the input size.
:raises ValueError: If num is not an integer.
"""
if not isinstance(num, six.integer_types):
raise ValueError('Byte count must be an integral type')
num = abs(num)
for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
if num < 1024.0:
return '{0:.1f} {1}{2}'.format(num, unit, suffix)
num /= 1024.0
return '{0:.1f} {1}{2}'.format(num, 'Yi', suffix)
def decode_cli_arg(arg):
"""
Turn a bytestring provided by `argparse` into unicode.
:param arg: The bytestring to decode.
:return: The argument as a unicode object.
:raises ValueError: If arg is None.
"""
if arg is None:
raise ValueError('Argument cannot be None')
if sys.version_info.major == 3:
# already decoded
return arg
return arg.decode(sys.getfilesystemencoding())
def expand_cli_args(args):
"""
Expand a list of possibly comma separated arguments, removing duplicates.
:param args: The list of arguments to expand.
:return: The set of unique arguments.
"""
items = set()
for arg in args: # "a.jpg,b.png"
for arg_ in [n.strip() for n in arg.split(',')]: # "a.jpg"|"b.jpg"
items.add(arg_)
return items
def make_filename(string):
"""
Turn a string into something that can be safely used as a file or directory
name.
:param string: The string to convert.
:return: The sanitised string.
:raises ValueError: If string is None.
"""
if string is None:
raise ValueError('String cannot be None')
safe = [' ', '.', '_', '\'']
joined = ''.join([c for c in unidecode.unidecode(string)
if c.isalnum() or c in safe]).strip()
if not joined:
raise ValueError('Filename would be empty')
return joined
def md5_file(path):
"""
Get the MD5 hash of a file.
:param path: The path of the file.
:return: The 32-character long lowercase hex representation of the
checksum.
:raises ValueError: If path is invalid.
"""
if not path:
raise ValueError('Path cannot be empty or None')
hash_ = hashlib.md5()
with open(path, 'rb') as fd:
for chunk in iter(lambda: fd.read(4096), b''):
hash_.update(chunk)
return hash_.hexdigest()
def log_level_from_vebosity(verbosity):
"""
Get the `logging` module log level from a verbosity.
:param verbosity: The number of times the `-v` option was specified.
:return: The corresponding log level.
"""
if verbosity == 0:
return logging.WARNING
if verbosity == 1:
return logging.INFO
return logging.DEBUG
def create_session():
"""
Create a requests session for issuing HTTP requests to 4chan.
:return: The created session.
"""
headers = requests.utils.default_headers()
headers.update({
'User-Agent': 'chandl/' + chandl.__version__
})
session = requests.Session()
session.headers = headers
return session
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
import sys
import hashlib
import unidecode
import six
import requests
import chandl
def bytes_fmt(num, suffix='B'):
"""
Turn a number of bytes into a more friendly representation, e.g. 2.5MiB.
:param num: The number of bytes to convert.
:param suffix: The unit suffix (defaults to 'B').
:return: The human-readable equivalent of the input size.
:raises ValueError: If num is not an integer.
"""
if not isinstance(num, six.integer_types):
raise ValueError('Byte count must be an integral type')
num = abs(num)
for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
if num < 1024.0:
return '{0:.1f} {1}{2}'.format(num, unit, suffix)
num /= 1024.0
return '{0:.1f} {1}{2}'.format(num, 'Yi', suffix)
def decode_cli_arg(arg):
"""
Turn a bytestring provided by `argparse` into unicode.
:param arg: The bytestring to decode.
:return: The argument as a unicode object.
:raises ValueError: If arg is None.
"""
if arg is None:
raise ValueError('Argument cannot be None')
if sys.version_info.major == 3:
# already decoded
return arg
return arg.decode(sys.getfilesystemencoding())
def expand_cli_args(args):
"""
Expand a list of possibly comma separated arguments, removing duplicates.
:param args: The list of arguments to expand.
:return: The set of unique arguments.
"""
items = set()
for arg in args: # "a.jpg,b.png"
for arg_ in [n.strip() for n in arg.split(',')]: # "a.jpg"|"b.jpg"
items.add(arg_)
return items
def make_filename(string):
"""
Turn a string into something that can be safely used as a file or directory
name.
:param string: The string to convert.
:return: The sanitised string.
:raises ValueError: If string is None.
"""
if string is None:
raise ValueError('String cannot be None')
safe = [' ', '.', '_', ':']
joined = ''.join([c for c in unidecode.unidecode(string)
if c.isalnum() or c in safe]).strip()
if not joined:
raise ValueError('Filename would be empty')
return joined
def md5_file(path):
"""
Get the MD5 hash of a file.
:param path: The path of the file.
:return: The 32-character long lowercase hex representation of the
checksum.
:raises ValueError: If path is invalid.
"""
if not path:
raise ValueError('Path cannot be empty or None')
hash_ = hashlib.md5()
with open(path, 'rb') as fd:
for chunk in iter(lambda: fd.read(4096), b''):
hash_.update(chunk)
return hash_.hexdigest()
def log_level_from_vebosity(verbosity):
"""
Get the `logging` module log level from a verbosity.
:param verbosity: The number of times the `-v` option was specified.
:return: The corresponding log level.
"""
if verbosity == 0:
return logging.WARNING
if verbosity == 1:
return logging.INFO
return logging.DEBUG
def create_session():
"""
Create a requests session for issuing HTTP requests to 4chan.
:return: The created session.
"""
headers = requests.utils.default_headers()
headers.update({
'User-Agent': 'chandl/' + chandl.__version__
})
session = requests.Session()
session.headers = headers
return session
| Python | 0.000006 |
a82d419d17c67cfd7842cf104994b9ecbda96e94 | Delete existing libnccl before installing NCCL | perfkitbenchmarker/linux_packages/nccl.py | perfkitbenchmarker/linux_packages/nccl.py | # Copyright 2018 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing NCCL installation function."""
import posixpath
from perfkitbenchmarker import flags
from perfkitbenchmarker import vm_util
flags.DEFINE_string('nccl_version', '2.5.6-2',
'NCCL version to install')
FLAGS = flags.FLAGS
GIT_REPO = 'https://github.com/NVIDIA/nccl.git'
def _Build(vm):
"""Installs the OpenMPI package on the VM."""
vm.RemoteCommand('[ -d "nccl" ] || git clone {git_repo} --branch v{version}'
.format(git_repo=GIT_REPO, version=FLAGS.nccl_version))
cuda_home = '/usr/local/cuda'
vm.InstallPackages('build-essential devscripts debhelper fakeroot')
env_vars = {}
env_vars['PATH'] = (r'{cuda_bin_path}:$PATH'
.format(cuda_bin_path=posixpath.join(cuda_home, 'bin')))
env_vars['CUDA_HOME'] = (r'{cuda_home}'.format(cuda_home=cuda_home))
env_vars['LD_LIBRARY_PATH'] = (r'{lib_path}:$LD_LIBRARY_PATH'
.format(lib_path=posixpath.join(
cuda_home, 'lib64')))
vm.RemoteCommand('cd nccl && {env} make -j 20 pkg.debian.build'
.format(env=vm_util.DictonaryToEnvString(env_vars)))
def AptInstall(vm):
"""Installs the NCCL package on the VM."""
_Build(vm)
vm.RemoteCommand('sudo rm -rf /usr/local/nccl2') # Preexisting NCCL in DLVM
vm.InstallPackages('{build}libnccl2_{nccl}+cuda{cuda}_amd64.deb '
'{build}libnccl-dev_{nccl}+cuda{cuda}_amd64.deb'
.format(
build='./nccl/build/pkg/deb/',
nccl=FLAGS.nccl_version,
cuda=FLAGS.cuda_toolkit_version))
| # Copyright 2018 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing NCCL installation function."""
import posixpath
from perfkitbenchmarker import flags
from perfkitbenchmarker import vm_util
flags.DEFINE_string('nccl_version', '2.5.6-2',
'NCCL version to install')
FLAGS = flags.FLAGS
GIT_REPO = 'https://github.com/NVIDIA/nccl.git'
def _Build(vm):
"""Installs the OpenMPI package on the VM."""
vm.RemoteCommand('[ -d "nccl" ] || git clone {git_repo} --branch v{version}'
.format(git_repo=GIT_REPO, version=FLAGS.nccl_version))
cuda_home = '/usr/local/cuda'
vm.InstallPackages('build-essential devscripts debhelper fakeroot')
env_vars = {}
env_vars['PATH'] = (r'{cuda_bin_path}:$PATH'
.format(cuda_bin_path=posixpath.join(cuda_home, 'bin')))
env_vars['CUDA_HOME'] = (r'{cuda_home}'.format(cuda_home=cuda_home))
env_vars['LD_LIBRARY_PATH'] = (r'{lib_path}:$LD_LIBRARY_PATH'
.format(lib_path=posixpath.join(
cuda_home, 'lib64')))
vm.RemoteCommand('cd nccl && {env} make -j 20 pkg.debian.build'
.format(env=vm_util.DictonaryToEnvString(env_vars)))
def AptInstall(vm):
"""Installs the NCCL package on the VM."""
_Build(vm)
vm.InstallPackages('{build}libnccl2_{nccl}+cuda{cuda}_amd64.deb '
'{build}libnccl-dev_{nccl}+cuda{cuda}_amd64.deb'
.format(
build='./nccl/build/pkg/deb/',
nccl=FLAGS.nccl_version,
cuda=FLAGS.cuda_toolkit_version))
| Python | 0.000001 |
13ae8cf8eddba1cf40d89307ba1c52480cbac472 | Bump version | async2rewrite/__init__.py | async2rewrite/__init__.py | """
Convert discord.py code using abstract syntax trees.
"""
__title__ = 'async2rewrite'
__author__ = 'Tyler Gibbs'
__version__ = '0.0.3'
__copyright__ = 'Copyright 2017 TheTrain2000'
__license__ = 'MIT'
from .main import *
| """
Convert discord.py code using abstract syntax trees.
"""
__title__ = 'async2rewrite'
__author__ = 'Tyler Gibbs'
__version__ = '0.0.2'
__copyright__ = 'Copyright 2017 TheTrain2000'
__license__ = 'MIT'
from .main import *
| Python | 0 |
0639158e539f0f1c1a6d4dac1753179429257017 | add django_pluralize template filter | source/base/helpers.py | source/base/helpers.py | import datetime
import logging
import os
from django.conf import settings
from django.template.defaultfilters import linebreaks as django_linebreaks,\
escapejs as django_escapejs, pluralize as django_pluralize
from jingo import register
from sorl.thumbnail import get_thumbnail
logger = logging.getLogger('base.helpers')
@register.filter
def linebreaks(string):
return django_linebreaks(string)
@register.filter
def escapejs(string):
return django_escapejs(string)
@register.function
def get_timestamp():
return datetime.datetime.now()
@register.filter
def dj_pluralize(string, arg='s'):
return django_pluralize(string, arg)
@register.function
def thumbnail(source, *args, **kwargs):
"""
Wraps sorl thumbnail with an additional 'default' keyword
https://github.com/mozilla/mozillians/blob/master/apps/common/helpers.py
"""
# Templates should never return an exception
try:
if not source.path:
source = kwargs.get('default')
# Handle PNG images a little more gracefully
# Make sure thumbnail call doesn't specifically set format
if not 'format' in kwargs:
filetype = source.path.split('.')[-1]
# If we have a PNG, don't default convert to JPG
if filetype.lower() == 'png':
kwargs['format'] = 'PNG'
return get_thumbnail(source, *args, **kwargs)
except Exception as e:
logger.error('Thumbnail had Exception: %s' % (e,))
source = getattr(settings, 'DEFAULT_IMAGE_SRC')
return get_thumbnail(source, *args, **kwargs)
| import datetime
import logging
import os
from django.conf import settings
from django.template.defaultfilters import linebreaks as django_linebreaks,\
escapejs as django_escapejs
from jingo import register
from sorl.thumbnail import get_thumbnail
logger = logging.getLogger('base.helpers')
@register.filter
def linebreaks(string):
return django_linebreaks(string)
@register.filter
def escapejs(string):
return django_escapejs(string)
@register.function
def get_timestamp():
return datetime.datetime.now()
@register.function
def thumbnail(source, *args, **kwargs):
"""
Wraps sorl thumbnail with an additional 'default' keyword
https://github.com/mozilla/mozillians/blob/master/apps/common/helpers.py
"""
# Templates should never return an exception
try:
if not source.path:
source = kwargs.get('default')
# Handle PNG images a little more gracefully
# Make sure thumbnail call doesn't specifically set format
if not 'format' in kwargs:
filetype = source.path.split('.')[-1]
# If we have a PNG, don't default convert to JPG
if filetype.lower() == 'png':
kwargs['format'] = 'PNG'
return get_thumbnail(source, *args, **kwargs)
except Exception as e:
logger.error('Thumbnail had Exception: %s' % (e,))
source = getattr(settings, 'DEFAULT_IMAGE_SRC')
return get_thumbnail(source, *args, **kwargs)
| Python | 0.000001 |
62694c2072e3499b843372166daeead8a6335a5e | Format with Black | comics/accounts/views.py | comics/accounts/views.py | from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
from django.urls import reverse
from invitations.utils import get_invitation_model
from comics.accounts.models import Subscription
from comics.core.models import Comic
@login_required
def account_details(request):
return render(
request,
"accounts/details.html",
{"active": {"account": True, "account_details": True}},
)
@login_required
def secret_key(request):
"""Show and generate a new secret key for the current user"""
if request.method == "POST":
comics_profile = request.user.comics_profile
comics_profile.generate_new_secret_key()
comics_profile.save()
messages.info(request, "A new secret key was generated.")
return HttpResponseRedirect(reverse("secret_key"))
return render(
request,
"accounts/secret_key.html",
{"active": {"account": True, "secret_key": True}},
)
@login_required
def mycomics_toggle_comic(request):
"""Change a single comic in My comics"""
if request.method != "POST":
response = HttpResponse(status=405)
response["Allowed"] = "POST"
return response
comic = get_object_or_404(Comic, slug=request.POST["comic"])
if "add_comic" in request.POST:
subscription = Subscription(
userprofile=request.user.comics_profile, comic=comic
)
subscription.save()
if not request.is_ajax():
messages.info(request, 'Added "%s" to my comics' % comic.name)
elif "remove_comic" in request.POST:
subscriptions = Subscription.objects.filter(
userprofile=request.user.comics_profile, comic=comic
)
subscriptions.delete()
if not request.is_ajax():
messages.info(request, 'Removed "%s" from my comics' % comic.name)
if request.is_ajax():
return HttpResponse(status=204)
else:
return HttpResponseRedirect(reverse("mycomics_latest"))
@login_required
def mycomics_edit_comics(request):
"""Change multiple comics in My comics"""
if request.method != "POST":
response = HttpResponse(status=405)
response["Allowed"] = "POST"
return response
my_comics = request.user.comics_profile.comics.all()
for comic in my_comics:
if comic.slug not in request.POST:
subscriptions = Subscription.objects.filter(
userprofile=request.user.comics_profile, comic=comic
)
subscriptions.delete()
if not request.is_ajax():
messages.info(
request, 'Removed "%s" from my comics' % comic.name
)
for comic in Comic.objects.all():
if comic.slug in request.POST and comic not in my_comics:
subscription = Subscription(
userprofile=request.user.comics_profile, comic=comic
)
subscription.save()
if not request.is_ajax():
messages.info(request, 'Added "%s" to my comics' % comic.name)
if request.is_ajax():
return HttpResponse(status=204)
elif "HTTP_REFERER" in request.META:
return HttpResponseRedirect(request.META["HTTP_REFERER"])
else:
return HttpResponseRedirect(reverse("mycomics_latest"))
@login_required
def invite(request):
if request.method == "POST":
invitation_model = get_invitation_model()
invitation = invitation_model.create(
request.POST["email"], inviter=request.user
)
invitation.send_invitation(request)
messages.success(
'An invitation has been sent to "%s".' % invitation.email
)
invitations = request.user.invitation_set.all().order_by("-created")
return render(
request,
"accounts/invite.html",
{
"active": {"invite": True},
"invitations": invitations,
},
)
| from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
from django.urls import reverse
from invitations.utils import get_invitation_model
from comics.accounts.models import Subscription
from comics.core.models import Comic
@login_required
def account_details(request):
return render(
request,
"accounts/details.html",
{"active": {"account": True, "account_details": True}},
)
@login_required
def secret_key(request):
"""Show and generate a new secret key for the current user"""
if request.method == "POST":
comics_profile = request.user.comics_profile
comics_profile.generate_new_secret_key()
comics_profile.save()
messages.info(request, "A new secret key was generated.")
return HttpResponseRedirect(reverse("secret_key"))
return render(
request,
"accounts/secret_key.html",
{"active": {"account": True, "secret_key": True}},
)
@login_required
def mycomics_toggle_comic(request):
"""Change a single comic in My comics"""
if request.method != "POST":
response = HttpResponse(status=405)
response["Allowed"] = "POST"
return response
comic = get_object_or_404(Comic, slug=request.POST["comic"])
if "add_comic" in request.POST:
subscription = Subscription(
userprofile=request.user.comics_profile, comic=comic
)
subscription.save()
if not request.is_ajax():
messages.info(request, 'Added "%s" to my comics' % comic.name)
elif "remove_comic" in request.POST:
subscriptions = Subscription.objects.filter(
userprofile=request.user.comics_profile, comic=comic
)
subscriptions.delete()
if not request.is_ajax():
messages.info(request, 'Removed "%s" from my comics' % comic.name)
if request.is_ajax():
return HttpResponse(status=204)
else:
return HttpResponseRedirect(reverse("mycomics_latest"))
@login_required
def mycomics_edit_comics(request):
"""Change multiple comics in My comics"""
if request.method != "POST":
response = HttpResponse(status=405)
response["Allowed"] = "POST"
return response
my_comics = request.user.comics_profile.comics.all()
for comic in my_comics:
if comic.slug not in request.POST:
subscriptions = Subscription.objects.filter(
userprofile=request.user.comics_profile, comic=comic
)
subscriptions.delete()
if not request.is_ajax():
messages.info(
request, 'Removed "%s" from my comics' % comic.name
)
for comic in Comic.objects.all():
if comic.slug in request.POST and comic not in my_comics:
subscription = Subscription(
userprofile=request.user.comics_profile, comic=comic
)
subscription.save()
if not request.is_ajax():
messages.info(request, 'Added "%s" to my comics' % comic.name)
if request.is_ajax():
return HttpResponse(status=204)
elif "HTTP_REFERER" in request.META:
return HttpResponseRedirect(request.META["HTTP_REFERER"])
else:
return HttpResponseRedirect(reverse("mycomics_latest"))
@login_required
def invite(request):
if request.method == "POST":
invitation_model = get_invitation_model()
invitation = invitation_model.create(
request.POST["email"], inviter=request.user
)
invitation.send_invitation(request)
messages.success(
'An invitation has been sent to "%s".' % invitation.email
)
invitations = request.user.invitation_set.all().order_by('-created')
return render(
request,
"accounts/invite.html",
{
"active": {"invite": True},
"invitations": invitations,
},
)
| Python | 0 |
d305e953d028b935333b86b4cffc58649b8a4652 | Twitter uses OAuth1 not OAuth2, dummy | hiptweet/tasks.py | hiptweet/tasks.py | import logging
import requests
from flask import Blueprint, jsonify
from requests_oauthlib import OAuth1Session
from hiptweet import celery
from hiptweet.models import HipChatGroup, HipChatRoom
from celery.utils.log import get_task_logger
# set up logging
logger = get_task_logger(__name__)
logger.setLevel(logging.INFO)
# create a Flask blueprint for getting task status info
tasks = Blueprint('tasks', __name__)
@tasks.route('/status/<task_id>')
def status(task_id):
result = celery.AsyncResult(task_id)
return jsonify({
"state": result.state,
"info": result.info,
})
def paginated_get(url, session=None, callback=None, **kwargs):
"""
Return a generator of results for this API call, based on the structure
of HipChat's API return values.
"""
session = session or requests.Session()
payload = {
"start-index": 0,
"max-results": 1000,
}
payload.update(kwargs)
while url:
resp = session.get(url, params=payload)
if callable(callback):
callback(resp)
resp.raise_for_status()
result = resp.json()
for item in result["items"]:
yield item
url = result.get("links", {}).get("next", "")
@celery.task(bind=True)
def fetch_room_names(self, group_id):
group = HipChatGroup.query.get(group_id)
install_info = group.install_info[0]
capabilities_resp = requests.get(install_info.capabilities_url)
capabilities_resp.raise_for_status()
base_api_url = (
capabilities_resp.json()["capabilities"]["hipchatApiProvider"]["url"]
)
rooms_info_url = base_api_url + "room"
twitter_token = group.twitter_oauth.token
session = OAuth1Session(
client_key=install_info.oauth_id,
client_secret=install_info.oauth_secret,
resource_owner_key=twitter_token['oauth_token'],
resource_owner_secret=twitter_token['oauth_token_secret'],
)
def update_state(resp):
if not resp.ok:
return
start_index = resp.json()["startIndex"]
self.update_state(state="STARTED", meta={"startIndex": start_index})
rooms_info = paginated_get(rooms_info_url, session=session, callback=update_state)
for room_info in rooms_info:
room_id = room_info['id']
room = HipChatRoom.query.get(room_id)
if not room:
room = HipChatRoom(id=room_id, group=group)
room.name = room_info["name"]
db.session.add(room)
db.session.commit()
| import logging
import requests
from flask import Blueprint, jsonify
from requests_oauthlib import OAuth2Session
from hiptweet import celery
from hiptweet.models import HipChatGroup, HipChatRoom
from celery.utils.log import get_task_logger
# set up logging
logger = get_task_logger(__name__)
logger.setLevel(logging.INFO)
# create a Flask blueprint for getting task status info
tasks = Blueprint('tasks', __name__)
@tasks.route('/status/<task_id>')
def status(task_id):
result = celery.AsyncResult(task_id)
return jsonify({
"state": result.state,
"info": result.info,
})
def paginated_get(url, session=None, callback=None, **kwargs):
"""
Return a generator of results for this API call, based on the structure
of HipChat's API return values.
"""
session = session or requests.Session()
payload = {
"start-index": 0,
"max-results": 1000,
}
payload.update(kwargs)
while url:
resp = session.get(url, params=payload)
if callable(callback):
callback(resp)
resp.raise_for_status()
result = resp.json()
for item in result["items"]:
yield item
url = result.get("links", {}).get("next", "")
@celery.task(bind=True)
def fetch_room_names(self, group_id):
group = HipChatGroup.query.get(group_id)
capabilities_url = group.install_info[0].capabilities_url
capabilities_resp = requests.get(capabilities_url)
capabilities_resp.raise_for_status()
base_api_url = (
capabilities_resp.json()["capabilities"]["hipchatApiProvider"]["url"]
)
rooms_info_url = base_api_url + "room"
session = OAuth2Session(token=group.twitter_oauth.token)
def update_state(resp):
if not resp.ok:
return
start_index = resp.json()["startIndex"]
self.update_state(state="STARTED", meta={"startIndex": start_index})
rooms_info = paginated_get(rooms_info_url, session=session, callback=update_state)
for room_info in rooms_info:
room_id = room_info['id']
room = HipChatRoom.query.get(room_id)
if not room:
room = HipChatRoom(id=room_id, group=group)
room.name = room_info["name"]
db.session.add(room)
db.session.commit()
| Python | 0.99914 |
aa4be6a435222003bf5e87df5c1f8d34394592fe | add celery conf | hiren/__init__.py | hiren/__init__.py | from github.celery import app as celery_app | Python | 0.999777 | |
a871f05ba94c34b1444468c46ed7895469059653 | Create member allow_origin | glarkconnector.py | glarkconnector.py | #!/usr/bin/python
"""Connector for the glark.io editor. """
__version__ = "0.1"
import BaseHTTPServer
import json
import os
import re
import sys
class ConnectorRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""Request handler exposing a REST api to the underlying filesystem"""
server_version = "glarkconnector/" + __version__
allow_origin = "http://dev.galipette.org"
def do_GET(self):
"""Serve a GET request."""
# Route request.
print('Request path: ' + self.path)
if (self.path == '/files'):
self.route_get_list_files()
elif (re.match(r'/files/(.+)$', self.path)):
requested_file = re.match(r'/files/(.+)$', self.path).group(1)
self.route_get_file(requested_file)
else:
self.route_400()
def do_OPTIONS(self):
"""Serve a OPTIONS request."""
self.send_response(200)
self.send_header("Access-Control-Allow-Origin", allow_origin)
self.send_header("Access-Control-Allow-Headers", "accept, origin, x-requested-with")
self.end_headers()
def do_HEAD(self):
"""Serve a HEAD request."""
raise NotImplemented
# ----------
# Routes:
def route_get_list_files(self):
try:
files = os.listdir(os.getcwd())
except os.error:
self.route_403()
return
self.jsend(files)
def route_get_file(self, requested_file):
if not self.is_in_directory(requested_file, os.getcwd()):
self.route_403()
else:
try:
# Always read in binary mode. Opening files in text mode may cause
# newline translations, making the actual size of the content
# transmitted *less* than the content-length!
with open(os.path.realpath(requested_file), 'rb') as fp:
file_content = fp.read()
file_stat = os.fstat(fp.fileno())
file_size = str(file_stat[6])
file_mtime = str(file_stat.st_mtime)
except IOError:
self.route_404()
return
data = {'content': file_content, 'size': file_size, 'mtime': file_mtime}
self.jsend(data)
def route_400(self):
self.send_error(404, "Not a valid api route")
def route_403(self):
self.send_error(403, "Forbidden path")
def route_404(self):
self.send_error(404, "Not found")
# ----------
# Helpers
def jsend(self, data):
"""Send data in jsend format."""
formatted = {'status': 'success', 'data': data}
jsend = json.dumps(formatted)
self.send_response(200)
self.send_header("Access-Control-Allow-Origin", allow_origin)
encoding = sys.getfilesystemencoding()
self.send_header("Content-type", "text/json; charset=%s" % encoding)
self.send_header("Content-Length", str(len(jsend)))
self.end_headers()
self.wfile.write(jsend)
def is_in_directory(self, file_path, directory_path):
"""Check that file_path is inside directory_path or any of its
subdirectories, following symlinks."""
real_dir = os.path.realpath(directory_path)
real_file = os.path.realpath(file_path)
return os.path.commonprefix([real_file, real_dir]) == real_dir
def startConnector():
port = 3001
httpd = BaseHTTPServer.HTTPServer(("", port), ConnectorRequestHandler)
print("Serving at port " + str(port))
httpd.serve_forever()
if __name__ == '__main__':
startConnector()
| #!/usr/bin/python
"""Connector for the glark.io editor. """
__version__ = "0.1"
import BaseHTTPServer
import json
import os
import re
import sys
class ConnectorRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""Request handler exposing a REST api to the underlying filesystem"""
server_version = "glarkconnector/" + __version__
def do_GET(self):
"""Serve a GET request."""
# Route request.
print('Request path: ' + self.path)
if (self.path == '/files'):
self.route_get_list_files()
elif (re.match(r'/files/(.+)$', self.path)):
requested_file = re.match(r'/files/(.+)$', self.path).group(1)
self.route_get_file(requested_file)
else:
self.route_400()
def do_OPTIONS(self):
"""Serve a OPTIONS request."""
self.send_response(200)
self.send_header("Access-Control-Allow-Origin", "http://dev.galipette.org")
self.send_header("Access-Control-Allow-Headers", "accept, origin, x-requested-with")
self.end_headers()
def do_HEAD(self):
"""Serve a HEAD request."""
raise NotImplemented
# ----------
# Routes:
def route_get_list_files(self):
try:
files = os.listdir(os.getcwd())
except os.error:
self.route_403()
return
self.jsend(files)
def route_get_file(self, requested_file):
if not self.is_in_directory(requested_file, os.getcwd()):
self.route_403()
else:
try:
# Always read in binary mode. Opening files in text mode may cause
# newline translations, making the actual size of the content
# transmitted *less* than the content-length!
with open(os.path.realpath(requested_file), 'rb') as fp:
file_content = fp.read()
file_stat = os.fstat(fp.fileno())
file_size = str(file_stat[6])
file_mtime = str(file_stat.st_mtime)
except IOError:
self.route_404()
return
data = {'content': file_content, 'size': file_size, 'mtime': file_mtime}
self.jsend(data)
def route_400(self):
self.send_error(404, "Not a valid api route")
def route_403(self):
self.send_error(403, "Forbidden path")
def route_404(self):
self.send_error(404, "Not found")
# ----------
# Helpers
def jsend(self, data):
"""Send data in jsend format."""
formatted = {'status': 'success', 'data': data}
jsend = json.dumps(formatted)
self.send_response(200)
self.send_header("Access-Control-Allow-Origin", "http://dev.galipette.org")
encoding = sys.getfilesystemencoding()
self.send_header("Content-type", "text/json; charset=%s" % encoding)
self.send_header("Content-Length", str(len(jsend)))
self.end_headers()
self.wfile.write(jsend)
def is_in_directory(self, file_path, directory_path):
"""Check that file_path is inside directory_path or any of its
subdirectories, following symlinks."""
real_dir = os.path.realpath(directory_path)
real_file = os.path.realpath(file_path)
return os.path.commonprefix([real_file, real_dir]) == real_dir
def startConnector():
port = 3001
httpd = BaseHTTPServer.HTTPServer(("", port), ConnectorRequestHandler)
print("Serving at port " + str(port))
httpd.serve_forever()
if __name__ == '__main__':
startConnector()
| Python | 0 |
f343c9782ac0ae02ca056385aa4c6098399d0076 | Fix loop through jaydebeapi cursor because not iterable | atp_classes/TeradataDB.py | atp_classes/TeradataDB.py | import atp_classes, re, platform, os
class TeradataDB:
def __init__(self, host=None, port=None, username=None, password=None, database=None, auth_mech=None):
config = atp_classes.Config()
self.host = host or config.get_config()['database']['dataWarehouse']['host']
self.username = username or config.get_config()['database']['dataWarehouse']['username']
self.password = password or config.get_config()['database']['dataWarehouse']['password']
def execute_query(self, query_string):
result_rows = []
if platform.mac_ver()[0] != '':
import teradata
udaExec = teradata.UdaExec(appName="DataFetcher", version="1.0", logConsole=False)
with udaExec.connect(method="odbc", system=self.host, username=self.username, password=self.password)as conn:
with conn.cursor() as cur:
try:
print "executing query"
# Execute query
cur.execute(query_string)
print "done executing query"
# Get column names
columns = cur.description
# Fetch table results
for row in cur:
result_obj = {}
for index, val in enumerate(columns):
# Remove characters and dot which precedes column name for key values
result_obj[re.sub(r'.*[.]', '', val[0])] = str(row[index]).strip()
result_rows.append(result_obj)
except Exception, e:
return e
conn.close()
else:
import jaydebeapi
import jpype
try:
if not jpype.isJVMStarted():
current_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
jar = r'{lib_path_gss}{java_sep}{lib_path_jdbc}'.format(lib_path_gss=os.path.join(current_dir,"lib",'tdgssconfig.jar'),
java_sep=os.pathsep,
lib_path_jdbc=os.path.join(current_dir,'lib','terajdbc4.jar'))
args='-Djava.class.path=%s' % jar
if 'JVM_PATH' in os.environ:
jvm_path = os.environ['JVM_PATH']
else:
jvm_path = jpype.getDefaultJVMPath()
jpype.startJVM(jvm_path, args)
conn = jaydebeapi.connect('com.teradata.jdbc.TeraDriver','jdbc:teradata://{url}/USER={user},PASSWORD={password}'
.format(url=self.host, user=self.username, password=self.password))
cur = conn.cursor()
print "executing query"
# Execute query
cur.execute(query_string)
print "done executing query"
# Get column names
columns = cur.description
# Fetch table results
for row in cur.fetchall():
result_obj = {}
for index, val in enumerate(columns):
# Remove characters and dot which precedes column name for key values
result_obj[re.sub(r'.*[.]', '', val[0])] = str(row[index]).strip()
result_rows.append(result_obj)
conn.close()
except Exception, e:
return e
return result_rows
| import atp_classes, re, platform, os
class TeradataDB:
def __init__(self, host=None, port=None, username=None, password=None, database=None, auth_mech=None):
config = atp_classes.Config()
self.host = host or config.get_config()['database']['dataWarehouse']['host']
self.username = username or config.get_config()['database']['dataWarehouse']['username']
self.password = password or config.get_config()['database']['dataWarehouse']['password']
def execute_query(self, query_string):
result_rows = []
if platform.mac_ver()[0] != '':
import teradata
udaExec = teradata.UdaExec(appName="DataFetcher", version="1.0", logConsole=False)
with udaExec.connect(method="odbc", system=self.host, username=self.username, password=self.password)as conn:
with conn.cursor() as cur:
try:
print "executing query"
# Execute query
cur.execute(query_string)
print "done executing query"
# Get column names
columns = cur.description
# Fetch table results
for row in cur:
result_obj = {}
for index, val in enumerate(columns):
# Remove characters and dot which precedes column name for key values
result_obj[re.sub(r'.*[.]', '', val[0])] = str(row[index]).strip()
result_rows.append(result_obj)
except Exception, e:
return e
conn.close()
else:
import jaydebeapi
import jpype
try:
if not jpype.isJVMStarted():
current_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
jar = r'{lib_path_gss}{java_sep}{lib_path_jdbc}'.format(lib_path_gss=os.path.join(current_dir,"lib",'tdgssconfig.jar'),
java_sep=os.pathsep,
lib_path_jdbc=os.path.join(current_dir,'lib','terajdbc4.jar'))
args='-Djava.class.path=%s' % jar
if 'JVM_PATH' in os.environ:
jvm_path = os.environ['JVM_PATH']
else:
jvm_path = jpype.getDefaultJVMPath()
jpype.startJVM(jvm_path, args)
conn = jaydebeapi.connect('com.teradata.jdbc.TeraDriver','jdbc:teradata://{url}/USER={user},PASSWORD={password}'
.format(url=self.host, user=self.username, password=self.password))
cur = conn.cursor()
print "executing query"
# Execute query
cur.execute(query_string)
print "done executing query"
# Get column names
columns = cur.description
# Fetch table results
for row in cur:
result_obj = {}
for index, val in enumerate(columns):
# Remove characters and dot which precedes column name for key values
result_obj[re.sub(r'.*[.]', '', val[0])] = str(row[index]).strip()
result_rows.append(result_obj)
conn.close()
except Exception, e:
return e
return result_rows
| Python | 0.000022 |
e8583e6ad8c0a3d89fe4bcb063a776f1ad139447 | Update spoonerism.py | pythainlp/transliterate/spoonerism.py | pythainlp/transliterate/spoonerism.py | # -*- coding: utf-8 -*-
from pythainlp.transliterate import pronunciate
from pythainlp import thai_consonants
_list_consonants = list(thai_consonants.replace("ห", ""))
def puan(word: str, show_pronunciation: bool = True) -> str:
"""
Thai Spoonerism
This function converts Thai word to spoonerized.
It only supports words with 2 to 3 syllables.
:param str word: Thai word to be spoonerism
:param bool show_pronunciation: True (default) or False
:return: A string of Thai spoonerism word.
:rtype: str
:Example:
::
from pythainlp.transliterate import puan
puan("นาริน")
# output: 'นิน-รา'
puan("นาริน", False)
# output: 'นินรา'
"""
word = pronunciate(word, engine="w2p")
_list_char = []
_list_pron = word.split('-')
_mix_list = ""
if len(_list_pron) == 1:
return word
if show_pronunciation:
_mix_list = "-"
for i in _list_pron:
for j in i:
if j in _list_consonants:
_list_char.append(j)
break
elif "ห" in j and "หฺ" not in i:
_list_char.append(j)
break
list_w_char = list(zip(_list_pron, _list_char))
_list_w = []
if len(list_w_char) == 2:
_list_w.append(
list_w_char[1][0].replace(list_w_char[1][1], list_w_char[0][1], 1)
)
_list_w.append(
list_w_char[0][0].replace(list_w_char[0][1], list_w_char[1][1], 1)
)
elif len(list_w_char) == 3:
_list_w.append(_list_pron[0])
_list_w.append(
list_w_char[2][0].replace(list_w_char[2][1], list_w_char[1][1], 1)
)
_list_w.append(list_w_char[1][0].replace(
list_w_char[1][1], list_w_char[2][1], 1)
)
else: # > 3 syllables
raise ValueError(
"""{0} is more than 3 syllables.\n
It only supports words with 2 to 3 syllables.""".format(word)
)
if not show_pronunciation:
_list_w = [i.replace("หฺ", "") for i in _list_w]
return _mix_list.join(_list_w)
| # -*- coding: utf-8 -*-
from pythainlp.transliterate import pronunciate
from pythainlp import thai_consonants
_list_consonants = list(thai_consonants.replace("ห", ""))
def puan(word: str, show_pronunciation: bool = True) -> str:
"""
Thai Spoonerism
This function converts Thai word to spoonerized.
It only supports words with 2 to 3 syllables.
:param str word: Thai word to be spoonerism
:param bool show_pronunciation: True (default) or False
:return: A string of Thai spoonerism word.
:rtype: str
:Example:
::
from pythainlp.transliterate import puan
puan("นาริน")
# output: 'นิน-รา'
puan("นาริน", False)
# output: 'นินรา'
"""
word = pronunciate(word, engine="w2p")
_list_char = []
_list_pron = word.split('-')
_mix_list = ""
if len(_list_pron) == 1:
return word[0]
if show_pronunciation:
_mix_list = "-"
for i in _list_pron:
for j in i:
if j in _list_consonants:
_list_char.append(j)
break
elif "ห" in j and "หฺ" not in i:
_list_char.append(j)
break
list_w_char = list(zip(_list_pron, _list_char))
_list_w = []
if len(list_w_char) == 2:
_list_w.append(
list_w_char[1][0].replace(list_w_char[1][1], list_w_char[0][1], 1)
)
_list_w.append(
list_w_char[0][0].replace(list_w_char[0][1], list_w_char[1][1], 1)
)
elif len(list_w_char) == 3:
_list_w.append(_list_pron[0])
_list_w.append(
list_w_char[2][0].replace(list_w_char[2][1], list_w_char[1][1], 1)
)
_list_w.append(list_w_char[1][0].replace(
list_w_char[1][1], list_w_char[2][1], 1)
)
else: # > 3 syllables
raise ValueError(
"""{0} is more than 3 syllables.\n
It only supports words with 2 to 3 syllables.""".format(word)
)
if not show_pronunciation:
_list_w = [i.replace("หฺ", "") for i in _list_w]
return _mix_list.join(_list_w)
| Python | 0 |
9abe7a776c4b0a4995a1c3a3d16f02bcba93f12e | add sin flute | audio/fourier_an_audio.py | audio/fourier_an_audio.py | #Milton Orlando Sarria
#analisis espectral de sinusoides
import matplotlib.pyplot as plt
import numpy as np
from fourierFunc import fourierAn
import wav_rw as wp
from scipy.signal import get_window
filename1='sound/flute-A4.wav'
filename2='sound/violin-B3.wav'
#leer los archivos de audio
fs,x1=wp.wavread(filename1)
fs,x2=wp.wavread(filename2)
t=(np.arange(1,5*fs))/float(fs)
#crear dos ventanas
w1 = get_window('hamming', x1.size); w1 = w1 / sum(w1)
w2 = get_window('hamming', x2.size); w2 = w2 / sum(w2)
#calcular el espectro de las ondas
absY1,mY1,pY1=fourierAn(x1*w1)
absY2,mY2,pY2=fourierAn(x2*w2)
#vector de frecuencias, desde -fs/2 a fs/2 (-pi<w<pi)
f=np.linspace(-fs/2,fs/2,absY1.size)
#visualizar las dos ondas
plt.subplot(321)
plt.plot(x1)
plt.title('onda sin ruido')
plt.subplot(323)
plt.plot(absY1)
plt.title('Espectro onda 1')
plt.subplot(325)
plt.plot(pY1)
plt.title('fase onda 1')
plt.subplot(322)
plt.plot(x2)
plt.title('onda 2 ')
plt.subplot(324)
plt.plot(absY2)
plt.title('Espectro 2')
plt.subplot(326)
plt.plot(pY2)
plt.title('fase onda 2')
indx1=np.array([48355,49307,50265, 51222,52167])
indx2=np.array([48073,48606,49138, 50203])
f1=f[indx1]#np.array([443.7, 886.63, 1329.94])
f2=f[indx2]#np.array([312.6, 560.54, 808.01] )
A2=absY1[indx1]#np.array([0.02638, 0.13159, 0.03147])
A1=absY2[indx2]#np.array([0.0270,0.02018,0.00362])
p1=pY1[indx1]#np.array([-14.42432594, -70.36247253, -68.44787598])
p2=pY2[indx2]#np.array([-131.58657837, -428.93927002, -783.9352417 ])
y1=np.zeros(t.size)
y2=np.zeros(t.size)
for i in range(3):
fii=A1[i]*np.cos(2*np.pi*f1[i]*t+p1[i])
y1=y1+fii
fii=A2[i]*np.cos(2*np.pi*f2[i]*t+p2[i])
y2=y2+fii
plt.figure(2)
plt.subplot(211)
plt.plot(y1)
plt.title('onda 1')
plt.subplot(212)
plt.plot(y2)
plt.title('onda 2')
plt.show()
| #Milton Orlando Sarria
#analisis espectral de sinusoides
import matplotlib.pyplot as plt
import numpy as np
from fourierFunc import fourierAn
import wav_rw as wp
filename1='sound/flute-A4.wav'
filename2='sound/violin-B3.wav'
#leer los archivos de audio
fs,x1=wp.wavread(filename1)
fs,x2=wp.wavread(filename2)
t=(np.arange(1,5*fs))/float(fs)
#calcular el espectro de las ondas
absY1,mY1,pY1=fourierAn(x1)
absY2,mY2,pY2=fourierAn(x2)
#vector de frecuencias, desde -fs/2 a fs/2 (-pi<w<pi)
f=np.linspace(-fs/2,fs/2,absY1.size)
#visualizar las dos ondas
plt.subplot(321)
plt.plot(x1)
plt.title('onda sin ruido')
plt.subplot(323)
plt.plot(absY1)
plt.title('Espectro onda 1')
plt.subplot(325)
plt.plot(pY1)
plt.title('fase onda 1')
plt.subplot(322)
plt.plot(x2)
plt.title('onda 2 ')
plt.subplot(324)
plt.plot(absY2)
plt.title('Espectro 2')
plt.subplot(326)
plt.plot(pY2)
plt.title('fase onda 2')
#indx1=np.array([48355 49307 50260])
#indx1=np.array([48073 48606 49138]
f1=np.array([443.7, 886.63, 1329.94])
f2=np.array([312.6, 560.54, 808.01] )
A2=np.array([0.02638, 0.13159, 0.03147])
A1=np.array([0.0270,0.02018,0.00362])
y1=np.zeros(t.size)
y2=np.zeros(t.size)
for i in range(3):
fii=A1[i]*np.cos(2*np.pi*f1[i]*t)
y1=y1+fii
fii=A2[i]*np.cos(2*np.pi*f2[i]*t)
y2=y2+fii
plt.figure(2)
plt.subplot(211)
plt.plot(y1)
plt.title('onda 1')
plt.subplot(212)
plt.plot(y2)
plt.title('onda 2')
plt.show()
| Python | 0.999997 |
0b7f25c92a2d0798a535487aa5305a793e998214 | Fix line replacement logic | homely/general.py | homely/general.py | import os
from homely.engine import add
from homely.utils import filereplacer
def lineinfile(filename, contents, prefix=None, regex=None):
filename = os.path.expanduser(filename)
obj = LineInFile(filename=filename, contents=contents)
if prefix is not None:
obj.findprefix(prefix)
elif regex is not None:
obj.findregex(regex)
add(obj)
class UpdateHelper(object):
_kwargs = None
uniqueid = None
def __init__(self, **kwargs):
self._kwargs = kwargs
items = [self.__class__.__name__]
for key in sorted(self._kwargs):
items.extend([key, self._kwargs[key]])
self.uniqueid = repr(items)
def asdict(self):
return {"class": self.__class__.__name__, "kwargs": self._kwargs}
class LineInFile(UpdateHelper):
_filename = None
_contents = None
_findprefix = None
_findregex = None
def __init__(self, **kwargs):
super(LineInFile, self).__init__(**kwargs)
self._filename = kwargs["filename"]
self._contents = kwargs["contents"]
def findprefix(self, prefix):
self._findprefix = prefix
def findregex(self, regex):
self._findregex = regex
def isdone(self):
try:
with open(self._filename) as f:
for line in f.readlines():
if line.rstrip() == self._contents:
return True
except FileNotFoundError:
pass
return False
def descchanges(self):
return "Adding line to %s: %s" % (self._filename, self._contents)
def makechanges(self, prevchanges):
changes = {
"old_line": None,
}
if self._findprefix:
matchline = lambda line: line.startswith(self._findprefix)
elif self._findregex:
# FIXME: implement regex matching
raise Exception("FIXME: implement regex") # noqa
else:
matchline = lambda line: line.rstrip() == self._contents
with filereplacer(self._filename) as (tmp, orig):
modified = False
if orig is not None:
# read through the original file and look for a line to replace
for line in orig.readlines():
if not modified and matchline(line):
modified = True
tmp.write(self._contents)
# FIXME: respect the existing lines' line endings!
tmp.write("\n")
if "old_line" not in changes:
changes["old_line"] = line.rstrip()
else:
tmp.write(line)
# if we didn't write out the new line by replacing parts of the original, then we'll
# just have to pop the new line on the end
if not modified:
tmp.write(self._contents)
# FIXME: respect the existing lines' line endings!
tmp.write("\n")
changes["old_line"] = None
return changes
| import os
from homely.engine import add
from homely.utils import filereplacer
def lineinfile(filename, contents, prefix=None, regex=None):
filename = os.path.expanduser(filename)
obj = LineInFile(filename=filename, contents=contents)
if prefix is not None:
obj.findprefix(prefix)
elif regex is not None:
obj.findregex(regex)
add(obj)
class UpdateHelper(object):
_kwargs = None
uniqueid = None
def __init__(self, **kwargs):
self._kwargs = kwargs
items = [self.__class__.__name__]
for key in sorted(self._kwargs):
items.extend([key, self._kwargs[key]])
self.uniqueid = repr(items)
def asdict(self):
return {"class": self.__class__.__name__, "kwargs": self._kwargs}
class LineInFile(UpdateHelper):
_filename = None
_contents = None
_findprefix = None
_findregex = None
def __init__(self, **kwargs):
super(LineInFile, self).__init__(**kwargs)
self._filename = kwargs["filename"]
self._contents = kwargs["contents"]
def findprefix(self, prefix):
self._findprefix = prefix
def findregex(self, regex):
self._findregex = regex
def isdone(self):
try:
with open(self._filename) as f:
for line in f.readlines():
if line.rstrip() == self._contents:
return True
except FileNotFoundError:
pass
return False
def descchanges(self):
return "Adding line to %s: %s" % (self._filename, self._contents)
def makechanges(self, prevchanges):
changes = {
"old_line": None,
}
if self._findprefix:
matchline = lambda line: line.startswith(self._findprefix)
elif self._findregex:
# FIXME: implement regex matching
raise Exception("FIXME: implement regex") # noqa
else:
matchline = lambda line: line.rstrip() == self._contents
with filereplacer(self._filename) as (tmp, orig):
modified = False
if orig is not None:
# read through the original file and look for a line to replace
for line in orig.readlines():
if modified:
tmp.write(line)
elif matchline(line):
modified = True
tmp.write(self._contents)
# FIXME: respect the existing lines' line endings!
tmp.write("\n")
if "old_line" not in changes:
changes["old_line"] = line.rstrip()
# if we didn't write out the new line by replacing parts of the original, then we'll
# just have to pop the new line on the end
if not modified:
tmp.write(self._contents)
# FIXME: respect the existing lines' line endings!
tmp.write("\n")
changes["old_line"] = None
return changes
| Python | 0.000004 |
53b43e51c4d073dae4f3ccad896ba1744ca1284b | Update version | auth_backends/_version.py | auth_backends/_version.py | __version__ = '0.1.2' # pragma: no cover
| __version__ = '0.1.1' # pragma: no cover
| Python | 0 |
f57a2c9124da513734a8e4934b8a02903109077e | Remove hardcoded backgrount from molecule svg | girder/molecules/server/openbabel.py | girder/molecules/server/openbabel.py | from girder.api.rest import RestException
from openbabel import OBMol, OBConversion
import pybel
import re
inchi_validator = re.compile('InChI=[0-9]S?\/')
# This function only validates the first part. It does not guarantee
# that the entire InChI is valid.
def validate_start_of_inchi(inchi):
if not inchi_validator.match(inchi):
raise RestException('Invalid InChI: "' + inchi +'"', 400)
# gen3d should be true for 2D input formats such as inchi or smiles
def convert_str(str_data, in_format, out_format, gen3d=False, out_options=None):
# Make sure that the start of InChI is valid before passing it to
# Open Babel, or Open Babel will crash the server.
if in_format.lower() == 'inchi':
validate_start_of_inchi(str_data)
if out_options is None:
out_options = {}
obMol = OBMol()
conv = OBConversion()
conv.SetInFormat(in_format)
conv.SetOutFormat(out_format)
conv.ReadString(obMol, str_data)
if gen3d:
# Generate 3D coordinates for the input
mol = pybel.Molecule(obMol)
mol.make3D()
for option, value in out_options.items():
conv.AddOption(option, conv.OUTOPTIONS, value)
return (conv.WriteString(obMol), conv.GetOutFormat().GetMIMEType())
def to_inchi(str_data, in_format):
mol = OBMol()
conv = OBConversion()
conv.SetInFormat(in_format)
# Hackish for now, convert to xyz first...
conv.SetOutFormat('xyz')
conv.ReadString(mol, str_data)
xyz = conv.WriteString(mol)
# Now convert to inchi and inchikey.
mol = OBMol()
conv.SetInFormat('xyz')
conv.ReadString(mol, xyz)
conv.SetOutFormat('inchi')
inchi = conv.WriteString(mol).rstrip()
conv.SetOptions("K", conv.OUTOPTIONS)
inchikey = conv.WriteString(mol).rstrip()
return (inchi, inchikey)
def from_inchi(str_data, out_format):
return convert_str(str_data, 'inchi', out_format, True)
def to_smiles(str_data, in_format):
# This returns ["<smiles>", "chemical/x-daylight-smiles"]
# Keep only the first part.
# The smiles has returns at the end of it, and may contain
# a return in the middle with a common name. Get rid of
# all of these.
# Use canonical smiles
smiles = convert_str(str_data, in_format, 'can')[0].strip()
return smiles.split()[0]
def from_smiles(str_data, out_format):
return convert_str(str_data, 'smi', out_format, True)
def atom_count(str_data, in_format):
mol = OBMol()
conv = OBConversion()
conv.SetInFormat(in_format)
conv.ReadString(mol, str_data)
return mol.NumAtoms()
def get_formula(str_data, in_format):
# Inchi must start with 'InChI='
if in_format == 'inchi' and not str_data.startswith('InChI='):
str_data = 'InChI=' + str_data
validate_start_of_inchi(str_data)
# Get the molecule using the "Hill Order" - i. e., C first, then H,
# and then alphabetical.
mol = OBMol()
conv = OBConversion()
conv.SetInFormat(in_format)
conv.ReadString(mol, str_data)
return mol.GetFormula()
def to_svg(str_data, in_format):
out_options = {
'b': 'none', # transparent background color
'B': 'black' # black bonds color
}
return convert_str(str_data, in_format, 'svg', out_options=out_options)[0]
| from girder.api.rest import RestException
from openbabel import OBMol, OBConversion
import pybel
import re
inchi_validator = re.compile('InChI=[0-9]S?\/')
# This function only validates the first part. It does not guarantee
# that the entire InChI is valid.
def validate_start_of_inchi(inchi):
if not inchi_validator.match(inchi):
raise RestException('Invalid InChI: "' + inchi +'"', 400)
# gen3d should be true for 2D input formats such as inchi or smiles
def convert_str(str_data, in_format, out_format, gen3d=False, out_options=None):
# Make sure that the start of InChI is valid before passing it to
# Open Babel, or Open Babel will crash the server.
if in_format.lower() == 'inchi':
validate_start_of_inchi(str_data)
if out_options is None:
out_options = {}
obMol = OBMol()
conv = OBConversion()
conv.SetInFormat(in_format)
conv.SetOutFormat(out_format)
conv.ReadString(obMol, str_data)
if gen3d:
# Generate 3D coordinates for the input
mol = pybel.Molecule(obMol)
mol.make3D()
for option, value in out_options.items():
conv.AddOption(option, conv.OUTOPTIONS, value)
return (conv.WriteString(obMol), conv.GetOutFormat().GetMIMEType())
def to_inchi(str_data, in_format):
mol = OBMol()
conv = OBConversion()
conv.SetInFormat(in_format)
# Hackish for now, convert to xyz first...
conv.SetOutFormat('xyz')
conv.ReadString(mol, str_data)
xyz = conv.WriteString(mol)
# Now convert to inchi and inchikey.
mol = OBMol()
conv.SetInFormat('xyz')
conv.ReadString(mol, xyz)
conv.SetOutFormat('inchi')
inchi = conv.WriteString(mol).rstrip()
conv.SetOptions("K", conv.OUTOPTIONS)
inchikey = conv.WriteString(mol).rstrip()
return (inchi, inchikey)
def from_inchi(str_data, out_format):
return convert_str(str_data, 'inchi', out_format, True)
def to_smiles(str_data, in_format):
# This returns ["<smiles>", "chemical/x-daylight-smiles"]
# Keep only the first part.
# The smiles has returns at the end of it, and may contain
# a return in the middle with a common name. Get rid of
# all of these.
# Use canonical smiles
smiles = convert_str(str_data, in_format, 'can')[0].strip()
return smiles.split()[0]
def from_smiles(str_data, out_format):
return convert_str(str_data, 'smi', out_format, True)
def atom_count(str_data, in_format):
mol = OBMol()
conv = OBConversion()
conv.SetInFormat(in_format)
conv.ReadString(mol, str_data)
return mol.NumAtoms()
def get_formula(str_data, in_format):
# Inchi must start with 'InChI='
if in_format == 'inchi' and not str_data.startswith('InChI='):
str_data = 'InChI=' + str_data
validate_start_of_inchi(str_data)
# Get the molecule using the "Hill Order" - i. e., C first, then H,
# and then alphabetical.
mol = OBMol()
conv = OBConversion()
conv.SetInFormat(in_format)
conv.ReadString(mol, str_data)
return mol.GetFormula()
def to_svg(str_data, in_format):
return convert_str(str_data, in_format, 'svg')[0]
| Python | 0.00027 |
2321ddb5f6d7731597f4f122a87041933348f064 | Enable Unicode tests | gmn/src/d1_gmn/tests/test_unicode.py | gmn/src/d1_gmn/tests/test_unicode.py | # -*- coding: utf-8 -*-
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2016 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test handling of Unicode in D1 REST URLs and type elements
"""
from __future__ import absolute_import
import logging
import responses
import d1_gmn.tests.gmn_test_case
import d1_common
import d1_common.system_metadata
class TestUnicode(d1_gmn.tests.gmn_test_case.GMNTestCase):
@responses.activate
def test_1000(self, mn_client_v1_v2):
"""Unicode: GMN and libraries handle Unicode correctly"""
with d1_gmn.tests.gmn_mock.disable_auth():
tricky_unicode_str = self.load_sample_utf8_to_unicode(
'tricky_identifiers_unicode.utf8.txt'
)
for line in tricky_unicode_str.splitlines():
pid_unescaped, pid_escaped = line.split('\t')
logging.debug(u'Testing PID: {}'.format(pid_unescaped))
pid, sid, send_sciobj_str, send_sysmeta_pyxb = self.create_obj(
mn_client_v1_v2, pid=pid_unescaped, sid=True
)
recv_sciobj_str, recv_sysmeta_pyxb = self.get_obj(mn_client_v1_v2, pid)
assert d1_common.system_metadata.is_equivalent_pyxb(
send_sysmeta_pyxb, recv_sysmeta_pyxb, ignore_timestamps=True
)
assert pid == pid_unescaped
assert recv_sysmeta_pyxb.identifier.value() == pid_unescaped
mn_client_v1_v2.delete(pid)
| # -*- coding: utf-8 -*-
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2016 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test handling of Unicode in D1 REST URLs and type elements
"""
from __future__ import absolute_import
import logging
import pytest
import responses
import d1_gmn.tests.gmn_test_case
import d1_common
import d1_common.system_metadata
@pytest.mark.skip('TODO')
class TestUnicode(d1_gmn.tests.gmn_test_case.GMNTestCase):
@responses.activate
def test_1000(self):
"""Unicode: GMN and libraries handle Unicode correctly"""
def test(client):
tricky_unicode_str = self.load_sample_utf8_to_unicode(
'tricky_identifiers_unicode.utf8.txt'
)
for line in tricky_unicode_str.splitlines():
pid_unescaped, pid_escaped = line.split('\t')
logging.debug(u'Testing PID: {}'.format(pid_unescaped))
pid, sid, send_sciobj_str, send_sysmeta_pyxb = self.create_obj(
client, pid=pid_unescaped, sid=True
)
recv_sciobj_str, recv_sysmeta_pyxb = self.get_obj(client, pid)
# self.assertEquals(send_sciobj_str, recv_sciobj_str)
assert d1_common.system_metadata.is_equivalent_pyxb(
send_sysmeta_pyxb, recv_sysmeta_pyxb, ignore_timestamps=True
)
client.delete(pid)
with d1_gmn.tests.gmn_mock.disable_auth():
test(self.client_v1)
test(self.client_v2)
| Python | 0.000001 |
3868a4ef30835ed1904a37318013e20f2295a8a9 | Remove fantastic from COB theme | ckanext/cob/plugin.py | ckanext/cob/plugin.py | import ckan.plugins as plugins
import ckan.plugins.toolkit as toolkit
def groups():
# Return a list of groups
return toolkit.get_action('group_list')(data_dict={'all_fields': True})
def dataset_count():
# Return a count of all datasets
result = toolkit.get_action('package_search')(data_dict={'rows': 1})
return result['count']
class CobPlugin(plugins.SingletonPlugin):
plugins.implements(plugins.IConfigurer)
plugins.implements(plugins.ITemplateHelpers)
# IConfigurer
def update_config(self, config_):
toolkit.add_template_directory(config_, 'templates')
toolkit.add_public_directory(config_, 'public')
def get_helpers(self):
# Register cob_theme_* helper functions
return {'cob_theme_groups': groups,
'cob_theme_dataset_count': dataset_count}
| import ckan.plugins as plugins
import ckan.plugins.toolkit as toolkit
def groups():
# Return a list of groups
return toolkit.get_action('group_list')(data_dict={'all_fields': True})
def dataset_count():
# Return a count of all datasets
result = toolkit.get_action('package_search')(data_dict={'rows': 1})
return result['count']
class CobPlugin(plugins.SingletonPlugin):
plugins.implements(plugins.IConfigurer)
plugins.implements(plugins.ITemplateHelpers)
# IConfigurer
def update_config(self, config_):
toolkit.add_template_directory(config_, 'templates')
toolkit.add_public_directory(config_, 'public')
toolkit.add_resource('fanstatic', 'cob')
def get_helpers(self):
# Register cob_theme_* helper functions
return {'cob_theme_groups': groups,
'cob_theme_dataset_count': dataset_count}
| Python | 0 |
46c1c21c0190aa95f4fede8fa1d98bbae7cf38c5 | test mode defaults to true - fix for #21 | ckanext/doi/config.py | ckanext/doi/config.py | #!/usr/bin/env python
# encoding: utf-8
"""
Created by 'bens3' on 2013-06-21.
Copyright (c) 2013 'bens3'. All rights reserved.
"""
from pylons import config
from paste.deploy.converters import asbool
TEST_PREFIX = '10.5072'
ENDPOINT = 'https://mds.datacite.org'
TEST_ENDPOINT = 'https://test.datacite.org/mds'
def get_test_mode():
"""
Get test mode as boolean
@return:
"""
return asbool(config.get("ckanext.doi.test_mode", True))
def get_prefix():
"""
Get the prefix to use for DOIs
@return: test prefix if we're in test mode, otherwise config prefix setting
"""
return TEST_PREFIX if get_test_mode() else config.get("ckanext.doi.prefix")
def get_endpoint():
"""
Get the DataCite endpoint
@return: test endpoint if we're in test mode
"""
return TEST_ENDPOINT if get_test_mode() else ENDPOINT
def get_site_url():
"""
Get the site URL
Try and use ckanext.doi.site_url but if that's not set use ckan.site_url
@return:
"""
site_url = config.get("ckanext.doi.site_url")
if not site_url:
site_url = config.get('ckan.site_url')
return site_url.rstrip('/') | #!/usr/bin/env python
# encoding: utf-8
"""
Created by 'bens3' on 2013-06-21.
Copyright (c) 2013 'bens3'. All rights reserved.
"""
from pylons import config
from paste.deploy.converters import asbool
TEST_PREFIX = '10.5072'
ENDPOINT = 'https://mds.datacite.org'
TEST_ENDPOINT = 'https://test.datacite.org/mds'
def get_test_mode():
"""
Get test mode as boolean
@return:
"""
return asbool(config.get("ckanext.doi.test_mode"))
def get_prefix():
"""
Get the prefix to use for DOIs
@return: test prefix if we're in test mode, otherwise config prefix setting
"""
return TEST_PREFIX if get_test_mode() else config.get("ckanext.doi.prefix")
def get_endpoint():
"""
Get the DataCite endpoint
@return: test endpoint if we're in test mode
"""
return TEST_ENDPOINT if get_test_mode() else ENDPOINT
def get_site_url():
"""
Get the site URL
Try and use ckanext.doi.site_url but if that's not set use ckan.site_url
@return:
"""
site_url = config.get("ckanext.doi.site_url")
if not site_url:
site_url = config.get('ckan.site_url')
return site_url.rstrip('/') | Python | 0.000001 |
3c1b5c425109d24eca552e60e859d7d747607492 | Fix UP in TRRUST | indra/sources/trrust/processor.py | indra/sources/trrust/processor.py | from copy import deepcopy
from indra.databases import hgnc_client
from indra.statements import Agent, IncreaseAmount, DecreaseAmount, Evidence
class TrrustProcessor(object):
"""Processor to extract INDRA Statements from Trrust data frame.
Attributes
----------
df : pandas.DataFrame
The Trrust table to process.
statements : list[indra.statements.Statement]
The list of INDRA Statements extracted from the table.
"""
def __init__(self, df):
self.df = df
self.statements = []
def extract_statements(self):
"""Process the table to extract Statements."""
for _, (tf, target, effect, refs) in self.df.iterrows():
tf_agent = get_grounded_agent(tf)
target_agent = get_grounded_agent(target)
if effect == 'Activation':
stmt_cls = IncreaseAmount
elif effect == 'Repression':
stmt_cls = DecreaseAmount
else:
continue
pmids = refs.split(';')
for pmid in pmids:
stmt = make_stmt(stmt_cls, tf_agent, target_agent, pmid)
self.statements.append(stmt)
def make_stmt(stmt_cls, tf_agent, target_agent, pmid):
"""Return a Statement based on its type, agents, and PMID."""
ev = Evidence(source_api='trrust', pmid=pmid)
return stmt_cls(deepcopy(tf_agent), deepcopy(target_agent),
evidence=[ev])
def get_grounded_agent(gene_name):
"""Return a grounded Agent based on an HGNC symbol."""
db_refs = {'TEXT': gene_name}
if gene_name in hgnc_map:
gene_name = hgnc_map[gene_name]
hgnc_id = hgnc_client.get_hgnc_id(gene_name)
if not hgnc_id:
hgnc_id = hgnc_client.get_current_hgnc_id(gene_name)
if hgnc_id:
db_refs['HGNC'] = hgnc_id
up_id = hgnc_client.get_uniprot_id(hgnc_id)
if up_id and ',' not in up_id:
db_refs['UP'] = up_id
agent = Agent(gene_name, db_refs=db_refs)
return agent
hgnc_map = {
'CTGF': 'CCN2',
'CYR61': 'CCN1',
'MKL1': 'MRTFA',
'NOV': 'CCN3',
'RFWD2': 'COP1',
'SALL4A': 'SALL4',
'STAT5': 'STAT5A',
'TRAP': 'ACP5',
'AES': 'TLE5',
'SEPT7': 'SEPTIN7'
}
| from copy import deepcopy
from indra.databases import hgnc_client
from indra.statements import Agent, IncreaseAmount, DecreaseAmount, Evidence
class TrrustProcessor(object):
"""Processor to extract INDRA Statements from Trrust data frame.
Attributes
----------
df : pandas.DataFrame
The Trrust table to process.
statements : list[indra.statements.Statement]
The list of INDRA Statements extracted from the table.
"""
def __init__(self, df):
self.df = df
self.statements = []
def extract_statements(self):
"""Process the table to extract Statements."""
for _, (tf, target, effect, refs) in self.df.iterrows():
tf_agent = get_grounded_agent(tf)
target_agent = get_grounded_agent(target)
if effect == 'Activation':
stmt_cls = IncreaseAmount
elif effect == 'Repression':
stmt_cls = DecreaseAmount
else:
continue
pmids = refs.split(';')
for pmid in pmids:
stmt = make_stmt(stmt_cls, tf_agent, target_agent, pmid)
self.statements.append(stmt)
def make_stmt(stmt_cls, tf_agent, target_agent, pmid):
"""Return a Statement based on its type, agents, and PMID."""
ev = Evidence(source_api='trrust', pmid=pmid)
return stmt_cls(deepcopy(tf_agent), deepcopy(target_agent),
evidence=[ev])
def get_grounded_agent(gene_name):
"""Return a grounded Agent based on an HGNC symbol."""
db_refs = {'TEXT': gene_name}
if gene_name in hgnc_map:
gene_name = hgnc_map[gene_name]
hgnc_id = hgnc_client.get_hgnc_id(gene_name)
if not hgnc_id:
hgnc_id = hgnc_client.get_current_hgnc_id(gene_name)
if hgnc_id:
db_refs['HGNC'] = hgnc_id
up_id = hgnc_client.get_uniprot_id(hgnc_id)
if up_id:
db_refs['UP'] = up_id
agent = Agent(gene_name, db_refs=db_refs)
return agent
hgnc_map = {
'CTGF': 'CCN2',
'CYR61': 'CCN1',
'MKL1': 'MRTFA',
'NOV': 'CCN3',
'RFWD2': 'COP1',
'SALL4A': 'SALL4',
'STAT5': 'STAT5A',
'TRAP': 'ACP5',
'AES': 'TLE5',
'SEPT7': 'SEPTIN7'
}
| Python | 0.000001 |
bf8ab86d536570790d135f0f46c97ffb30a83535 | update background_substraction.py | background_subtraction.py | background_subtraction.py | # Reference: http://docs.opencv.org/master/db/d5c/tutorial_py_bg_subtraction.html
# requires opencv v3.1.0
import numpy as np
import cv2
# TODO: remove hard coded file name
cap = cv2.VideoCapture('videos/sample_video_2.mp4')
# Here are the 3 ways of background subtraction
# createBackgroundSubtractorMOG2 seems to give the best result. Need more testing.
fgbg = cv2.createBackgroundSubtractorMOG2()
#fgbg = cv2.bgsegm.createBackgroundSubtractorMOG()
#fgbg = cv2.bgsegm.createBackgroundSubtractorGMG()
while(1):
ret, frame = cap.read()
fgmask = fgbg.apply(frame)
cv2.imshow('frame',fgmask)
k = cv2.waitKey(30) & 0xff
if k == 27:
break
cap.release()
cv2.destroyAllWindows()
| # Reference: http://docs.opencv.org/master/db/d5c/tutorial_py_bg_subtraction.html
import numpy as np
import cv2
# TODO: remove hard coded file name
cap = cv2.VideoCapture('videos/sample_video_2.mp4')
# Here are the 3 ways of background subtraction
# createBackgroundSubtractorMOG2 seems to give the best result. Need more testing.
fgbg = cv2.createBackgroundSubtractorMOG2()
#fgbg = cv2.bgsegm.createBackgroundSubtractorMOG()
#fgbg = cv2.bgsegm.createBackgroundSubtractorGMG()
while(1):
ret, frame = cap.read()
fgmask = fgbg.apply(frame)
cv2.imshow('frame',fgmask)
k = cv2.waitKey(30) & 0xff
if k == 27:
break
cap.release()
cv2.destroyAllWindows()
| Python | 0.000001 |
1f815ad5cb7535132a60297808abfa959709ba65 | Fix redirect_to_login | daiquiri/files/views.py | daiquiri/files/views.py | import logging
from django.contrib.auth.views import redirect_to_login
from django.core.exceptions import PermissionDenied
from django.http import Http404
from django.views.generic import View
from .utils import file_exists, check_file, send_file
logger = logging.getLogger(__name__)
class FileView(View):
def get(self, request, file_path):
# append 'index.html' when the file_path is a directory
if not file_path or file_path.endswith('/'):
file_path += 'index.html'
if not file_exists(file_path):
logger.debug('%s not found', file_path)
raise Http404
if check_file(request.user, file_path):
return send_file(request, file_path)
else:
logger.debug('%s if forbidden', file_path)
if request.user.is_authenticated:
raise PermissionDenied
else:
return redirect_to_login(request.path_info)
# if nothing worked, return 404
raise Http404
| import logging
from django.core.exceptions import PermissionDenied
from django.http import Http404
from django.shortcuts import redirect
from django.views.generic import View
from .utils import file_exists, check_file, send_file
logger = logging.getLogger(__name__)
class FileView(View):
def get(self, request, file_path):
# append 'index.html' when the file_path is a directory
if not file_path or file_path.endswith('/'):
file_path += 'index.html'
if not file_exists(file_path):
logger.debug('%s not found', file_path)
raise Http404
if check_file(request.user, file_path):
return send_file(request, file_path)
else:
logger.debug('%s if forbidden', file_path)
if request.user.is_authenticated:
raise PermissionDenied
else:
return redirect('account_login')
# if nothing worked, return 404
raise Http404
| Python | 0.000011 |
3ce5f60102d5de7367a06e7412e9e31597e40a58 | revert to original hello world | click_tutorial/cli.py | click_tutorial/cli.py | import click
@click.command()
def cli():
click.echo("Hello, World!")
if __name__ == '__main__':
cli()
| import click
@click.argument('name')
@click.command()
def cli(name):
click.echo("Hello, {0}!".format(name))
if __name__ == '__main__':
cli()
| Python | 0.999999 |
2a1ec4c0ea1904d7ea4728c08514cf5e5fa3ca44 | migrate utils tests | paramnormal/tests/test_utils.py | paramnormal/tests/test_utils.py | # -*- coding: utf-8 -*-
import numpy
import nose.tools as nt
from numpy.random import seed
import numpy.testing as nptest
from paramnormal import paramnormal, utils
def test_greco_deco():
d1 = paramnormal.normal._process_args(mu=1, sigma=2)
d2 = paramnormal.normal._process_args(μ=1, σ=2)
expected = dict(loc=1, scale=2)
nt.assert_dict_equal(d1, expected)
nt.assert_dict_equal(d2, expected)
d1 = paramnormal.beta._process_args(alpha=1, beta=2)
d2 = paramnormal.beta._process_args(α=1, β=2)
expected = {'a': 1, 'b': 2, 'loc': 0, 'scale': 1}
nt.assert_dict_equal(d1, expected)
nt.assert_dict_equal(d2, expected)
d1 = paramnormal.gamma._process_args(k=1, theta=2)
d2 = paramnormal.gamma._process_args(k=1, θ=2)
expected = {'a': 1, 'loc': 0, 'scale': 2}
nt.assert_dict_equal(d1, expected)
nt.assert_dict_equal(d2, expected)
def test__pop_none():
expected_no_Nones = dict(a=1, b=2, c=3)
expected_some_Nones = dict(a=1, c=3)
expected_all_Nones = dict()
nt.assert_dict_equal(utils._pop_none(a=1, b=2, c=3), expected_no_Nones)
nt.assert_dict_equal(utils._pop_none(a=1, b=None, c=3), expected_some_Nones)
nt.assert_dict_equal(utils._pop_none(a=None, b=None, c=None), expected_all_Nones)
| # -*- coding: utf-8 -*-
import numpy
import nose.tools as nt
from numpy.random import seed
import numpy.testing as nptest
from paramnormal import process_args
def test_greco_deco():
if not process_args.PY2:
d1 = process_args.normal(mu=1, sigma=2)
d2 = process_args.normal(μ=1, σ=2)
expected = dict(loc=1, scale=2)
nt.assert_dict_equal(d1, expected)
nt.assert_dict_equal(d2, expected)
d1 = process_args.beta(alpha=1, beta=2)
d2 = process_args.beta(α=1, β=2)
expected = {'a': 1, 'b': 2, 'loc': 0, 'scale': 1}
nt.assert_dict_equal(d1, expected)
nt.assert_dict_equal(d2, expected)
d1 = process_args.gamma(k=1, theta=2)
d2 = process_args.gamma(k=1, θ=2)
expected = {'a': 1, 'scale': 2}
nt.assert_dict_equal(d1, expected)
nt.assert_dict_equal(d2, expected)
def test_uniform_high_low():
nt.assert_dict_equal(
process_args.uniform(low=4, high=9),
dict(loc=4, scale=5)
)
def test_uniform_width_low():
nt.assert_dict_equal(
process_args.uniform(low=4, width=9),
dict(loc=4, scale=9)
)
@nt.raises(ValueError)
def test_uniform_no_low():
process_args.uniform(high=9)
def test_normal():
nt.assert_dict_equal(
process_args.normal(mu=2, sigma=2.45),
dict(loc=2, scale=2.45)
)
nt.assert_dict_equal(
process_args.normal(mu=2, sigma=2.45, fit=True),
dict(floc=2, fscale=2.45)
)
def test_lognormal():
nt.assert_dict_equal(
process_args.lognormal(mu=2, sigma=2.45),
dict(scale=numpy.exp(2), s=2.45, loc=0)
)
nt.assert_dict_equal(
process_args.lognormal(mu=2, sigma=2.45, fit=True),
dict(fscale=numpy.exp(2), f0=2.45, floc=0)
)
@nt.raises(ValueError)
def test_lognormal_no_offset():
process_args.lognormal(offset=None)
def test_beta():
nt.assert_dict_equal(
process_args.beta(alpha=2, beta=5),
dict(a=2, b=5, loc=0, scale=1)
)
nt.assert_dict_equal(
process_args.beta(alpha=2, beta=5, fit=True),
dict(f0=2, f1=5, floc=0, fscale=1)
)
def test_chi_squared():
nt.assert_dict_equal(
process_args.chi_squared(k=5),
dict(df=5, loc=0, scale=1)
)
nt.assert_dict_equal(
process_args.chi_squared(k=5, fit=True),
dict(f0=5, floc=0, fscale=1)
)
def test_pareto():
nt.assert_dict_equal(
process_args.pareto(alpha=4.78),
dict(b=4.78)
)
nt.assert_dict_equal(
process_args.pareto(alpha=4.78, fit=True),
dict(f0=4.78)
)
def test_gamma():
nt.assert_dict_equal(
process_args.gamma(k=1, theta=2),
dict(a=1, scale=2)
)
nt.assert_dict_equal(
process_args.gamma(k=1, theta=2, fit=True),
dict(f0=1, fscale=2)
)
def test_weibull():
nt.assert_dict_equal(
process_args.weibull(k=2),
dict(c=2, loc=0, scale=1)
)
nt.assert_dict_equal(
process_args.weibull(k=2, fit=True),
dict(f0=2, floc=0, fscale=1)
)
| Python | 0.000001 |
d44250f60e9676618170bd61f8f6bc438078ef87 | Add celery settings. | base/config/production.py | base/config/production.py | " Production settings must be here. "
from .core import *
from os import path as op
SECRET_KEY = 'SecretKeyForSessionSigning'
ADMINS = frozenset([MAIL_USERNAME])
# flask.ext.collect
# -----------------
COLLECT_STATIC_ROOT = op.join(op.dirname(ROOTDIR), 'static')
# dealer
DEALER_PARAMS = dict(
backends=('git', 'mercurial', 'simple', 'null')
)
# FQUEST settings
# ---------------
AUTH_LOGIN_VIEW = 'fquest.index'
AUTH_PROFILE_VIEW = 'fquest.profile'
OAUTH_FACEBOOK = dict(
consumer_key='365449256868307',
consumer_secret='899b2ea26ca77122eef981f4712aeb04',
params=dict(
scope="user_status,user_likes,user_activities,user_questions,user_events,user_videos,user_groups,user_relationships,user_notes,user_photos,offline_access,publish_actions"
)
)
# Cache
CACHE_TYPE = 'redis'
CACHE_REDIS_HOST = 'localhost'
CACHE_KEY_PREFIX = 'poliglot'
# Database settings
SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://fquest:fquest@localhost:5432/fquest_master'
# Celery settings
BROKER_URL = 'redis://localhost:6379/0'
# pymode:lint_ignore=W0614,W404
| " Production settings must be here. "
from .core import *
from os import path as op
SECRET_KEY = 'SecretKeyForSessionSigning'
ADMINS = frozenset([MAIL_USERNAME])
# flask.ext.collect
# -----------------
COLLECT_STATIC_ROOT = op.join(op.dirname(ROOTDIR), 'static')
# dealer
DEALER_PARAMS = dict(
backends=('git', 'mercurial', 'simple', 'null')
)
# FQUEST settings
# ---------------
AUTH_LOGIN_VIEW = 'fquest.index'
AUTH_PROFILE_VIEW = 'fquest.profile'
OAUTH_FACEBOOK = dict(
consumer_key='365449256868307',
consumer_secret='899b2ea26ca77122eef981f4712aeb04',
params=dict(
scope="user_status,user_likes,user_activities,user_questions,user_events,user_videos,user_groups,user_relationships,user_notes,user_photos,offline_access,publish_actions"
)
)
# Cache
CACHE_TYPE = 'redis'
CACHE_REDIS_HOST = 'localhost'
CACHE_KEY_PREFIX = 'poliglot'
# Database settings
SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://fquest:fquest@localhost:5432/fquest_master'
# pymode:lint_ignore=W0614,W404
| Python | 0 |
4c977a313942074cccdd6756762c5545e650cdc7 | Switch to NumPy's `ndindex` in `_cdist_apply` | dask_distance/_utils.py | dask_distance/_utils.py | import functools
import itertools
import numpy
import dask
import dask.array
from . import _compat
from . import _pycompat
def _broadcast_uv(u, v):
u = _compat._asarray(u)
v = _compat._asarray(v)
U = u
if U.ndim == 1:
U = U[None]
V = v
if V.ndim == 1:
V = V[None]
if U.ndim != 2:
raise ValueError("u must be a 1-D or 2-D array.")
if V.ndim != 2:
raise ValueError("v must be a 1-D or 2-D array.")
U = dask.array.repeat(U[:, None], len(V), axis=1)
V = dask.array.repeat(V[None, :], len(U), axis=0)
return U, V
def _unbroadcast_uv(u, v, result):
u = _compat._asarray(u)
v = _compat._asarray(v)
if v.ndim == 1:
result = result[:, 0]
if u.ndim == 1:
result = result[0]
return result
def _broadcast_uv_wrapper(func):
@functools.wraps(func)
def _wrapped_broadcast_uv(u, v):
U, V = _broadcast_uv(u, v)
result = func(U, V)
result = _unbroadcast_uv(u, v, result)
return result
return _wrapped_broadcast_uv
def _cdist_apply(U, V, metric):
result = numpy.empty(U.shape[:-1], dtype=float)
for i in numpy.ndindex(result.shape):
result[i] = metric(U[i], V[i])
return result
def _bool_cmp_cnts(U, V):
U = _compat._asarray(U)
V = _compat._asarray(V)
U = U.astype(bool)
V = V.astype(bool)
U_01 = [~U, U]
V_01 = [~V, V]
UV_cmp_cnts = numpy.empty((2, 2), dtype=object)
UV_ranges = [_pycompat.irange(e) for e in UV_cmp_cnts.shape]
for i, j in itertools.product(*UV_ranges):
UV_cmp_cnts[i, j] = (U_01[i] & V_01[j]).sum(axis=-1, dtype=float)
for i in _pycompat.irange(UV_cmp_cnts.ndim - 1, -1, -1):
UV_cmp_cnts2 = UV_cmp_cnts[..., 0]
for j in itertools.product(*(UV_ranges[:i])):
UV_cmp_cnts2[j] = dask.array.stack(UV_cmp_cnts[j].tolist(), axis=0)
UV_cmp_cnts = UV_cmp_cnts2
UV_cmp_cnts = UV_cmp_cnts[()]
return UV_cmp_cnts
| import functools
import itertools
import numpy
import dask
import dask.array
from . import _compat
from . import _pycompat
def _broadcast_uv(u, v):
u = _compat._asarray(u)
v = _compat._asarray(v)
U = u
if U.ndim == 1:
U = U[None]
V = v
if V.ndim == 1:
V = V[None]
if U.ndim != 2:
raise ValueError("u must be a 1-D or 2-D array.")
if V.ndim != 2:
raise ValueError("v must be a 1-D or 2-D array.")
U = dask.array.repeat(U[:, None], len(V), axis=1)
V = dask.array.repeat(V[None, :], len(U), axis=0)
return U, V
def _unbroadcast_uv(u, v, result):
u = _compat._asarray(u)
v = _compat._asarray(v)
if v.ndim == 1:
result = result[:, 0]
if u.ndim == 1:
result = result[0]
return result
def _broadcast_uv_wrapper(func):
@functools.wraps(func)
def _wrapped_broadcast_uv(u, v):
U, V = _broadcast_uv(u, v)
result = func(U, V)
result = _unbroadcast_uv(u, v, result)
return result
return _wrapped_broadcast_uv
def _cdist_apply(U, V, metric):
result = numpy.empty(U.shape[:-1], dtype=float)
for i in itertools.product(*[_pycompat.irange(e) for e in result.shape]):
result[i] = metric(U[i], V[i])
return result
def _bool_cmp_cnts(U, V):
U = _compat._asarray(U)
V = _compat._asarray(V)
U = U.astype(bool)
V = V.astype(bool)
U_01 = [~U, U]
V_01 = [~V, V]
UV_cmp_cnts = numpy.empty((2, 2), dtype=object)
UV_ranges = [_pycompat.irange(e) for e in UV_cmp_cnts.shape]
for i, j in itertools.product(*UV_ranges):
UV_cmp_cnts[i, j] = (U_01[i] & V_01[j]).sum(axis=-1, dtype=float)
for i in _pycompat.irange(UV_cmp_cnts.ndim - 1, -1, -1):
UV_cmp_cnts2 = UV_cmp_cnts[..., 0]
for j in itertools.product(*(UV_ranges[:i])):
UV_cmp_cnts2[j] = dask.array.stack(UV_cmp_cnts[j].tolist(), axis=0)
UV_cmp_cnts = UV_cmp_cnts2
UV_cmp_cnts = UV_cmp_cnts[()]
return UV_cmp_cnts
| Python | 0 |
bd712dad2709ba31be89f48f283084d5894cb378 | Replace dot in archive thumbnail name by underscore | ipol_demo/modules/core/archive.py | ipol_demo/modules/core/archive.py | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
"""
Helper functions for core, related to the archive module.
"""
import gzip
import json
import os
import traceback
from collections import OrderedDict
import requests
from ipolutils.utils import thumbnail
def create_thumbnail(src_file):
"""
Create thumbnail when possible from file to archive in run folder,
returns the filepath of thumbnail when created.
"""
thumb_height = 128
if not os.path.exists(src_file):
return False
thumb_name = os.path.basename(src_file).replace(".", "_")
thumb_name = thumb_name.lower() + '_thumbnail.jpeg'
thumb_file = os.path.join(os.path.dirname(src_file), thumb_name)
try:
thumbnail(src_file, thumb_height, thumb_file)
except Exception:
return False
return thumb_file
def send_to_archive(demo_id, work_dir, request, ddl_archive, res_data, host_name):
"""
Prepare an execution folder for archiving an experiment (thumbnails).
Collect information and parameters.
Send data to the archive module.
"""
# let's add all the parameters
parameters = OrderedDict()
blobs = []
for key, values in ddl_archive.items():
if key == 'params':
for p in values:
if p in res_data['params']:
parameters[p] = res_data['params'][p]
elif key == 'info':
for i in values:
if i in res_data['algo_info']:
parameters[values[i]] = res_data['algo_info'][i]
elif key == 'files' or key == 'hidden_files':
for file_name, file_label in values.items():
src_file = os.path.join(work_dir, file_name)
if not os.path.exists(src_file):
continue # declared file in ddl is not there
if not file_label: # if no label given, use filename
file_label = file_name
value = {file_label: src_file}
try: # to get a thumbnail
print(src_file)
thumb_file = create_thumbnail(src_file)
except Exception:
print(traceback.format_exc())
if thumb_file:
value[os.path.basename(thumb_file)] = thumb_file
blobs.append(value)
elif key == 'compressed_files':
for file_name, file_label in values.items():
src_file = os.path.join(work_dir, file_name)
if not os.path.exists(src_file):
continue # normal?
src_handle = open(src_file, 'rb')
gz_file = src_file + '.gz'
gz_handle = gzip.open(gz_file, 'wb')
gz_handle.writelines(src_handle)
src_handle.close()
gz_handle.close()
if not file_label: # if no label given, use filename
file_label = file_name
blobs.append({file_label: gz_file})
if 'enable_reconstruct' in ddl_archive and ddl_archive['enable_reconstruct'] and request is not None:
clientData = json.loads(request['clientData'])
if clientData.get("origin", "") == "upload":
# Count how many file entries and remove them
file_keys = [key for key in request if key.startswith("file_")]
files = request.copy()
list(map(files.pop, file_keys))
clientData["files"] = len(file_keys)
execution = {}
execution['demo_id'] = demo_id
execution['request'] = clientData
execution['response'] = res_data
execution_json = json.dumps(execution)
else:
execution_json = None
url = 'http://{}/api/archive/add_experiment'.format(host_name)
data = {
"demo_id": demo_id,
"blobs": json.dumps(blobs),
"parameters": json.dumps(parameters),
"execution": execution_json
}
resp = requests.post(url, data=data)
return resp.json()
| #!/usr/bin/env python3
# -*- coding:utf-8 -*-
"""
Helper functions for core, related to the archive module.
"""
import gzip
import json
import os
import traceback
from collections import OrderedDict
import requests
from ipolutils.utils import thumbnail
def create_thumbnail(src_file):
"""
Create thumbnail when possible from file to archive in run folder,
returns the filepath of thumbnail when created.
"""
thumb_height = 128
if not os.path.exists(src_file):
return False
thumb_name, _ = os.path.splitext(os.path.basename(src_file))
thumb_name = thumb_name.lower() + '_thumbnail.jpeg'
thumb_file = os.path.join(os.path.dirname(src_file), thumb_name)
try:
thumbnail(src_file, thumb_height, thumb_file)
except Exception:
return False
return thumb_file
def send_to_archive(demo_id, work_dir, request, ddl_archive, res_data, host_name):
"""
Prepare an execution folder for archiving an experiment (thumbnails).
Collect information and parameters.
Send data to the archive module.
"""
# let's add all the parameters
parameters = OrderedDict()
blobs = []
for key, values in ddl_archive.items():
if key == 'params':
for p in values:
if p in res_data['params']:
parameters[p] = res_data['params'][p]
elif key == 'info':
for i in values:
if i in res_data['algo_info']:
parameters[values[i]] = res_data['algo_info'][i]
elif key == 'files' or key == 'hidden_files':
for file_name, file_label in values.items():
src_file = os.path.join(work_dir, file_name)
if not os.path.exists(src_file):
continue # declared file in ddl is not there
if not file_label: # if no label given, use filename
file_label = file_name
value = {file_label: src_file}
try: # to get a thumbnail
thumb_file = create_thumbnail(src_file)
except Exception:
print(traceback.format_exc())
if thumb_file:
value[os.path.basename(thumb_file)] = thumb_file
blobs.append(value)
elif key == 'compressed_files':
for file_name, file_label in values.items():
src_file = os.path.join(work_dir, file_name)
if not os.path.exists(src_file):
continue # normal?
src_handle = open(src_file, 'rb')
gz_file = src_file + '.gz'
gz_handle = gzip.open(gz_file, 'wb')
gz_handle.writelines(src_handle)
src_handle.close()
gz_handle.close()
if not file_label: # if no label given, use filename
file_label = file_name
blobs.append({file_label: gz_file})
if 'enable_reconstruct' in ddl_archive and ddl_archive['enable_reconstruct'] and request is not None:
clientData = json.loads(request['clientData'])
if clientData.get("origin", "") == "upload":
# Count how many file entries and remove them
file_keys = [key for key in request if key.startswith("file_")]
files = request.copy()
list(map(files.pop, file_keys))
clientData["files"] = len(file_keys)
execution = {}
execution['demo_id'] = demo_id
execution['request'] = clientData
execution['response'] = res_data
execution_json = json.dumps(execution)
else:
execution_json = None
url = 'http://{}/api/archive/add_experiment'.format(host_name)
data = {
"demo_id": demo_id,
"blobs": json.dumps(blobs),
"parameters": json.dumps(parameters),
"execution": execution_json
}
resp = requests.post(url, data=data)
return resp.json()
| Python | 0 |
871f49eea1197af8224c601833e6e96f59697eb3 | Update phishing_database.py | plugins/feeds/public/phishing_database.py | plugins/feeds/public/phishing_database.py | #!/usr/bin/env python
"""This class will incorporate the PhishingDatabase feed into yeti."""
from datetime import timedelta
import logging
from core.observables import Url
from core.feed import Feed
from core.errors import ObservableValidationError
class PhishingDatabase(Feed):
"""This class will pull the PhishingDatabase feed from github on a 12 hour interval."""
default_values = {
'frequency': timedelta(hours=12),
'name': 'PhishingDatabase',
'source': 'https://raw.githubusercontent.com/mitchellkrogza/Phishing.Database/master/phishing-links-NEW-today.txt',
'description':
'Phishing Domains, urls websites and threats database.'
}
def update(self):
for url in self.update_lines():
self.analyze(url)
def analyze(self, url):
context = {'source': self.name}
try:
url = Url.get_or_create(value=url)
url.add_context(context)
url.add_source(self.name)
url.tag(['phishing'])
except ObservableValidationError as e:
logging.error(e)
| from datetime import timedelta
import logging
from core.observables import Url
from core.feed import Feed
from core.errors import ObservableValidationError
class PhishingDatabase(Feed):
""" This class will pull the PhishingDatabase feed from github on a 12 hour interval. """
default_values = {
'frequency': timedelta(hours=12),
'name': 'PhishingDatabase',
'source': 'https://raw.githubusercontent.com/mitchellkrogza/Phishing.Database/master/phishing-links-NEW-today.txt',
'description':
'Phishing Domains, urls websites and threats database.'
}
def update(self):
for url in self.update_lines():
self.analyze(url)
def analyze(self, url):
context = {'source': self.name}
try:
url = Url.get_or_create(value=url)
url.add_context(context)
url.add_source(self.name)
url.tag(['phishing'])
except ObservableValidationError as e:
logging.error(e)
| Python | 0.000002 |
453e50823d3fb7a5937311e9118abbcbd5309855 | Implement a bunch more rare neutral minions | fireplace/carddata/minions/neutral/rare.py | fireplace/carddata/minions/neutral/rare.py | import random
from ...card import *
from fireplace.enums import Race
# Injured Blademaster
class CS2_181(Card):
def action(self):
self.damage(4)
# Young Priestess
class EX1_004(Card):
def endTurn(self):
other_minions = [t for t in self.controller.field if t is not self]
if other_minions:
random.choice(other_minions).buff("EX1_004e")
class EX1_004e(Card):
health = 1
# Coldlight Oracle
class EX1_050(Card):
def action(self):
self.controller.draw(2)
self.controller.opponent.draw(2)
# Arcane Golem
class EX1_089(Card):
def action(self):
self.controller.opponent.gainMana(1)
# Defender of Argus
class EX1_093(Card):
def action(self):
for target in self.adjacentMinions:
target.buff("EX1_093e")
class EX1_093e(Card):
atk = 1
health = 1
taunt = True
# Abomination
class EX1_097(Card):
def deathrattle(self):
for target in self.controller.getTargets(TARGET_ALL_CHARACTERS):
target.damage(2)
# Coldlight Seer
class EX1_103(Card):
def action(self):
for minion in self.controller.field:
if minion.race == Race.MURLOC:
minion.buff("EX1_103e")
class EX1_103e(Card):
health = 2
# Ancient Mage
class EX1_584(Card):
def action(self):
for target in self.adjacentMinions:
target.buff("EX1_584e")
class EX1_584e(Card):
spellpower = 1
# Imp Master
class EX1_597(Card):
def endTurn(self):
self.damage(1)
self.controller.summon("EX1_598")
# Nerubian Egg
class FP1_007(Card):
deathrattle = summonMinion("FP1_007t")
# Sludge Belcher
class FP1_012(Card):
deathrattle = summonMinion("FP1_012t")
# Bloodsail Corsair
class NEW1_025(Card):
def action(self):
weapon = self.controller.opponent.hero.weapon
if self.controller.opponent.hero.weapon:
weapon.loseDurability(1)
# Master Swordsmith
class NEW1_037(Card):
def endTurn(self):
other_minions = [t for t in self.controller.field if t is not self]
if other_minions:
random.choice(other_minions).buff("NEW1_037e")
class NEW1_037e(Card):
atk = 1
# Stampeding Kodo
class NEW1_041(Card):
def action(self):
targets = [t for t in self.controller.opponent.field if t.atk <= 2]
if targets:
random.choice(targets).destroy()
| from ...card import *
# Abomination
class EX1_097(Card):
def deathrattle(self):
for target in self.controller.getTargets(TARGET_ALL_CHARACTERS):
target.damage(2)
# Bloodsail Corsair
class NEW1_025(Card):
def action(self):
weapon = self.controller.opponent.hero.weapon
if self.controller.opponent.hero.weapon:
weapon.loseDurability(1)
| Python | 0.000004 |
730489e4f6a7f3067ad67c16512c2cbcb97f3272 | stop gap on astronmical solar zenith | bin/astronomical.py | bin/astronomical.py | """
astronomical.py, Sam Murphy (2017-04-27)
Astronomical calculations (e.g. solar angles) for
processing satellite imagery through Google Earth
Engine.
"""
import ee
class Astronomical:
pi = 3.141592653589793
degToRad = pi / 180 # degress to radians
radToDeg = 180 / pi # radians to degress
def sin(x):return ee.Number(x).sin()
def cos(x):return ee.Number(x).cos()
def radians(x):return ee.Number(x).multiply(Astronomical.degToRad)
def degrees(x):return ee.Number(x).multiply(Astronomical.radToDeg)
def dayOfYear(date):
jan01 = ee.Date.fromYMD(date.get('year'),1,1)
doy = date.difference(jan01,'day').toInt().add(1)
return doy
def solarDeclination(date):
"""
Calculates the solar declination angle (radians)
https://en.wikipedia.org/wiki/Position_of_the_Sun
simple version..
d = ee.Number(.doy).add(10).multiply(0.017214206).cos().multiply(-23.44)
a more accurate version used here..
"""
doy = Astronomical.dayOfYear(date)
N = ee.Number(doy).subtract(1)
solstice = N.add(10).multiply(0.985653269)
eccentricity = N.subtract(2).multiply(0.985653269).multiply(Astronomical.degToRad).sin().multiply(1.913679036)
axial_tilt = ee.Number(-23.44).multiply(Astronomical.degToRad).sin()
return solstice.add(eccentricity).multiply(Astronomical.degToRad).cos().multiply(axial_tilt).asin()
def solarZenith(geom,date):
"""
Calculates solar zenith angle (degrees)
https://en.wikipedia.org/wiki/Solar_zenith_angle
"""
latitude = Astronomical.radians(geom.centroid().coordinates().get(1))
d = Astronomical.solarDeclination(date)
hourAngle = Astronomical.radians(date.get('hour').subtract(12).multiply(15))
sines = Astronomical.sin(latitude).multiply(Astronomical.sin(d))
cosines = Astronomical.cos(latitude).multiply(Astronomical.cos(d)).multiply(Astronomical.cos(hourAngle))
solar_z = sines.add(cosines).acos()
return 'need to check this out'#solar_z.multiply(Astronomical.radToDeg) | """
astronomical.py, Sam Murphy (2017-04-27)
Astronomical calculations (e.g. solar angles) for
processing satellite imagery through Google Earth
Engine.
"""
import ee
class Astronomical:
pi = 3.141592653589793
degToRad = pi / 180 # degress to radians
radToDeg = 180 / pi # radians to degress
def sin(x):return ee.Number(x).sin()
def cos(x):return ee.Number(x).cos()
def radians(x):return ee.Number(x).multiply(Astronomical.degToRad)
def degrees(x):return ee.Number(x).multiply(Astronomical.radToDeg)
def dayOfYear(date):
jan01 = ee.Date.fromYMD(date.get('year'),1,1)
doy = date.difference(jan01,'day').toInt().add(1)
return doy
def solarDeclination(date):
"""
Calculates the solar declination angle (radians)
https://en.wikipedia.org/wiki/Position_of_the_Sun
simple version..
d = ee.Number(.doy).add(10).multiply(0.017214206).cos().multiply(-23.44)
a more accurate version used here..
"""
doy = Astronomical.dayOfYear(date)
N = ee.Number(doy).subtract(1)
solstice = N.add(10).multiply(0.985653269)
eccentricity = N.subtract(2).multiply(0.985653269).multiply(Astronomical.degToRad).sin().multiply(1.913679036)
axial_tilt = ee.Number(-23.44).multiply(Astronomical.degToRad).sin()
return solstice.add(eccentricity).multiply(Astronomical.degToRad).cos().multiply(axial_tilt).asin()
def solarZenith(geom,date):
"""
Calculates solar zenith angle (degrees)
https://en.wikipedia.org/wiki/Solar_zenith_angle
"""
latitude = Astronomical.radians(geom.centroid().coordinates().get(1))
d = Astronomical.solarDeclination(date)
hourAngle = Astronomical.radians(date.get('hour').subtract(12).multiply(15))
sines = Astronomical.sin(latitude).multiply(Astronomical.sin(d))
cosines = Astronomical.cos(latitude).multiply(Astronomical.cos(d)).multiply(Astronomical.cos(hourAngle))
solar_z = sines.add(cosines).acos()
return solar_z.multiply(Astronomical.radToDeg) | Python | 0.000001 |
9e3becba368e5cc916c9af99a89e62e502d0a506 | Fix syntax error in urls | greenland/urls.py | greenland/urls.py | """greenland URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
import maps.views
urlpatterns = [
url(r'^$', maps.views.index, name='index'),
url(r'^admin/', admin.site.urls),
url(r'^start/(?P<question_set_id>\d+)/', maps.views.start, name='start'),
url(r'^choice/', maps.views.get_choice, name='choice'),
url(r'^run/(?P<answer_set_id>\d+)/(?P<index>\d+)', maps.views.run, name='task'),
url(r'^results/(?P<answer_set_id>\d+)', maps.views.results, name='results'),
url(r'^api/', include('maps.api.urls'))
]
| """greenland URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
import maps.views
urlpatterns = [
url(r'^$', maps.views.index, name='index'),
url(r'^admin/', admin.site.urls),
url(r'^start/(?P<question_set_id>\d+)/', maps.views.start, name='start'),
url(r'^choice/', maps.views.get_choice, name='choice'),
url(r'^run/(?P<answer_set_id>\d+)/(?P<index>\d+)', maps.views.run, name='task'),
url(r'^results/(?P<answer_set_id>\d+)', maps.views.results, name='results')
url(r'^api/', include('maps.api.urls'))
]
| Python | 0.000254 |
0a08c933375197bd630442e4c1f27c68fb2c8d0b | Reorder some things | groupvpn-webui.py | groupvpn-webui.py | import json
import math
import random
import re
import string
from flask import Flask, redirect, render_template, request, url_for
import ipaddress
import wtforms as w
PASSWORD_CHARS = string.ascii_lowercase + string.digits
PASSWORD_LENGTH = 30
class IPNetworkField(w.Field):
widget = w.widgets.TextInput()
def __init__(self, label=None, validators=None, **kwargs):
super(IPNetworkField, self).__init__(label, validators, **kwargs)
def _value(self):
if self.raw_data:
return self.raw_data[0]
elif self.data is not None:
return str(self.data)
else:
return ''
def process_formdata(self, valuelist):
if valuelist:
try:
self.data = ipaddress.ip_network(valuelist[0])
except ValueError as e:
self.data = None
raise ValueError(e)
class ConfigurationForm(w.Form):
group_name = w.TextField("Group name", [w.validators.DataRequired()])
xmpp_host = w.TextField("XMPP host", [w.validators.DataRequired()])
machine_count = w.IntegerField(
"Number of machines", [w.validators.NumberRange(min=2)])
ip_network = IPNetworkField(
"IP network", default=ipaddress.ip_network(u"192.168.0.0/24"),
description="Enter the network base address followed by either a "
"netmask or a prefix length.")
end_to_end_security = w.BooleanField("End-to-end security")
def validate(self):
return (super(ConfigurationForm, self).validate() and
self.validate_enough_addresses())
def validate_enough_addresses(self):
available_addresses = len(list(self.ip_network.data.hosts()))
if available_addresses >= self.machine_count.data:
return True
else:
self.ip_network.errors.append("Network only contains {} addresses"
"".format(available_addresses))
return False
def make_configs(group_name, xmpp_host, ip_network,
machine_count, end_to_end_security):
max_digits = int(math.log10(machine_count - 1)) + 1
username_template = "{}{{:0{}}}".format(group_name, max_digits)
ips = iter(ip_network.hosts())
configs = []
for n in range(1, machine_count + 1):
username = re.sub(r'\W+', '_', username_template.format(n).lower())
password = ''.join(random.choice(PASSWORD_CHARS)
for _ in range(PASSWORD_LENGTH))
data = {
'xmpp_username': username,
'xmpp_password': password,
'xmpp_host': xmpp_host,
'ip': str(next(ips)),
}
configs.append({'filename': "{}.json".format(username),
'data': json.dumps(data, indent=4)})
return configs
app = Flask(__name__)
app.jinja_env.trim_blocks = True
app.jinja_env.keep_trailing_newline = True
@app.route('/', methods=['GET'])
def home():
return redirect(url_for('configurate'))
@app.route('/configurate', methods=['GET', 'POST'])
def configurate():
form = ConfigurationForm(request.form)
if request.method == 'POST' and form.validate():
configs = make_configs(form.group_name.data, form.xmpp_host.data,
form.ip_network.data,
form.machine_count.data,
form.end_to_end_security.data)
return render_template('success.html', form=form, configs=configs)
return render_template('configuration.html', form=form,
post_url=url_for('configurate'))
if __name__ == '__main__':
app.run(debug=True)
| import json
import math
import random
import re
import string
from flask import Flask, redirect, render_template, request, url_for
import ipaddress
import wtforms as w
PASSWORD_CHARS = string.ascii_lowercase + string.digits
PASSWORD_LENGTH = 30
class IPNetworkField(w.Field):
widget = w.widgets.TextInput()
def __init__(self, label=None, validators=None, **kwargs):
super(IPNetworkField, self).__init__(label, validators, **kwargs)
def _value(self):
if self.raw_data:
return self.raw_data[0]
elif self.data is not None:
return str(self.data)
else:
return ''
def process_formdata(self, valuelist):
if valuelist:
try:
self.data = ipaddress.ip_network(valuelist[0])
except ValueError as e:
self.data = None
raise ValueError(e)
class ConfigurationForm(w.Form):
group_name = w.TextField("Group name", [w.validators.DataRequired()])
xmpp_host = w.TextField("XMPP host", [w.validators.DataRequired()])
machine_count = w.IntegerField(
"Number of machines", [w.validators.NumberRange(min=2)])
ip_network = IPNetworkField(
"IP network", default=ipaddress.ip_network(u"192.168.0.0/24"),
description="Enter the network base address followed by either a "
"netmask or a prefix length.")
end_to_end_security = w.BooleanField("End-to-end security")
def validate(self):
return (super(ConfigurationForm, self).validate() and
self.validate_enough_addresses())
def validate_enough_addresses(self):
available_addresses = len(list(self.ip_network.data.hosts()))
if available_addresses >= self.machine_count.data:
return True
else:
self.ip_network.errors.append("Network only contains {} addresses"
"".format(available_addresses))
return False
app = Flask(__name__)
app.jinja_env.trim_blocks = True
app.jinja_env.keep_trailing_newline = True
@app.route('/', methods=['GET'])
def home():
return redirect(url_for('configurate'))
@app.route('/configurate', methods=['GET', 'POST'])
def configurate():
form = ConfigurationForm(request.form)
if request.method == 'POST' and form.validate():
configs = make_configs(form.group_name.data, form.xmpp_host.data,
form.ip_network.data,
form.machine_count.data,
form.end_to_end_security.data)
return render_template('success.html', form=form, configs=configs)
return render_template('configuration.html', form=form,
post_url=url_for('configurate'))
def make_configs(group_name, xmpp_host, ip_network,
machine_count, end_to_end_security):
max_digits = int(math.log10(machine_count - 1)) + 1
username_template = "{}{{:0{}}}".format(group_name, max_digits)
ips = iter(ip_network.hosts())
configs = []
for n in range(1, machine_count + 1):
username = re.sub(r'\W+', '_', username_template.format(n).lower())
password = ''.join(random.choice(PASSWORD_CHARS)
for _ in range(PASSWORD_LENGTH))
data = {
'xmpp_username': username,
'xmpp_password': password,
'xmpp_host': xmpp_host,
'ip': str(next(ips)),
}
configs.append({'filename': "{}.json".format(username),
'data': json.dumps(data, indent=4)})
return configs
if __name__ == '__main__':
app.run(debug=True)
| Python | 0.000335 |
4c6c41872b9a547917d81996f5f93d628c90216d | proper print | temperature-sparkpy.py | temperature-sparkpy.py |
from __future__ import print_function
import sys
import math
from operator import add
from pyspark import SparkContext
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
def mapper(line):
# positive or negative
sign = line[87:88]
# before the decimal point, remove leading zeros
before_decimal = line[88:92].lstrip("0")
# combine into string that can be cast to decimal
degrees = sign + before_decimal + "." + line[92:93]
if (is_number(degrees)):
return float(degrees)
else:
return 0
def reducer(a, b):
if a > b:
return a
else:
return b
if __name__ == "__main__":
if len(sys.argv) != 2:
print("Usage: wordcount <file>", file=sys.stderr)
exit(-1)
sc = SparkContext(appName="PySparkTemperature")
lines = sc.textFile(sys.argv[1], 1)
output = lines.map(mapper) \
.reduce(reducer)
print ("Max " + output)
sc.stop()
|
from __future__ import print_function
import sys
import math
from operator import add
from pyspark import SparkContext
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
def mapper(line):
# positive or negative
sign = line[87:88]
# before the decimal point, remove leading zeros
before_decimal = line[88:92].lstrip("0")
# combine into string that can be cast to decimal
degrees = sign + before_decimal + "." + line[92:93]
if (is_number(degrees)):
return float(degrees)
else:
return 0
def reducer(a, b):
if a > b:
return a
else:
return b
if __name__ == "__main__":
if len(sys.argv) != 2:
print("Usage: wordcount <file>", file=sys.stderr)
exit(-1)
sc = SparkContext(appName="PySparkTemperature")
lines = sc.textFile(sys.argv[1], 1)
counts = lines.map(mapper) \
.reduce(reducer)
output = counts.collect()
print ("Max " + output)
sc.stop()
| Python | 0.997763 |
548edcb10ef949d394388282d052da36135982d7 | Add coveralls support[5]. | build.py | build.py | #!/usr/bin/python
import os
import shutil
from subprocess import call
import sys
import platform
cur_path = os.getcwd()
build_path = os.getcwd() + "/build"
if platform.system() == 'Windows':
build_path = os.getcwd() + "/win_build"
if 'test' in sys.argv:
os.chdir(build_path)
r = call(["make tests"], shell=True)
exit(r)
if 'docs' in sys.argv:
call(["doxygen Doxyfile"], shell=True)
call(
["xsltproc doc/xml/combine.xslt doc/xml/index.xml > doc/xml/all.xml"],
shell=True)
call(["python doxml2md.py doc/xml/all.xml"], shell=True)
sys.exit(0)
if 'all' in sys.argv or not os.path.exists(build_path):
if os.path.exists(build_path):
shutil.rmtree(build_path, ignore_errors=True)
if not os.path.exists(build_path):
os.mkdir(build_path)
os.chdir(build_path)
if platform.system() == 'Windows':
call(["cmake"] + ['-G'] + ['Visual Studio 15 2017 Win64'] +
sys.argv[2:] + [cur_path])
else:
call(["cmake"] + sys.argv[2:] + [cur_path])
os.chdir(build_path)
if platform.system() == 'Windows':
make_result =\
call([r"MSBuild.exe"] + [r"/p:Configuration=Release"] +
[r"/p:Machine=X64"] + ["PONOS.sln"],
shell=True)
else:
make_result = call(["make -j8"], shell=True)
if "-DTRAVIS=1" not in sys.argv:
call(["make install"], shell=True)
if make_result != 0:
sys.exit(1)
| #!/usr/bin/python
import os
import shutil
from subprocess import call
import sys
import platform
cur_path = os.getcwd()
build_path = os.getcwd() + "/build"
if platform.system() == 'Windows':
build_path = os.getcwd() + "/win_build"
if 'test' in sys.argv:
os.chdir(build_path)
r = call(["make tests"], shell=True)
exit(r)
if 'docs' in sys.argv:
call(["doxygen Doxyfile"], shell=True)
call(
["xsltproc doc/xml/combine.xslt doc/xml/index.xml > doc/xml/all.xml"],
shell=True)
call(["python doxml2md.py doc/xml/all.xml"], shell=True)
sys.exit(0)
if 'all' in sys.argv or not os.path.exists(build_path):
if os.path.exists(build_path):
shutil.rmtree(build_path, ignore_errors=True)
if not os.path.exists(build_path):
os.mkdir(build_path)
os.chdir(build_path)
if platform.system() == 'Windows':
call(["cmake"] + ['-G'] + ['Visual Studio 15 2017 Win64'] +
sys.argv[2:] + [cur_path])
else:
call(["cmake"] + sys.argv[2:] + [cur_path])
os.chdir(build_path)
if platform.system() == 'Windows':
make_result =\
call([r"MSBuild.exe"] + [r"/p:Configuration=Release"] +
[r"/p:Machine=X64"] + ["PONOS.sln"],
shell=True)
else:
make_result = call(["make -j8"], shell=True)
call(["make install"], shell=True)
if make_result != 0:
sys.exit(1)
| Python | 0 |
df41dcb3c8538e482bcc61f9817ce26569652b6b | build script set user data for git | build.py | build.py | # -*- coding: utf-8 -*-
import os
import sh
from logging_service import __version__ as version
GIT_USER = 'circle-ci'
GIT_EMAIL = 'vitomarti@gmail.com'
def open_file(path):
return open(path, 'r+')
def get_git(repo_path):
return sh.git.bake(_cwd=repo_path)
def set_user_data_git(git):
git('config', '--global', 'user.email', GIT_EMAIL)
git('config', '--global', 'user.name', GIT_USER)
def main():
file_build = open_file('build_version')
lines = file_build.readlines()
build_version_old = lines[0]
build_version_new = str(int(build_version_old) + 1)
lines = [line.replace(build_version_old, build_version_new) for line in lines]
file_build.seek(0)
file_build.writelines(lines)
file_build.close()
repo_path = os.path.abspath(os.path.dirname(__file__))
git = get_git(repo_path)
set_user_data_git(git)
git('add', '-u')
new_tag_version = version + '-' + build_version_old
feature_message = 'feat: auto tag ' + new_tag_version
git('commit', '-m', feature_message)
git('push', 'origin', 'master')
git('tag', new_tag_version)
git('push', 'origin', '--tags')
if __name__ == '__main__':
main()
| # -*- coding: utf-8 -*-
import os
import sh
from logging_service import __version__ as version
def open_file(path):
return open(path, 'r+')
def get_git(repo_path):
return sh.git.bake(_cwd=repo_path)
def main():
file_build = open_file('build_version')
lines = file_build.readlines()
build_version_old = lines[0]
build_version_new = str(int(build_version_old) + 1)
lines = [line.replace(build_version_old, build_version_new) for line in lines]
file_build.seek(0)
file_build.writelines(lines)
file_build.close()
repo_path = os.path.abspath(os.path.dirname(__file__))
git = get_git(repo_path)
git('add', '-u')
new_tag_version = version + '-' + build_version_old
feature_message = 'feat: auto tag ' + new_tag_version
git('commit', '-m', feature_message)
git('push', 'origin', 'master')
git('tag', new_tag_version)
git('push', 'origin', '--tags')
if __name__ == '__main__':
main()
| Python | 0.000001 |
54b8e07ac412e757fb32ebfa19b75ef8a72f6688 | Print build path | build.py | build.py | #!/usr/bin/env python
import sys
import os
from argparse import ArgumentParser
from subprocess import check_call, check_output
def ensure_tool(name):
check_call(['which', name])
def build_and_publish(path, args):
login_command = get_login_command(args)
print >>sys.stderr, "Test anaconda.org login:"
check_call(login_command)
binfile = check_output(['conda', 'build', '--output', path])
binfile = binfile.strip()
print >>sys.stderr, "build path {}".format(binfile)
print >>sys.stderr, "conda build {}".format(path)
check_call(['conda', 'build', path])
upload_command = "binstar upload --force {}".format(binfile)
login_and_upload_command = "{} && {}".format(login_command, upload_command)
print >>sys.stderr, "Login to binstar and upload"
check_call(login_and_upload_command)
def get_login_command(args):
return ("binstar login --hostname {hostname} "
" --username {username} --password {password}")\
.format(
hostname='https://api.anaconda.org',
username=args.username,
password=args.password,
)
def get_conda_recipes_dir(project):
# make sure the project has a conda recipes folder
conda_recipes_dir = os.path.join(project, 'conda')
if not os.path.isdir(conda_recipes_dir):
sys.exit('no such dir: {}'.format(conda_recipes_dir))
return conda_recipes_dir
def conda_paths(conda_recipes_dir):
for name in sorted(os.listdir(conda_recipes_dir)):
yield os.path.join(conda_recipes_dir, name)
def main():
parser = ArgumentParser()
parser.add_argument('-u', '--username', required=True)
parser.add_argument('-P', '--password', required=True)
parser.add_argument('-p', '--project', required=True)
parser.add_argument('-s', '--site', required=False, default=None)
args = parser.parse_args()
# make sure we have a conda environment
ensure_tool('conda')
ensure_tool('binstar')
conda_recipes_dir = get_conda_recipes_dir(args.project)
for conda_path in conda_paths(conda_recipes_dir):
build_and_publish(conda_path, args)
return 0
if __name__ == '__main__':
sys.exit(main())
| #!/usr/bin/env python
import sys
import os
from argparse import ArgumentParser
from subprocess import check_call, check_output
def ensure_tool(name):
check_call(['which', name])
def build_and_publish(path, args):
login_command = get_login_command(args)
print >>sys.stderr, "Test anaconda.org login:"
check_call(login_command)
binfile = check_output(['conda', 'build', '--output', path])
binfile = binfile.strip()
print >>sys.stderr, "conda build {}".format(path)
check_call(['conda', 'build', path])
upload_command = "binstar upload --force {}".format(binfile)
login_and_upload_command = "{} && {}".format(login_command, upload_command)
print >>sys.stderr, "Login to binstar and upload"
check_call(login_and_upload_command)
def get_login_command(args):
return ("binstar login --hostname {hostname} "
" --username {username} --password {password}")\
.format(
hostname='https://api.anaconda.org',
username=args.username,
password=args.password,
)
def get_conda_recipes_dir(project):
# make sure the project has a conda recipes folder
conda_recipes_dir = os.path.join(project, 'conda')
if not os.path.isdir(conda_recipes_dir):
sys.exit('no such dir: {}'.format(conda_recipes_dir))
return conda_recipes_dir
def conda_paths(conda_recipes_dir):
for name in sorted(os.listdir(conda_recipes_dir)):
yield os.path.join(conda_recipes_dir, name)
def main():
parser = ArgumentParser()
parser.add_argument('-u', '--username', required=True)
parser.add_argument('-P', '--password', required=True)
parser.add_argument('-p', '--project', required=True)
parser.add_argument('-s', '--site', required=False, default=None)
args = parser.parse_args()
# make sure we have a conda environment
ensure_tool('conda')
ensure_tool('binstar')
conda_recipes_dir = get_conda_recipes_dir(args.project)
for conda_path in conda_paths(conda_recipes_dir):
build_and_publish(conda_path, args)
return 0
if __name__ == '__main__':
sys.exit(main())
| Python | 0.000001 |
00e68cff5e7d370e137383b4e0c3c774ddb4c929 | update metadata | l10n_br_sale_stock/__openerp__.py | l10n_br_sale_stock/__openerp__.py | # -*- coding: utf-8 -*-
###############################################################################
# #
# Copyright (C) 2013 Raphaël Valyi - Akretion #
# Copyright (C) 2013 Renato Lima - Akretion #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
###############################################################################
{
'name': 'Brazilian Localization Sales and Warehouse',
'category': 'Localisation',
'license': 'AGPL-3',
'author': 'Akretion, ,Odoo Community Association (OCA)',
'website': 'http://odoo-brasil.org',
'version': '8.0.1.0.0',
'depends': [
'sale_stock',
'l10n_br_sale_product',
'l10n_br_stock_account',
],
'data': [
'data/l10n_br_sale_stock_data.xml',
'views/sale_stock_view.xml',
],
'demo': [
'l10n_br_sale_stock_demo.xml',
],
'test': [
'test/sale_order_demo.yml'
],
'installable': True,
'auto_install': True,
}
| # -*- coding: utf-8 -*-
###############################################################################
# #
# Copyright (C) 2013 Raphaël Valyi - Akretion #
# Copyright (C) 2013 Renato Lima - Akretion #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
###############################################################################
{
'name': 'Brazilian Localization Sales and Warehouse',
'category': 'Localisation',
'license': 'AGPL-3',
'author': 'Akretion, ,Odoo Community Association (OCA)',
'website': 'http://odoo-brasil.org',
'version': '8.0.1.0.0',
'depends': [
'sale_stock',
'l10n_br_sale_product',
'l10n_br_stock_account',
],
'data': [
'data/l10n_br_sale_stock_data.xml',
'views/sale_stock_view.xml',
],
'demo': [
'l10n_br_sale_stock_demo.xml',
<<<<<<< HEAD
=======
#'test/sale_order_demo.yml'
>>>>>>> comment data file
],
'test': [
'test/sale_order_demo.yml'
],
'installable': True,
'auto_install': True,
}
| Python | 0.000001 |
3d52e82a295c7c7d6b77d81a1d2c6ac0929bb120 | make sqlitecache bundable. | sqlite_cache/__init__.py | sqlite_cache/__init__.py | from __future__ import absolute_import
from .core import SQLiteCache # pragma: no cover
| from sqlite_cache.core import SQLiteCache # pragma: no cover
| Python | 0 |
962322fd385bcfcc670ead757190d37955ccda14 | improve logging and add ros params | lg_earth/src/lg_earth/kmlalive.py | lg_earth/src/lg_earth/kmlalive.py | import subprocess
import rospy
import rosservice
import traceback
import sys
class KmlAlive:
def __init__(self, earth_proc):
self.earth_proc = earth_proc
rospy.loginfo("XXX starting KMLALIVE process")
self.timeout_period = rospy.get_param(~timeout_period, 5)
self.initial_timeout = rospy.get_param(~initial_timeout, 60)
rospy.Timer(rospy.Duration(10), self.keep_alive, oneshot=True)
# only restart when worked is true, otherwise
# it may have never worked
self.worked = False
def keep_alive(self, *args, **kwargs):
try:
self._keep_alive(args, kwargs)
except Exception as e:
rospy.logerr("exception was {} {} {}".format(e, traceback.format_exc(), sys.exc_info()[0]))
rospy.sleep(1)
self.keep_alive(args, kwargs)
def _keep_alive(self, *args, **kwargs):
rospy.logerr("XXX in first keep_alive")
loop_timeout = 1
counter = 0
with open('/dev/null', 'w') as dev_null:
while not rospy.is_shutdown():
try:
pid = self.earth_proc.proc.watcher.proc.pid
except AttributeError as e:
counter = 0
rospy.logwarn("Earth proc doesn't exist {}".format(e))
rospy.sleep(loop_timeout)
continue
if '/kmlsync/state' in rosservice.get_service_list():
cmd = "lsof -Pn -p {} -a -i @127.0.0.1:8765".format(pid).split(' ')
ret_value = subprocess.call(
cmd,
stdout=dev_null,
stderr=dev_null,
close_fds=True
)
if ret_value == 0:
self.worked = True
counter = 0
else:
counter += 1
rospy.logerr("XXX found non zero value for {} counter at {}".format(pid, counter))
if (counter > self.timeout_period and self.worked) or counter > self.initial_timeout:
rospy.logerr("XXX RELAUNCHING worked: {} counter: {}".format(self.worked, counter))
self.earth_proc.handle_soft_relaunch()
counter = 0
self.worked = False
else:
rospy.logerr("no kml sync state found")
rospy.sleep(loop_timeout)
| import subprocess
import rospy
import rosservice
import traceback
import sys
class KmlAlive:
def __init__(self, earth_proc):
self.earth_proc = earth_proc
rospy.loginfo("XXX starting KMLALIVE process")
rospy.Timer(rospy.Duration(10), self.keep_alive, oneshot=True)
# only restart when worked is true, otherwise
# it may have never worked
self.worked = False
def keep_alive(self, *args, **kwargs):
try:
self._keep_alive(args, kwargs)
except Exception as e:
rospy.logerr("exception was {} {} {}".format(e, traceback.format_exc(), sys.exc_info()[0]))
rospy.sleep(1)
self.keep_alive(args, kwargs)
def _keep_alive(self, *args, **kwargs):
rospy.logerr("XXX in first keep_alive")
loop_timeout = 1
counter = 0
with open('/dev/null', 'w') as dev_null:
while not rospy.is_shutdown():
try:
pid = self.earth_proc.proc.watcher.proc.pid
except AttributeError as e:
counter = 0
rospy.logwarn("Earth proc doesn't exist {}".format(e))
rospy.sleep(loop_timeout)
continue
if '/kmlsync/state' in rosservice.get_service_list():
cmd = "lsof -Pn -p {} -a -i @127.0.0.1:8765".format(pid).split(' ')
ret_value = subprocess.call(
cmd,
stdout=dev_null,
stderr=dev_null,
close_fds=True
)
if ret_value == 0:
self.worked = True
counter = 0
else:
counter += 1
rospy.logerr("XXX found non zero value for {} counter at {}".format(pid, counter))
if (counter > 5 and self.worked) or counter > 60:
rospy.logerr("XXX RELAUNCHING")
self.earth_proc.handle_soft_relaunch()
counter = 0
self.worked = False
else:
rospy.logerr("no kml sync state found")
rospy.sleep(loop_timeout)
| Python | 0 |
f672da20640b761d47d5c15d791e06fc5e25fd35 | Fix Deprecation warning in Django 1.9 | bootstrap3/utils.py | bootstrap3/utils.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
from django import VERSION
from django.forms.widgets import flatatt
from django.template import (Context, RequestContext, Template, Variable,
VariableDoesNotExist)
from django.template.base import (FilterExpression, TemplateSyntaxError,
kwarg_re)
from django.template.loader import get_template
from django.utils.safestring import mark_safe
from .text import text_value
try:
from django.utils.html import format_html
except ImportError:
from .legacy import format_html_pre_18 as format_html
# RegEx for quoted string
QUOTED_STRING = re.compile(r'^["\'](?P<noquotes>.+)["\']$')
def handle_var(value, context):
"""
Handle template tag variable
"""
# Resolve FilterExpression and Variable immediately
if isinstance(value, FilterExpression) or isinstance(value, Variable):
return value.resolve(context)
# Return quoted strings unquoted
# http://djangosnippets.org/snippets/886
stringval = QUOTED_STRING.search(value)
if stringval:
return stringval.group('noquotes')
# Resolve variable or return string value
try:
return Variable(value).resolve(context)
except VariableDoesNotExist:
return value
def parse_token_contents(parser, token):
"""
Parse template tag contents
"""
bits = token.split_contents()
tag = bits.pop(0)
args = []
kwargs = {}
asvar = None
if len(bits) >= 2 and bits[-2] == 'as':
asvar = bits[-1]
bits = bits[:-2]
if len(bits):
for bit in bits:
match = kwarg_re.match(bit)
if not match:
raise TemplateSyntaxError(
'Malformed arguments to tag "{}"'.format(tag))
name, value = match.groups()
if name:
kwargs[name] = parser.compile_filter(value)
else:
args.append(parser.compile_filter(value))
return {
'tag': tag,
'args': args,
'kwargs': kwargs,
'asvar': asvar,
}
def split_css_classes(css_classes):
"""
Turn string into a list of CSS classes
"""
classes_list = text_value(css_classes).split(' ')
return [c for c in classes_list if c]
def add_css_class(css_classes, css_class, prepend=False):
"""
Add a CSS class to a string of CSS classes
"""
classes_list = split_css_classes(css_classes)
classes_to_add = [c for c in split_css_classes(css_class)
if c not in classes_list]
if prepend:
classes_list = classes_to_add + classes_list
else:
classes_list += classes_to_add
return ' '.join(classes_list)
def remove_css_class(css_classes, css_class):
"""
Remove a CSS class from a string of CSS classes
"""
remove = set(split_css_classes(css_class))
classes_list = [c for c in split_css_classes(css_classes)
if c not in remove]
return ' '.join(classes_list)
def render_link_tag(url, rel='stylesheet', media=None):
"""
Build a link tag
"""
attrs = {
'href': url,
'rel': rel,
}
if media:
attrs['media'] = media
return render_tag('link', attrs=attrs, close=False)
def render_tag(tag, attrs=None, content=None, close=True):
"""
Render a HTML tag
"""
builder = '<{tag}{attrs}>{content}'
if content or close:
builder += '</{tag}>'
return format_html(
builder,
tag=tag,
attrs=mark_safe(flatatt(attrs)) if attrs else '',
content=text_value(content),
)
def render_template_to_unicode(template, context=None):
"""
Render a Template to unicode
"""
if context is None:
context = {}
if not isinstance(template, Template):
template = get_template(template)
if VERSION > (1, 8):
return template.render(context)
return template.render(Context(context))
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
from django.forms.widgets import flatatt
from django.template import Variable, VariableDoesNotExist, Template, Context
from django.template.base import FilterExpression, kwarg_re, TemplateSyntaxError
from django.template.loader import get_template
from django.utils.safestring import mark_safe
try:
from django.utils.html import format_html
except ImportError:
from .legacy import format_html_pre_18 as format_html
from .text import text_value
# RegEx for quoted string
QUOTED_STRING = re.compile(r'^["\'](?P<noquotes>.+)["\']$')
def handle_var(value, context):
"""
Handle template tag variable
"""
# Resolve FilterExpression and Variable immediately
if isinstance(value, FilterExpression) or isinstance(value, Variable):
return value.resolve(context)
# Return quoted strings unquoted
# http://djangosnippets.org/snippets/886
stringval = QUOTED_STRING.search(value)
if stringval:
return stringval.group('noquotes')
# Resolve variable or return string value
try:
return Variable(value).resolve(context)
except VariableDoesNotExist:
return value
def parse_token_contents(parser, token):
"""
Parse template tag contents
"""
bits = token.split_contents()
tag = bits.pop(0)
args = []
kwargs = {}
asvar = None
if len(bits) >= 2 and bits[-2] == 'as':
asvar = bits[-1]
bits = bits[:-2]
if len(bits):
for bit in bits:
match = kwarg_re.match(bit)
if not match:
raise TemplateSyntaxError(
'Malformed arguments to tag "{}"'.format(tag))
name, value = match.groups()
if name:
kwargs[name] = parser.compile_filter(value)
else:
args.append(parser.compile_filter(value))
return {
'tag': tag,
'args': args,
'kwargs': kwargs,
'asvar': asvar,
}
def split_css_classes(css_classes):
"""
Turn string into a list of CSS classes
"""
classes_list = text_value(css_classes).split(' ')
return [c for c in classes_list if c]
def add_css_class(css_classes, css_class, prepend=False):
"""
Add a CSS class to a string of CSS classes
"""
classes_list = split_css_classes(css_classes)
classes_to_add = [c for c in split_css_classes(css_class)
if c not in classes_list]
if prepend:
classes_list = classes_to_add + classes_list
else:
classes_list += classes_to_add
return ' '.join(classes_list)
def remove_css_class(css_classes, css_class):
"""
Remove a CSS class from a string of CSS classes
"""
remove = set(split_css_classes(css_class))
classes_list = [c for c in split_css_classes(css_classes)
if c not in remove]
return ' '.join(classes_list)
def render_link_tag(url, rel='stylesheet', media=None):
"""
Build a link tag
"""
attrs = {
'href': url,
'rel': rel,
}
if media:
attrs['media'] = media
return render_tag('link', attrs=attrs, close=False)
def render_tag(tag, attrs=None, content=None, close=True):
"""
Render a HTML tag
"""
builder = '<{tag}{attrs}>{content}'
if content or close:
builder += '</{tag}>'
return format_html(
builder,
tag=tag,
attrs=mark_safe(flatatt(attrs)) if attrs else '',
content=text_value(content),
)
def render_template_to_unicode(template, context=None):
"""
Render a Template to unicode
"""
if not isinstance(template, Template):
template = get_template(template)
if context is None:
context = {}
return template.render(Context(context)) | Python | 0.000067 |
60690c178f3adb5a2e05e4960e3b142dbf6c1aad | update cache | cache.py | cache.py | import inspect
import json as json
from functools import wraps
from hashlib import md5
def cache_json(func, key_prefix='', expire=0, expire_at='', redis_client=None):
"""key_prefix is optional.
if use, it should be unique at module level within the redis db,
__module__ & func_name & all arguments would also be part of the key.
redis_client: it's thread safe.
to avoid giving `redis_client` param every time, you could do this:
from functools import partial
from somewhere import my_redis_client
cache_json = partial(cache_json, redis_client=my_redis_client)
"""
@wraps(func)
def wrapped(_use_cache=True, *args, **kwargs):
"""set _use_cache to False if you do not want to use cache on this call.
"""
if _use_cache:
call_args = inspect.getcallargs(func, *args, **kwargs)
func_code = inspect.getsource(func)
args_hash = md5(json.dumps(call_args, sort_keys=True).encode()).hexdigest()
key = key_prefix + func.__module__ + func.__name__ + args_hash
cached = redis_client.get(key)
if cached is None:
ret = func(*args, **kwargs)
redis_client[key] = json.dumps(ret)
else:
ret = json.loads(cached)
return ret
else:
return func(*args, **kwargs)
return wrapped
def release_cache(func):
return
| import json as json
def cache_json(func, key_prefix='', expire=0, expire_at='', redis_client=None):
"""key_prefix should be unique at module level within the redis db,
func name & all arguments would also be part of the key.
redis_client: it's thread safe.
to avoid giving `redis_client` param every time, you could do this:
from functools import partial
from somewhere import my_redis_client
cache_json = partial(cache_json, redis_client=my_redis_client)
"""
def wrapped(_use_cache=True, *args, **kwargs):
if _use_cache:
return
else:
ret = func(*args, **kwargs)
return ret
return wrapped
| Python | 0.000001 |
c40376c36312e582704b4fafbc36f4b17171394f | switch to using selectors | linter.py | linter.py | #
# linter.py
# Linter for SublimeLinter3, a code checking framework for Sublime Text 3
#
# Written by roadhump
# Copyright (c) 2014 roadhump
#
# License: MIT
#
"""This module exports the ESLint plugin class."""
import logging
import re
from SublimeLinter.lint import NodeLinter
logger = logging.getLogger('SublimeLinter.plugin.eslint')
class ESLint(NodeLinter):
"""Provides an interface to the eslint executable."""
npm_name = 'eslint'
cmd = ('eslint', '--format', 'compact', '--stdin', '--stdin-filename', '@')
regex = (
r'^.+?: line (?P<line>\d+), col (?P<col>\d+), '
r'(?:(?P<error>Error)|(?P<warning>Warning)) - '
r'(?P<message>.+)'
)
crash_regex = re.compile(
r'^(.*?)\r?\n\w*(Oops! Something went wrong!)',
re.DOTALL
)
line_col_base = (1, 1)
defaults = {
'selector': 'source.js - meta.attribute-with-value, text.html.basic'
}
def find_errors(self, output):
"""Parse errors from linter's output.
Log errors when eslint crashes or can't find its configuration.
"""
match = self.crash_regex.match(output)
if match:
logger.error(output)
return []
return super().find_errors(output)
def split_match(self, match):
"""Extract and return values from match.
Return 'no match' for ignored files
"""
match, line, col, error, warning, message, near = super().split_match(match)
if message and message.startswith('File ignored'):
return match, None, None, None, None, '', None
return match, line, col, error, warning, message, near
| #
# linter.py
# Linter for SublimeLinter3, a code checking framework for Sublime Text 3
#
# Written by roadhump
# Copyright (c) 2014 roadhump
#
# License: MIT
#
"""This module exports the ESLint plugin class."""
import logging
import re
from SublimeLinter.lint import NodeLinter
logger = logging.getLogger('SublimeLinter.plugin.eslint')
class ESLint(NodeLinter):
"""Provides an interface to the eslint executable."""
syntax = ('javascript', 'html')
npm_name = 'eslint'
cmd = ('eslint', '--format', 'compact', '--stdin', '--stdin-filename', '@')
regex = (
r'^.+?: line (?P<line>\d+), col (?P<col>\d+), '
r'(?:(?P<error>Error)|(?P<warning>Warning)) - '
r'(?P<message>.+)'
)
crash_regex = re.compile(
r'^(.*?)\r?\n\w*(Oops! Something went wrong!)',
re.DOTALL
)
line_col_base = (1, 1)
selectors = {
'html': 'source.js.embedded.html'
}
def find_errors(self, output):
"""Parse errors from linter's output.
Log errors when eslint crashes or can't find its configuration.
"""
match = self.crash_regex.match(output)
if match:
logger.error(output)
return []
return super().find_errors(output)
def split_match(self, match):
"""Extract and return values from match.
Return 'no match' for ignored files
"""
match, line, col, error, warning, message, near = super().split_match(match)
if message and message.startswith('File ignored'):
return match, None, None, None, None, '', None
return match, line, col, error, warning, message, near
| Python | 0.000002 |
d927e5dbf7820ad0e48006d9b2042b62c04bd310 | Update regex | linter.py | linter.py | #
# linter.py
# Linter for SublimeLinter3, a code checking framework for Sublime Text 3
#
# Written by Clifton Kaznocha
# Copyright (c) 2014 Clifton Kaznocha
#
# License: MIT
#
"""This module exports the Flow plugin class."""
import os
from SublimeLinter.lint import Linter
class Flow(Linter):
"""Provides an interface to flow."""
syntax = ('javascript', 'html')
executable = 'flow'
version_args = '--version'
version_re = r'(?P<version>\d+\.\d+\.\d+)'
version_requirement = '>= 0.1.0'
regex = r'''(?xi)
# Warning location and optional title for the message
/.+/(?P<file_name>.+):(?P<line>\d+):(?P<col>\d+),\d+:\s?(?P<message_title>.*)\r?\n
# Main lint message
(?P<message>.+)
# Optional message, only extract the text, leave the path
(\r?\n\s\s/.+:\s(?P<message_footer>.+))?
'''
multiline = True
defaults = {
# Allows the user to lint *all* files, regardless of whether they have the `/* @flow */` declaration at the top.
'all': False,
# Allow to bypass the 50 errors cap
'show-all-errors': True,
# Options for flow
'--lib:,': ''
}
word_re = r'^((\'|")?[^"\']+(\'|")?)(?=[\s\,\)\]])'
tempfile_suffix = '-'
selectors = {
'html': 'source.js.embedded.html'
}
config_file = ('.flowconfig')
def cmd(self):
"""Return the command line to execute."""
command = [self.executable_path, 'check']
if self.get_merged_settings()['show-all-errors']:
command.append('--show-all-errors')
if self.get_merged_settings()['all']:
command.append('--all')
return command
def split_match(self, match):
"""
Return the components of the match.
We override this to catch linter error messages and return better
error messages.
"""
if match:
open_file_name = os.path.basename(self.view.file_name())
linted_file_name = match.group('file_name')
if linted_file_name == open_file_name:
message_title = match.group('message_title')
message = match.group('message')
message_footer = match.group('message_footer') or ""
if message_title:
message = '"{0}"" {1} {2}'.format(
message_title,
message,
message_footer
)
line = max(int(match.group('line')) - 1, 0)
col = int(match.group('col')) - 1
# match, line, col, error, warning, message, near
return match, line, col, True, False, message, None
return match, None, None, None, None, '', None
| #
# linter.py
# Linter for SublimeLinter3, a code checking framework for Sublime Text 3
#
# Written by Clifton Kaznocha
# Copyright (c) 2014 Clifton Kaznocha
#
# License: MIT
#
"""This module exports the Flow plugin class."""
import os
from SublimeLinter.lint import Linter
class Flow(Linter):
"""Provides an interface to flow."""
syntax = ('javascript', 'html')
executable = 'flow'
version_args = '--version'
version_re = r'(?P<version>\d+\.\d+\.\d+)'
version_requirement = '>= 0.1.0'
regex = r'''(?xi)
# Find the line number and col
^/.+/(?P<file_name>.+):(?P<line>\d+):(?P<col>\d+),\d+:\s*(?P<message1>.+)$\r?\n
# The second part of the message
^(?P<message2>.+)$\r?\n
# The third part of the message
^\s*.*:\d+:\d+,\d+:\s*(?P<message3>.+)\s*$
'''
multiline = True
defaults = {
# Allows the user to lint *all* files, regardless of whether they have the `/* @flow */` declaration at the top.
'all': False,
# Allow to bypass the 50 errors cap
'show-all-errors': True,
# Options for flow
'--lib:,': ''
}
word_re = r'^((\'|")?[^"\']+(\'|")?)(?=[\s\,\)\]])'
tempfile_suffix = '-'
selectors = {
'html': 'source.js.embedded.html'
}
config_file = ('.flowconfig')
def cmd(self):
"""Return the command line to execute."""
command = [self.executable_path, 'check']
if self.get_merged_settings()['show-all-errors']:
command.append('--show-all-errors')
if self.get_merged_settings()['all']:
command.append('--all')
return command
def split_match(self, match):
"""
Return the components of the match.
We override this to catch linter error messages and return better
error messages.
"""
if match:
open_file_name = os.path.basename(self.view.file_name())
linted_file_name = match.group('file_name')
if linted_file_name == open_file_name:
message = '"{0}"" {1} {2}'.format(
match.group('message1'),
match.group('message2'),
match.group('message3')
)
line = max(int(match.group('line')) - 1, 0)
col = int(match.group('col')) - 1
# match, line, col, error, warning, message, near
return match, line, col, True, False, message, None
return match, None, None, None, None, '', None
| Python | 0.000002 |
6a9d6d30dc7ea207e2f4d8179a5ef99a95fce4e5 | Fix bug in ListingGenerator with limit=None. | praw/models/listinggenerator.py | praw/models/listinggenerator.py | from .prawmodel import PRAWModel
class ListingGenerator(PRAWModel):
"""Instances of this class generate ``RedditModels``"""
def __init__(self, reddit, url, limit=100, params=None):
"""Initialize a ListingGenerator instance.
:param reddit: An instance of :class:`.Reddit`.
:param url: A URL returning a reddit listing.
:param limit: The number of content entries to fetch. If ``limit`` is
None, then fetch as many entries as possible. Most of reddit's
listings contain a maximum of 1000 items, and are returned 100 at a
time. This class will automatically issue all necessary
requests. (Default: 100)
:param params: A dictionary containing additional query string
parameters to send with the request.
"""
self._exhausted = False
self._list = None
self._list_index = None
self._reddit = reddit
self.after_field = 'after'
self.extract_list_index = None
self.limit = limit
self.params = params or {}
self.root_field = 'data'
self.thing_list_field = 'children'
self.url = url
self.yielded = 0
self.params['limit'] = self.limit or 1024
def __iter__(self):
return self
def __next__(self):
if self.limit is not None and self.yielded >= self.limit:
raise StopIteration()
if self._list is None or self._list_index >= len(self._list):
self._next_batch()
self._list_index += 1
self.yielded += 1
return self._list[self._list_index - 1]
def _next_batch(self):
if self._exhausted:
raise StopIteration()
page_data = self._reddit.request(self.url, params=self.params)
if self.extract_list_index is not None:
page_data = page_data[self.extract_list_index]
root = page_data[self.root_field]
self._list = root[self.thing_list_field]
self._list_index = 0
if len(self._list) == 0:
raise StopIteration()
if root.get(self.after_field):
self.params['after'] = root[self.after_field]
else:
self._exhausted = True
| from .prawmodel import PRAWModel
class ListingGenerator(PRAWModel):
"""Instances of this class generate ``RedditModels``"""
def __init__(self, reddit, url, limit=100, params=None):
"""Initialize a ListingGenerator instance.
:param reddit: An instance of :class:`.Reddit`.
:param url: A URL returning a reddit listing.
:param limit: The number of content entries to fetch. If ``limit`` is
None, then fetch as many entries as possible. Most of reddit's
listings contain a maximum of 1000 items, and are returned 100 at a
time. This class will automatically issue all necessary
requests. (Default: 100)
:param params: A dictionary containing additional query string
parameters to send with the request.
"""
self._exhausted = False
self._list = None
self._list_index = None
self._reddit = reddit
self.after_field = 'after'
self.extract_list_index = None
self.limit = limit
self.params = params or {}
self.root_field = 'data'
self.thing_list_field = 'children'
self.url = url
self.yielded = 0
self.params['limit'] = self.limit or 1024
def __iter__(self):
return self
def __next__(self):
if self.yielded >= self.limit:
raise StopIteration()
if self._list is None or self._list_index >= len(self._list):
self._next_batch()
self._list_index += 1
self.yielded += 1
return self._list[self._list_index - 1]
def _next_batch(self):
if self._exhausted:
raise StopIteration()
page_data = self._reddit.request(self.url, params=self.params)
if self.extract_list_index is not None:
page_data = page_data[self.extract_list_index]
root = page_data[self.root_field]
self._list = root[self.thing_list_field]
self._list_index = 0
if len(self._list) == 0:
raise StopIteration()
if root.get(self.after_field):
self.params['after'] = root[self.after_field]
else:
self._exhausted = True
| Python | 0 |
bf7562d9f45a777163f2ac775dc9cf4afe99a930 | Change 'language' to 'syntax', that is more precise terminology. | linter.py | linter.py | #
# linter.py
# Linter for SublimeLinter3, a code checking framework for Sublime Text 3
#
# Written by Aparajita Fishman
# Copyright (c) 2013 Aparajita Fishman
#
# Project: https://github.com/SublimeLinter/SublimeLinter-contrib-jshint
# License: MIT
#
"""This module exports the JSHint plugin linter class."""
from SublimeLinter.lint import Linter
class JSHint(Linter):
"""Provides an interface to the jshint executable."""
syntax = ('javascript', 'html')
cmd = 'jshint --verbose -'
regex = r'^.+?: line (?P<line>\d+), col (?P<col>\d+), (?P<message>.+) \((?:(?P<error>E)|(?P<warning>W))\d+\)$'
selectors = {
'html': 'source.js.embedded.html'
}
| #
# linter.py
# Linter for SublimeLinter3, a code checking framework for Sublime Text 3
#
# Written by Aparajita Fishman
# Copyright (c) 2013 Aparajita Fishman
#
# Project: https://github.com/SublimeLinter/SublimeLinter-contrib-jshint
# License: MIT
#
"""This module exports the JSHint plugin linter class."""
from SublimeLinter.lint import Linter
class JSHint(Linter):
"""Provides an interface to the jshint executable."""
language = ('javascript', 'html')
cmd = 'jshint --verbose -'
regex = r'^.+?: line (?P<line>\d+), col (?P<col>\d+), (?P<message>.+) \((?:(?P<error>E)|(?P<warning>W))\d+\)$'
selectors = {
'html': 'source.js.embedded.html'
}
| Python | 0.002004 |
cea40608a1efe16310c7b978fba40abcde26ced4 | make flake8 happy | linter.py | linter.py | #
# linter.py
# Linter for SublimeLinter3, a code checking framework for Sublime Text 3
#
# Written by Dan Flettre
# Copyright (c) 2015 Dan Flettre
#
# License: MIT
#
"""This module exports the Semistandard plugin class."""
from SublimeLinter.lint import NodeLinter
class Semistandard(NodeLinter):
"""Provides an interface to semistandard."""
syntax = ('javascript', 'html', 'javascriptnext', 'javascript 6to5')
cmd = 'semistandard'
version_args = '--version'
version_re = r'(?P<version>\d+\.\d+\.\d+)'
version_requirement = '>= 2.3.1'
regex = r'^\s.+:(?P<line>\d+):(?P<col>\d+):(?P<message>.+)'
selectors = {
'html': 'source.js.embedded.html'
}
| #
# linter.py
# Linter for SublimeLinter3, a code checking framework for Sublime Text 3
#
# Written by Dan Flettre
# Copyright (c) 2015 Dan Flettre
#
# License: MIT
#
"""This module exports the Semistandard plugin class."""
from SublimeLinter.lint import NodeLinter, util
class Semistandard(NodeLinter):
"""Provides an interface to semistandard."""
syntax = ('javascript', 'html', 'javascriptnext', 'javascript 6to5')
cmd = 'semistandard'
version_args = '--version'
version_re = r'(?P<version>\d+\.\d+\.\d+)'
version_requirement = '>= 2.3.1'
regex = r'^\s.+:(?P<line>\d+):(?P<col>\d+):(?P<message>.+)'
selectors = {
'html': 'source.js.embedded.html'
}
| Python | 0 |
9bfe8cd21931c69d79657aa275be02af21ec78f1 | Simplify `cmd` property | linter.py | linter.py | #
# linter.py
# Linter for SublimeLinter3, a code checking framework for Sublime Text 3
#
# Written by Bartosz Kruszczynski
# Copyright (c) 2015 Bartosz Kruszczynski
#
# License: MIT
#
"""This module exports the Reek plugin class."""
from SublimeLinter.lint import RubyLinter
import re
class Reek(RubyLinter):
"""Provides an interface to reek."""
syntax = (
'better rspec',
'betterruby',
'cucumber steps',
'rspec',
'ruby experimental',
'ruby on rails',
'ruby'
)
cmd = 'reek'
regex = r'^.+?\[(?P<line>\d+).*\]:(?P<message>.+) \[.*\]'
tempfile_suffix = 'rb'
version_re = r'reek\s(?P<version>\d+\.\d+\.\d+)'
version_requirement = '>= 3.5.0'
config_file = ('-c', 'config.reek')
def split_match(self, match):
"""Extract named capture groups from the regex and return them as a tuple."""
match, line, col, error, warning, message, _ = super().split_match(match)
near = self.search_token(message)
return match, line, col, error, warning, message, near
def search_token(self, message):
"""Search text token to be highlighted."""
# First search for variable name enclosed in single quotes
m = re.search("'.*'", message)
# If there's no variable name search for nil-check message
if m is None:
m = re.search('nil(?=-check)', message)
# If there's no nil-check search for method name that comes after a `#`
if m is None:
m = re.search('(?<=#)\S+', message)
return m.group(0) if m else None
| #
# linter.py
# Linter for SublimeLinter3, a code checking framework for Sublime Text 3
#
# Written by Bartosz Kruszczynski
# Copyright (c) 2015 Bartosz Kruszczynski
#
# License: MIT
#
"""This module exports the Reek plugin class."""
from SublimeLinter.lint import RubyLinter
import re
class Reek(RubyLinter):
"""Provides an interface to reek."""
syntax = (
'better rspec',
'betterruby',
'cucumber steps',
'rspec',
'ruby experimental',
'ruby on rails',
'ruby'
)
cmd = 'ruby -S reek'
regex = r'^.+?\[(?P<line>\d+).*\]:(?P<message>.+) \[.*\]'
tempfile_suffix = 'rb'
version_args = '-S reek -v'
version_re = r'reek\s(?P<version>\d+\.\d+\.\d+)'
version_requirement = '>= 3.5.0'
config_file = ('-c', 'config.reek')
def split_match(self, match):
"""Extract named capture groups from the regex and return them as a tuple."""
match, line, col, error, warning, message, _ = super().split_match(match)
near = self.search_token(message)
return match, line, col, error, warning, message, near
def search_token(self, message):
"""Search text token to be highlighted."""
# First search for variable name enclosed in single quotes
m = re.search("'.*'", message)
# If there's no variable name search for nil-check message
if m is None:
m = re.search('nil(?=-check)', message)
# If there's no nil-check search for method name that comes after a `#`
if m is None:
m = re.search('(?<=#)\S+', message)
return m.group(0) if m else None
| Python | 0 |
d20d035516f279b00deeae9ad55d3540f02eaf33 | Fix deprecation warnings | linter.py | linter.py | #
# linter.py
# Linter for SublimeLinter3, a code checking framework for Sublime Text 3
#
# Written by Fred Callaway
# Copyright (c) 2015 Fred Callaway
# Copyright (c) 2017 FichteFoll <fichtefoll2@googlemail.com>
#
# License: MIT
#
"""This module exports the Mypy plugin class."""
import logging
import os
import shutil
import tempfile
import getpass
from SublimeLinter.lint import const
from SublimeLinter.lint import PythonLinter
USER = getpass.getuser()
TMPDIR_PREFIX = "SublimeLinter-contrib-mypy-%s" % USER
logger = logging.getLogger("SublimeLinter.plugin.mypy")
# Mapping for our created temporary directories.
# For smarter caching purposes,
# we index different cache folders based on the working dir.
tmpdirs = {}
class Mypy(PythonLinter):
"""Provides an interface to mypy."""
regex = r'^(\w:)?[^:]+:(?P<line>\d+):((?P<col>\d+):)?\s*(?P<error_type>[^:]+):\s*(?P<message>.+)'
line_col_base = (1, 1)
tempfile_suffix = 'py'
default_type = const.WARNING
# Pretty much all interesting options don't expect a value,
# so you'll have to specify those in "args" anyway.
# This dict only contains settings for which we have special handling.
defaults = {
'selector': "source.python",
# Will default to tempfile.TemporaryDirectory if empty.
"--cache-dir:": "",
# Allow users to disable this
"--incremental": True,
# Need this to silent lints for other files. Alternatively: 'skip'
"--follow-imports:": "silent",
}
def cmd(self):
"""Return a list with the command line to execute."""
cmd = [
'mypy',
'${args}',
'--show-column-numbers',
'--hide-error-context',
# '--incremental',
]
if self.filename:
cmd.extend([
# --shadow-file SOURCE_FILE SHADOW_FILE
#
# '@' needs to be the (temporary) shadow file,
# while we request the normal filename
# to be checked in its normal environment.
'--shadow-file', '${file}', '${temp_file}',
# The file we want to lint on the surface
'${file}',
])
else:
cmd.append('${temp_file}')
# Add a temporary cache dir to the command if none was specified.
# Helps keep the environment clean
# by not littering everything with `.mypy_cache` folders.
if not self.settings.get('cache-dir'):
cwd = self.get_working_dir()
if cwd in tmpdirs:
cache_dir = tmpdirs[cwd].name
else:
tmp_dir = tempfile.TemporaryDirectory(prefix=TMPDIR_PREFIX)
tmpdirs[cwd] = tmp_dir
cache_dir = tmp_dir.name
logger.info("Created temporary cache dir at: %s", cache_dir)
cmd[1:1] = ["--cache-dir", cache_dir]
return cmd
def _cleanup_tmpdirs():
def _onerror(function, path, exc_info):
logger.exception("Unable to delete '%s' while cleaning up temporary directory", path,
exc_info=exc_info)
tmpdir = tempfile.gettempdir()
for dirname in os.listdir(tmpdir):
if dirname.startswith(TMPDIR_PREFIX):
shutil.rmtree(os.path.join(tmpdir, dirname), onerror=_onerror)
def plugin_loaded():
"""Attempt to clean up temporary directories from previous runs."""
_cleanup_tmpdirs()
def plugin_unloaded():
"""Clear references to TemporaryDirectory instances.
They should then be removed automatically.
"""
# (Actually, do we even need to do this?)
tmpdirs.clear()
| #
# linter.py
# Linter for SublimeLinter3, a code checking framework for Sublime Text 3
#
# Written by Fred Callaway
# Copyright (c) 2015 Fred Callaway
# Copyright (c) 2017 FichteFoll <fichtefoll2@googlemail.com>
#
# License: MIT
#
"""This module exports the Mypy plugin class."""
import logging
import os
import shutil
import tempfile
import getpass
from SublimeLinter.lint import const
from SublimeLinter.lint import PythonLinter
USER = getpass.getuser()
TMPDIR_PREFIX = "SublimeLinter-contrib-mypy-%s" % USER
logger = logging.getLogger("SublimeLinter.plugin.mypy")
# Mapping for our created temporary directories.
# For smarter caching purposes,
# we index different cache folders based on the working dir.
tmpdirs = {}
class Mypy(PythonLinter):
"""Provides an interface to mypy."""
regex = r'^(\w:)?[^:]+:(?P<line>\d+):((?P<col>\d+):)?\s*(?P<error_type>[^:]+):\s*(?P<message>.+)'
line_col_base = (1, 1)
tempfile_suffix = 'py'
default_type = const.WARNING
# Pretty much all interesting options don't expect a value,
# so you'll have to specify those in "args" anyway.
# This dict only contains settings for which we have special handling.
defaults = {
'selector': "source.python",
# Will default to tempfile.TemporaryDirectory if empty.
"--cache-dir:": "",
# Allow users to disable this
"--incremental": True,
# Need this to silent lints for other files. Alternatively: 'skip'
"--follow-imports:": "silent",
}
def cmd(self):
"""Return a list with the command line to execute."""
cmd = [
'mypy',
'${args}',
'--show-column-numbers',
'--hide-error-context',
# '--incremental',
]
if self.filename:
cmd.extend([
# --shadow-file SOURCE_FILE SHADOW_FILE
#
# '@' needs to be the (temporary) shadow file,
# while we request the normal filename
# to be checked in its normal environment.
'--shadow-file', '${file}', '${temp_file}',
# The file we want to lint on the surface
'${file}',
])
else:
cmd.append('${temp_file}')
# Add a temporary cache dir to the command if none was specified.
# Helps keep the environment clean
# by not littering everything with `.mypy_cache` folders.
settings = self.get_view_settings()
if not settings.get('cache-dir'):
cwd = self.get_working_dir(settings)
if cwd in tmpdirs:
cache_dir = tmpdirs[cwd].name
else:
tmp_dir = tempfile.TemporaryDirectory(prefix=TMPDIR_PREFIX)
tmpdirs[cwd] = tmp_dir
cache_dir = tmp_dir.name
logger.info("Created temporary cache dir at: %s", cache_dir)
cmd[1:1] = ["--cache-dir", cache_dir]
return cmd
def _cleanup_tmpdirs():
def _onerror(function, path, exc_info):
logger.exception("Unable to delete '%s' while cleaning up temporary directory", path,
exc_info=exc_info)
tmpdir = tempfile.gettempdir()
for dirname in os.listdir(tmpdir):
if dirname.startswith(TMPDIR_PREFIX):
shutil.rmtree(os.path.join(tmpdir, dirname), onerror=_onerror)
def plugin_loaded():
"""Attempt to clean up temporary directories from previous runs."""
_cleanup_tmpdirs()
def plugin_unloaded():
"""Clear references to TemporaryDirectory instances.
They should then be removed automatically.
"""
# (Actually, do we even need to do this?)
tmpdirs.clear()
| Python | 0.00011 |
68293b6075ead70651924761e4e3187286ad6765 | Add the proper tests user/pass. | integration_tests/test_basic_page_loads.py | integration_tests/test_basic_page_loads.py | from django.contrib.auth.models import User
from django.test import testcases
from django.test.client import Client
class Fail(testcases.TestCase):
def setUp(self):
super(Fail, self).setUp()
u = User(username='john_doe')
u.set_password('password')
u.is_superuser = True
u.save()
self.client = Client()
def test_require_login(self):
response = self.client.get('/')
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, 'http://testserver/user/login?next=/')
def test_login(self):
response = self.client.post('/user/login?next=/',
{'username': 'john_doe', 'password': 'password'})
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, 'http://testserver/')
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
def test_login_redirect_correct(self):
response = self.client.post('/user/login?next=/dummy_url',
{'username': 'john_doe', 'password': 'password'})
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, 'http://testserver/dummy_url')
def test_profile(self):
self.client.post('/user/login',
{'username': 'john_doe', 'password': 'password'})
response = self.client.get('/profile/')
self.assertEqual(response.status_code, 200)
| from django.contrib.auth.models import User
from django.test import testcases
from django.test.client import Client
class Fail(testcases.TestCase):
def setUp(self):
super(Fail, self).setUp()
u = User(username='john_doe')
u.set_password('password')
u.is_superuser = True
u.save()
self.client = Client()
def test_require_login(self):
response = self.client.get('/')
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, 'http://testserver/user/login?next=/')
def test_login(self):
response = self.client.post('/user/login?next=/',
{'username': 'ivailo', 'password': 'Heman3f5'})
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, 'http://testserver/')
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
def test_login_redirect_correct(self):
response = self.client.post('/user/login?next=/dummy_url',
{'username': 'ivailo', 'password': 'Heman3f5'})
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, 'http://testserver/dummy_url')
def test_profile(self):
self.client.post('/user/login',
{'username': 'ivailo', 'password': 'Heman3f5'})
response = self.client.get('/profile/')
self.assertEqual(response.status_code, 200)
| Python | 0 |
005c684b88e6383aabe5294bfa0104ba4fb3ed40 | Use the fancier tmp_file management in tests | test/ops/test_index.py | test/ops/test_index.py | """
Tests for index operations
"""
from unittest import TestCase
import os
import sys
import xarray as xr
import pandas as pd
import numpy as np
from datetime import datetime
import tempfile
import shutil
from contextlib import contextmanager
import itertools
from cate.ops import index
from cate.ops import subset
def assert_dataset_equal(expected, actual):
# this method is functionally equivalent to
# `assert expected == actual`, but it checks each aspect
# of equality separately for easier debugging
assert expected.equals(actual), (expected, actual)
_counter = itertools.count()
ON_WIN = sys.platform == 'win32'
@contextmanager
def create_tmp_file():
tmp_dir = tempfile.mkdtemp()
path = os.path.join(tmp_dir, 'tmp_file_{}.nc'.format(next(_counter)))
try:
yield path
finally:
try:
shutil.rmtree(tmp_dir)
except OSError:
if not ON_WIN:
raise
class TestIndices(TestCase):
def test_n34(self):
"""
Test ENSO index calculation using Nino34 region
"""
dataset = xr.Dataset({
'first': (['lat', 'lon', 'time'], np.ones([45, 90, 24])),
'second': (['lat', 'lon', 'time'], np.ones([45, 90, 24])),
'lat': np.linspace(-88, 88, 45),
'lon': np.linspace(-178, 178, 90),
'time': ([datetime(2001, x, 1) for x in range(1, 13)] +
[datetime(2002, x, 1) for x in range(1, 13)])})
actual = subset.subset_spatial(dataset, "-20, -10, 20, 10")
expected = xr.Dataset({
'first': (['lat', 'lon', 'time'], np.ones([5, 10, 24])),
'second': (['lat', 'lon', 'time'], np.ones([5, 10, 24])),
'lat': np.linspace(-8, 8, 5),
'lon': np.linspace(-18, 18, 10),
'time': ([datetime(2001, x, 1) for x in range(1, 13)] +
[datetime(2002, x, 1) for x in range(1, 13)])})
assert_dataset_equal(expected, actual)
lta = xr.Dataset({
'first': (['lat', 'lon', 'time'], np.ones([45, 90, 12])),
'second': (['lat', 'lon', 'time'], np.ones([45, 90, 12])),
'lat': np.linspace(-88, 88, 45),
'lon': np.linspace(-178, 178, 90),
'time': [x for x in range(1,13)]})
lta = 2*lta
with create_tmp_file() as tmp_file:
lta.to_netcdf(tmp_file)
ret = index.enso_nino34(dataset, 'first', tmp_file)
print(ret)
def test_preset_region(self):
"""
Test ENSO index calculation using a pre-defined region
"""
pass
def test_custom(self):
"""
Test ENSO index calculation using a user-supplied region
"""
pass
def test_oni(self):
"""
Test ONI index calculation.
"""
pass
| """
Tests for index operations
"""
from unittest import TestCase
import os
import xarray as xr
import pandas as pd
import numpy as np
from datetime import datetime
from cate.ops import index
from cate.ops import subset
def assert_dataset_equal(expected, actual):
# this method is functionally equivalent to
# `assert expected == actual`, but it checks each aspect
# of equality separately for easier debugging
assert expected.equals(actual), (expected, actual)
class TestIndices(TestCase):
def test_n34(self):
"""
Test ENSO index calculation using Nino34 region
"""
tmp_path = 'temp_lta.nc'
dataset = xr.Dataset({
'first': (['lat', 'lon', 'time'], np.ones([45, 90, 24])),
'second': (['lat', 'lon', 'time'], np.ones([45, 90, 24])),
'lat': np.linspace(-88, 88, 45),
'lon': np.linspace(-178, 178, 90),
'time': ([datetime(2001, x, 1) for x in range(1, 13)] +
[datetime(2002, x, 1) for x in range(1, 13)])})
actual = subset.subset_spatial(dataset, "-20, -10, 20, 10")
expected = xr.Dataset({
'first': (['lat', 'lon', 'time'], np.ones([5, 10, 24])),
'second': (['lat', 'lon', 'time'], np.ones([5, 10, 24])),
'lat': np.linspace(-8, 8, 5),
'lon': np.linspace(-18, 18, 10),
'time': ([datetime(2001, x, 1) for x in range(1, 13)] +
[datetime(2002, x, 1) for x in range(1, 13)])})
assert_dataset_equal(expected, actual)
lta = xr.Dataset({
'first': (['lat', 'lon', 'time'], np.ones([45, 90, 12])),
'second': (['lat', 'lon', 'time'], np.ones([45, 90, 12])),
'lat': np.linspace(-88, 88, 45),
'lon': np.linspace(-178, 178, 90),
'time': [x for x in range(1,13)]})
lta = 2*lta
lta.to_netcdf(tmp_path)
ret = index.enso_nino34(dataset, 'first', tmp_path)
print(ret)
try:
os.remove(tmp_path)
except OSError:
# Doesn't exist
pass
def test_preset_region(self):
"""
Test ENSO index calculation using a pre-defined region
"""
pass
def test_custom(self):
"""
Test ENSO index calculation using a user-supplied region
"""
pass
def test_oni(self):
"""
Test ONI index calculation.
"""
pass
| Python | 0 |
759a6994441c35400965beea19e6425b377cf4e8 | add datetime_format | cloud.py | cloud.py | # coding: utf-8
import leancloud
from leancloud import Engine
from leancloud import LeanEngineError
from app import app
from logentries import LogentriesHandler
import logging
from qiniu import Auth
from qiniu import BucketManager
import requests
import os
import json
import time
engine = Engine(app)
log = logging.getLogger('logentries')
log.setLevel(logging.INFO)
log.addHandler(LogentriesHandler(os.environ.get('logentries_key')))
access_key = os.environ.get('qiniu_ak')
secret_key = os.environ.get('qiniu_sk')
bucket_name = os.environ.get('qiniu_bn')
bucket_domain = os.environ.get('qiniu_bd')
q = Auth(access_key, secret_key)
bucket = BucketManager(q)
hook_url = os.environ.get('hook_url')
# local_time = time.localtime()
# # day_stamp = time.strftime("%Y-%m-%d")
# day_stamp = '20161013'
# collection = db['realtime_quotes_'+day_stamp]
datetime_format = "%Y-%m-%d %H:%M:%S"
granularity =10
def cache_sina_stock_gif(stock_code):
if stock_code.startswith('60'):
sina_code = 'sh'+stock_code
else:
sina_code = 'sz'+stock_code
image_url = 'http://image.sinajs.cn/newchart/min/n/{sina_code}.gif'.format(sina_code=sina_code)
ts = int(time.time())
key = stock_code +'-'+str(ts) + '-sina.gif'
ret, info = bucket.fetch(image_url, bucket_name, key)
# log.info(stock_code+' '+str(info))
if '200' in str(info)[0:50]:
return bucket_domain+key
else:
return image_url
def alarming_bearychat(msg):
stock_code = msg['stock_code']
img_url = cache_sina_stock_gif(stock_code)
src = u'新图' if 'sinajs' in img_url else u'缓存'
bearychat_msg ={
"text": '**'+msg['name']+' '+ stock_code+'**\n>'+' | '.join(msg['time_list']),
"markdown": True,
"attachments": [{
"text": msg['name']+u" 分时图 ("+ src +') '+time.strftime(datetime_format),
"color": "#ff0000",
"images": [{"url": img_url}]
}]
}
headers = {
'Content-Type': 'application/json'
}
requests.post(hook_url,headers = headers,data = json.dumps(bearychat_msg))
def test_alarming_bearychat(msg):
stock_code = msg['stock_code']
img_url = cache_sina_stock_gif(stock_code)
src = u'新图' if 'sinajs' in img_url else u'缓存'
bearychat_msg ={
"text": '**'+str(msg['index'])+'.'+msg['name']+' '+ stock_code+'**\n>'+' | '.join(msg['time_list']),
"markdown": True,
"attachments": [{
"text": msg['name']+u" 分时图 ("+ src +') '+time.strftime(datetime_format),
"color": "#ff0000",
"images": [{"url": img_url}]
}]
}
headers = {
'Content-Type': 'application/json'
}
log.info(json.dumps(bearychat_msg))
requests.post(hook_url,headers = headers,data = json.dumps(bearychat_msg))
@engine.after_save('Alert') # Alert 为需要 hook 的 class 的名称
def after_alert_save(alert):
try:
msg = alert.get('msg')
test_alarming_bearychat(msg)
log.info(msg)
except leancloud.LeanCloudError:
raise leancloud.LeanEngineError(message='An error occurred while trying to save the Alert. ')
| # coding: utf-8
import leancloud
from leancloud import Engine
from leancloud import LeanEngineError
from app import app
from logentries import LogentriesHandler
import logging
from qiniu import Auth
from qiniu import BucketManager
import requests
import os
import json
import time
engine = Engine(app)
log = logging.getLogger('logentries')
log.setLevel(logging.INFO)
log.addHandler(LogentriesHandler(os.environ.get('logentries_key')))
access_key = os.environ.get('qiniu_ak')
secret_key = os.environ.get('qiniu_sk')
bucket_name = os.environ.get('qiniu_bn')
bucket_domain = os.environ.get('qiniu_bd')
q = Auth(access_key, secret_key)
bucket = BucketManager(q)
hook_url = os.environ.get('hook_url')
def cache_sina_stock_gif(stock_code):
if stock_code.startswith('60'):
sina_code = 'sh'+stock_code
else:
sina_code = 'sz'+stock_code
image_url = 'http://image.sinajs.cn/newchart/min/n/{sina_code}.gif'.format(sina_code=sina_code)
ts = int(time.time())
key = stock_code +'-'+str(ts) + '-sina.gif'
ret, info = bucket.fetch(image_url, bucket_name, key)
# log.info(stock_code+' '+str(info))
if '200' in str(info)[0:50]:
return bucket_domain+key
else:
return image_url
def alarming_bearychat(msg):
stock_code = msg['stock_code']
img_url = cache_sina_stock_gif(stock_code)
src = u'新图' if 'sinajs' in img_url else u'缓存'
bearychat_msg ={
"text": '**'+msg['name']+' '+ stock_code+'**\n>'+' | '.join(msg['time_list']),
"markdown": True,
"attachments": [{
"text": msg['name']+u" 分时图 ("+ src +') '+time.strftime(datetime_format),
"color": "#ff0000",
"images": [{"url": img_url}]
}]
}
headers = {
'Content-Type': 'application/json'
}
requests.post(hook_url,headers = headers,data = json.dumps(bearychat_msg))
def test_alarming_bearychat(msg):
stock_code = msg['stock_code']
img_url = cache_sina_stock_gif(stock_code)
src = u'新图' if 'sinajs' in img_url else u'缓存'
bearychat_msg ={
"text": '**'+str(msg['index'])+'.'+msg['name']+' '+ stock_code+'**\n>'+' | '.join(msg['time_list']),
"markdown": True,
"attachments": [{
"text": msg['name']+u" 分时图 ("+ src +') '+time.strftime(datetime_format),
"color": "#ff0000",
"images": [{"url": img_url}]
}]
}
headers = {
'Content-Type': 'application/json'
}
log.info(json.dumps(bearychat_msg))
requests.post(hook_url,headers = headers,data = json.dumps(bearychat_msg))
@engine.after_save('Alert') # Alert 为需要 hook 的 class 的名称
def after_alert_save(alert):
try:
msg = alert.get('msg')
test_alarming_bearychat(msg)
log.info(msg)
except leancloud.LeanCloudError:
raise leancloud.LeanEngineError(message='An error occurred while trying to save the Alert. ')
| Python | 0.002308 |
853dc8de1d077494c707a5ec8a6b75ac0e0628cf | Add trailing slash to URL for consistency. | cadorsfeed/views.py | cadorsfeed/views.py | from werkzeug import redirect, Response
from werkzeug.exceptions import NotFound
from cadorsfeed.utils import expose, url_for, db
from parse import parse
from fetch import fetchLatest, fetchReport
@expose('/report/latest/')
def latest_report(request):
if 'latest' in db:
latestDate = db['latest']
else:
latestDate = fetchLatest()
db['latest'] = latestDate
db.expire('latest',60*60)
(year, month, day) = latestDate.split('-')
return redirect(url_for('do_report', year=year, month=month, day=day))
@expose('/report/<int:year>/<int:month>/<int:day>/')
def do_report(request, year, month, day):
refetch = request.args.get('refetch','0') == '1'
reparse = request.args.get('reparse','0') == '1' or refetch
date = "{year:04.0f}-{month:02.0f}-{day:02.0f}".format(
year=year, month=month, day=day)
key = "report:"+date
if db.hexists(key, "output") and not reparse:
output = db.hget(key, "output")
else:
if db.hexists(key, "input") and not refetch:
input = db.hget(key, "input").decode('utf-8')
else:
input = fetchReport(date)
db.hset(key, "input", input)
output = parse(input)
db.hset(key,"output", output)
return Response(output, mimetype="application/atom+xml")
| from werkzeug import redirect, Response
from werkzeug.exceptions import NotFound
from cadorsfeed.utils import expose, url_for, db
from parse import parse
from fetch import fetchLatest, fetchReport
@expose('/report/latest')
def latest_report(request):
if 'latest' in db:
latestDate = db['latest']
else:
latestDate = fetchLatest()
db['latest'] = latestDate
db.expire('latest',60*60)
(year, month, day) = latestDate.split('-')
return redirect(url_for('do_report', year=year, month=month, day=day))
@expose('/report/<int:year>/<int:month>/<int:day>/')
def do_report(request, year, month, day):
refetch = request.args.get('refetch','0') == '1'
reparse = request.args.get('reparse','0') == '1' or refetch
date = "{year:04.0f}-{month:02.0f}-{day:02.0f}".format(
year=year, month=month, day=day)
key = "report:"+date
if db.hexists(key, "output") and not reparse:
output = db.hget(key, "output")
else:
if db.hexists(key, "input") and not refetch:
input = db.hget(key, "input").decode('utf-8')
else:
input = fetchReport(date)
db.hset(key, "input", input)
output = parse(input)
db.hset(key,"output", output)
return Response(output, mimetype="application/atom+xml")
| Python | 0 |
cb50a43435de4e3b62324d1b738f3775cabe7367 | Fix reverse url in RecentChangesFeed | candidates/feeds.py | candidates/feeds.py | from __future__ import unicode_literals
import re
from django.contrib.sites.models import Site
from django.contrib.syndication.views import Feed
from django.core.urlresolvers import reverse
from django.utils.feedgenerator import Atom1Feed
from django.utils.text import slugify
from django.utils.translation import ugettext_lazy as _
from .models import LoggedAction
lock_re = re.compile(r'^(?:Unl|L)ocked\s*constituency (.*) \((\d+)\)$')
class RecentChangesFeed(Feed):
site_name = Site.objects.get_current().name
title = _("{site_name} recent changes").format(site_name=site_name)
description = _("Changes to {site_name} candidates").format(site_name=site_name)
link = "/feeds/changes.xml"
feed_type = Atom1Feed
def items(self):
return LoggedAction.objects.order_by('-updated')[:50]
def item_title(self, item):
return "{0} - {1}".format(
item.person_id,
item.action_type
)
def item_description(self, item):
updated = _("Updated at {0}").format(str(item.updated))
description = "{0}\n\n{1}\n".format(item.source, updated)
return description
def item_link(self, item):
# As a hack for the moment, constituencies are just mentioned
# in the source message:
if item.person_id:
return reverse('person-view', args=[item.person_id])
else:
return '/'
| from __future__ import unicode_literals
import re
from django.contrib.sites.models import Site
from django.contrib.syndication.views import Feed
from django.core.urlresolvers import reverse
from django.utils.feedgenerator import Atom1Feed
from django.utils.text import slugify
from django.utils.translation import ugettext_lazy as _
from .models import LoggedAction
lock_re = re.compile(r'^(?:Unl|L)ocked\s*constituency (.*) \((\d+)\)$')
class RecentChangesFeed(Feed):
site_name = Site.objects.get_current().name
title = _("{site_name} recent changes").format(site_name=site_name)
description = _("Changes to {site_name} candidates").format(site_name=site_name)
link = "/feeds/changes.xml"
feed_type = Atom1Feed
def items(self):
return LoggedAction.objects.order_by('-updated')[:50]
def item_title(self, item):
m = lock_re.search(item.source)
if m:
return "{0} - {1}".format(
m.group(1),
item.action_type
)
else:
return "{0} - {1}".format(
item.person_id,
item.action_type
)
def item_description(self, item):
updated = _("Updated at {0}").format(str(item.updated))
description = "{0}\n\n{1}\n".format(item.source, updated)
return description
def item_link(self, item):
# As a hack for the moment, constituencies are just mentioned
# in the source message:
m = lock_re.search(item.source)
if m:
return reverse('constituency', kwargs={
'post_id': m.group(2),
'ignored_slug': slugify(m.group(1))
})
else:
if item.person_id:
return reverse('person-view', args=[item.person_id])
else:
return '/'
| Python | 0.999993 |
63caf1fceb94d185e73858c2b58c82bf5912b7c4 | Add documentation for coding formatter | beetsplug/hook.py | beetsplug/hook.py | # This file is part of beets.
# Copyright 2015, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Allows custom commands to be run when an event is emitted by beets"""
from __future__ import division, absolute_import, print_function
import string
import subprocess
from beets.plugins import BeetsPlugin
from beets.ui import _arg_encoding
from beets.util import shlex_split
class CodingFormatter(string.Formatter):
"""A custom string formatter that decodes the format string and it's
fields.
"""
def __init__(self, coding):
"""Creates a new coding formatter with the provided coding."""
self._coding = coding
def format(self, format_string, *args, **kwargs):
"""Formats the provided string using the provided arguments and keyword
arguments.
This method decodes the format string using the formatter's coding.
See str.format and string.Formatter.format.
"""
try:
format_string = format_string.decode(self._coding)
except UnicodeEncodeError:
pass
return super(CodingFormatter, self).format(format_string, *args,
**kwargs)
def convert_field(self, value, conversion):
"""Converts the provided value given a conversion type.
This method decodes the converted value using the formatter's coding.
See string.Formatter.convert_field.
"""
converted = super(CodingFormatter, self).convert_field(value,
conversion)
try:
converted = converted.decode(self._coding)
except UnicodeEncodeError:
pass
return converted
class HookPlugin(BeetsPlugin):
"""Allows custom commands to be run when an event is emitted by beets"""
def __init__(self):
super(HookPlugin, self).__init__()
self.config.add({
'hooks': []
})
hooks = self.config['hooks'].get(list)
for hook_index in range(len(hooks)):
hook = self.config['hooks'][hook_index]
hook_event = hook['event'].get()
hook_command = hook['command'].get()
self.create_and_register_hook(hook_event, hook_command)
def create_and_register_hook(self, event, command):
def hook_function(**kwargs):
if command is None or len(command) == 0:
self._log.error('invalid command "{0}"', command)
return
encoding = _arg_encoding()
formatter = CodingFormatter(encoding)
formatted_command = formatter.format(command, event=event,
**kwargs)
command_pieces = shlex_split(formatted_command)
self._log.debug(u'running command "{0}" for event {1}',
formatted_command, event)
try:
subprocess.Popen(command_pieces).wait()
except OSError as exc:
self._log.error(u'hook for {0} failed: {1}', event, exc)
self.register_listener(event, hook_function)
| # This file is part of beets.
# Copyright 2015, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Allows custom commands to be run when an event is emitted by beets"""
from __future__ import division, absolute_import, print_function
import string
import subprocess
from beets.plugins import BeetsPlugin
from beets.ui import _arg_encoding
from beets.util import shlex_split
class CodingFormatter(string.Formatter):
def __init__(self, coding):
self._coding = coding
def format(self, format_string, *args, **kwargs):
try:
format_string = format_string.decode(self._coding)
except UnicodeEncodeError:
pass
return super(CodingFormatter, self).format(format_string, *args,
**kwargs)
def convert_field(self, value, conversion):
converted = super(CodingFormatter, self).convert_field(value,
conversion)
try:
converted = converted.decode(self._coding)
except UnicodeEncodeError:
pass
return converted
class HookPlugin(BeetsPlugin):
"""Allows custom commands to be run when an event is emitted by beets"""
def __init__(self):
super(HookPlugin, self).__init__()
self.config.add({
'hooks': []
})
hooks = self.config['hooks'].get(list)
for hook_index in range(len(hooks)):
hook = self.config['hooks'][hook_index]
hook_event = hook['event'].get()
hook_command = hook['command'].get()
self.create_and_register_hook(hook_event, hook_command)
def create_and_register_hook(self, event, command):
def hook_function(**kwargs):
if command is None or len(command) == 0:
self._log.error('invalid command "{0}"', command)
return
encoding = _arg_encoding()
formatter = CodingFormatter(encoding)
formatted_command = formatter.format(command, event=event,
**kwargs)
command_pieces = shlex_split(formatted_command)
self._log.debug(u'running command "{0}" for event {1}',
formatted_command, event)
try:
subprocess.Popen(command_pieces).wait()
except OSError as exc:
self._log.error(u'hook for {0} failed: {1}', event, exc)
self.register_listener(event, hook_function)
| Python | 0 |
847d9c4a1e88b9e00a3be082db635743866a8abd | Fix tests | catalog/__init__.py | catalog/__init__.py | from os import environ
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
DB_URL = 'postgresql:///catalog' + ('_test' if environ.get('ENV') == 'test' else '')
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = DB_URL
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
if environ.get('ENV') != 'test':
from flask_wtf.csrf import CSRFProtect
csrf = CSRFProtect()
csrf.init_app(app)
else:
app.jinja_env.globals['csrf_token'] = lambda: 'test' | from os import environ
from flask import Flask
from flask_wtf.csrf import CSRFProtect
from flask_sqlalchemy import SQLAlchemy
DB_URL = 'postgresql:///catalog' + ('_test' if environ.get('ENV') == 'test' else '')
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = DB_URL
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
csrf = CSRFProtect()
csrf.init_app(app) | Python | 0.000003 |
31fd889ec6d8851ce61085b0cbd15b86195905a8 | remove unused imports | test/scripts/window.py | test/scripts/window.py | #!/usr/bin/env python
"""
This program is carefully crafted to exercise a number of corner-cases in
Qtile.
"""
from __future__ import print_function
import sys
import time
import xcffib
import xcffib.xproto
def configure(window):
window.configure(
width=100,
height=100,
x=0,
y=0,
border_width=1,
)
for i in range(20):
try:
conn = xcffib.connect(display=sys.argv[1])
except xcffib.ConnectionException:
time.sleep(0.1)
continue
except Exception as v:
print("Error opening test window: ", type(v), v, file=sys.stderr)
sys.exit(1)
break
else:
print("Could not open window on display %s" % (sys.argv[1]), file=sys.stderr)
sys.exit(1)
screen = conn.get_setup().roots[conn.pref_screen]
window = conn.generate_id()
background = conn.core.AllocColor(screen.default_colormap, 0x2828, 0x8383, 0xCECE).reply().pixel # Color "#2883ce"
conn.core.CreateWindow(xcffib.CopyFromParent, window, screen.root,
100, 100, 100, 100, 1,
xcffib.xproto.WindowClass.InputOutput, screen.root_visual,
xcffib.xproto.CW.BackPixel | xcffib.xproto.CW.EventMask,
[background, xcffib.xproto.EventMask.StructureNotify | xcffib.xproto.EventMask.Exposure])
conn.core.ChangeProperty(xcffib.xproto.PropMode.Replace,
window, xcffib.xproto.Atom.WM_NAME,
xcffib.xproto.Atom.STRING, 8, len(sys.argv[2]),
sys.argv[2])
wm_protocols = "WM_PROTOCOLS"
wm_protocols = conn.core.InternAtom(0, len(wm_protocols), wm_protocols).reply().atom
wm_delete_window = "WM_DELETE_WINDOW"
wm_delete_window = conn.core.InternAtom(0, len(wm_delete_window), wm_delete_window).reply().atom
conn.core.ChangeProperty(xcffib.xproto.PropMode.Replace,
window, wm_protocols,
xcffib.xproto.Atom.ATOM, 32, 1,
[wm_delete_window])
conn.core.ConfigureWindow(window,
xcffib.xproto.ConfigWindow.X | xcffib.xproto.ConfigWindow.Y |
xcffib.xproto.ConfigWindow.Width | xcffib.xproto.ConfigWindow.Height |
xcffib.xproto.ConfigWindow.BorderWidth,
[0, 0, 100, 100, 1])
conn.core.MapWindow(window)
conn.flush()
conn.core.ConfigureWindow(window,
xcffib.xproto.ConfigWindow.X | xcffib.xproto.ConfigWindow.Y |
xcffib.xproto.ConfigWindow.Width | xcffib.xproto.ConfigWindow.Height |
xcffib.xproto.ConfigWindow.BorderWidth,
[0, 0, 100, 100, 1])
try:
while 1:
event = conn.wait_for_event()
if event.__class__ == xcffib.xproto.ClientMessageEvent:
if conn.core.GetAtomName(event.type).reply().name.as_string() == "WM_DELETE_WINDOW":
sys.exit(1)
except xcffib.XcffibException:
pass
| #!/usr/bin/env python
"""
This program is carefully crafted to exercise a number of corner-cases in
Qtile.
"""
from __future__ import print_function
import sys
import time
import struct
import xcffib
import xcffib.xproto
try:
from StringIO import StringIO # Python 2
except ImportError:
from io import StringIO # Python 3
def configure(window):
window.configure(
width=100,
height=100,
x=0,
y=0,
border_width=1,
)
for i in range(20):
try:
conn = xcffib.connect(display=sys.argv[1])
except xcffib.ConnectionException:
time.sleep(0.1)
continue
except Exception as v:
print("Error opening test window: ", type(v), v, file=sys.stderr)
sys.exit(1)
break
else:
print("Could not open window on display %s" % (sys.argv[1]), file=sys.stderr)
sys.exit(1)
screen = conn.get_setup().roots[conn.pref_screen]
window = conn.generate_id()
background = conn.core.AllocColor(screen.default_colormap, 0x2828, 0x8383, 0xCECE).reply().pixel # Color "#2883ce"
conn.core.CreateWindow(xcffib.CopyFromParent, window, screen.root,
100, 100, 100, 100, 1,
xcffib.xproto.WindowClass.InputOutput, screen.root_visual,
xcffib.xproto.CW.BackPixel | xcffib.xproto.CW.EventMask,
[background, xcffib.xproto.EventMask.StructureNotify | xcffib.xproto.EventMask.Exposure])
conn.core.ChangeProperty(xcffib.xproto.PropMode.Replace,
window, xcffib.xproto.Atom.WM_NAME,
xcffib.xproto.Atom.STRING, 8, len(sys.argv[2]),
sys.argv[2])
wm_protocols = "WM_PROTOCOLS"
wm_protocols = conn.core.InternAtom(0, len(wm_protocols), wm_protocols).reply().atom
wm_delete_window = "WM_DELETE_WINDOW"
wm_delete_window = conn.core.InternAtom(0, len(wm_delete_window), wm_delete_window).reply().atom
conn.core.ChangeProperty(xcffib.xproto.PropMode.Replace,
window, wm_protocols,
xcffib.xproto.Atom.ATOM, 32, 1,
[wm_delete_window])
conn.core.ConfigureWindow(window,
xcffib.xproto.ConfigWindow.X | xcffib.xproto.ConfigWindow.Y |
xcffib.xproto.ConfigWindow.Width | xcffib.xproto.ConfigWindow.Height |
xcffib.xproto.ConfigWindow.BorderWidth,
[0, 0, 100, 100, 1])
conn.core.MapWindow(window)
conn.flush()
conn.core.ConfigureWindow(window,
xcffib.xproto.ConfigWindow.X | xcffib.xproto.ConfigWindow.Y |
xcffib.xproto.ConfigWindow.Width | xcffib.xproto.ConfigWindow.Height |
xcffib.xproto.ConfigWindow.BorderWidth,
[0, 0, 100, 100, 1])
try:
while 1:
event = conn.wait_for_event()
if event.__class__ == xcffib.xproto.ClientMessageEvent:
if conn.core.GetAtomName(event.type).reply().name.as_string() == "WM_DELETE_WINDOW":
sys.exit(1)
except xcffib.XcffibException:
pass
| Python | 0.000001 |
7d26429acac78b2b1388a5d069d807038038bd1c | Add a folded indicator | examples/gui_integration/python_editor.py | examples/gui_integration/python_editor.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# PCEF - Python/Qt Code Editing Framework
# Copyright 2013, Colin Duquesnoy <colin.duquesnoy@gmail.com>
#
# This software is released under the LGPLv3 license.
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Integrates the generic editor using the pcef qt designer plugin.
"""
import logging
logging.basicConfig(level=logging.INFO)
import os
import sys
os.environ.setdefault("QT_API", "PyQt")
from pcef.qt import QtCore, QtGui
from pcef.core import FoldingIndicator
if sys.version_info[0] == 3:
from examples.gui_integration.ui.python_editor_ui3 import Ui_MainWindow
logging.info("Using python3")
else:
from examples.gui_integration.ui.python_editor_ui import Ui_MainWindow
logging.info("Using python2")
class PythonEditorWindow(QtGui.QMainWindow, Ui_MainWindow):
def __init__(self):
QtGui.QMainWindow.__init__(self)
self.setupUi(self)
self.editor.foldingPanel.addIndicator(FoldingIndicator(22, 30))
self.editor.foldingPanel.addIndicator(FoldingIndicator(25, 28))
fi = FoldingIndicator(50, 60)
fi.state = fi.FOLDED
self.editor.foldingPanel.addIndicator(fi)
self.editor.dirtyChanged.connect(self.actionSave.setEnabled)
self.actionSave.triggered.connect(self.editor.saveToFile)
mnu = QtGui.QMenu("Edit", self.menubar)
mnu.addActions(self.editor.actions())
self.menubar.addMenu(mnu)
# Add modes to the modes menu
for k, v in self.editor.modes().items():
a = QtGui.QAction(self.menuModes)
a.setText(k)
a.setCheckable(True)
a.setChecked(True)
a.changed.connect(self.onModeCheckStateChanged)
a.mode = v
self.menuModes.addAction(a)
# Add panels to the panels menu
for zones, panel_dic in self.editor.panels().items():
for k, v in panel_dic.items():
a = QtGui.QAction(self.menuModes)
a.setText(k)
a.setCheckable(True)
a.setChecked(True)
a.changed.connect(self.onPanelCheckStateChanged)
a.panel = v
self.menuPanels.addAction(a)
try:
self.editor.openFile(__file__)
except (OSError, IOError) as e:
pass
except AttributeError:
pass
@QtCore.Slot()
def on_actionOpen_triggered(self):
filePath = QtGui.QFileDialog.getOpenFileName(
self, "Choose a file", os.path.expanduser("~"))
if filePath:
self.editor.openFile(filePath)
def onPanelCheckStateChanged(self):
action = self.sender()
action.panel.enabled = action.isChecked()
def onModeCheckStateChanged(self):
action = self.sender()
action.mode.enabled = action.isChecked()
def main():
app = QtGui.QApplication(sys.argv)
win = PythonEditorWindow()
win.show()
app.exec_()
if __name__ == "__main__":
main()
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# PCEF - Python/Qt Code Editing Framework
# Copyright 2013, Colin Duquesnoy <colin.duquesnoy@gmail.com>
#
# This software is released under the LGPLv3 license.
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Integrates the generic editor using the pcef qt designer plugin.
"""
import logging
logging.basicConfig(level=logging.INFO)
import os
import sys
os.environ.setdefault("QT_API", "PyQt")
from pcef.qt import QtCore, QtGui
from pcef.core import FoldingIndicator
if sys.version_info[0] == 3:
from examples.gui_integration.ui.python_editor_ui3 import Ui_MainWindow
logging.info("Using python3")
else:
from examples.gui_integration.ui.python_editor_ui import Ui_MainWindow
logging.info("Using python2")
class PythonEditorWindow(QtGui.QMainWindow, Ui_MainWindow):
def __init__(self):
QtGui.QMainWindow.__init__(self)
self.setupUi(self)
self.editor.foldingPanel.addIndicator(FoldingIndicator(22, 30))
self.editor.foldingPanel.addIndicator(FoldingIndicator(25, 28))
self.editor.foldingPanel.addIndicator(FoldingIndicator(50, 60))
self.editor.dirtyChanged.connect(self.actionSave.setEnabled)
self.actionSave.triggered.connect(self.editor.saveToFile)
mnu = QtGui.QMenu("Edit", self.menubar)
mnu.addActions(self.editor.actions())
self.menubar.addMenu(mnu)
# Add modes to the modes menu
for k, v in self.editor.modes().items():
a = QtGui.QAction(self.menuModes)
a.setText(k)
a.setCheckable(True)
a.setChecked(True)
a.changed.connect(self.onModeCheckStateChanged)
a.mode = v
self.menuModes.addAction(a)
# Add panels to the panels menu
for zones, panel_dic in self.editor.panels().items():
for k, v in panel_dic.items():
a = QtGui.QAction(self.menuModes)
a.setText(k)
a.setCheckable(True)
a.setChecked(True)
a.changed.connect(self.onPanelCheckStateChanged)
a.panel = v
self.menuPanels.addAction(a)
try:
self.editor.openFile(__file__)
except (OSError, IOError) as e:
pass
except AttributeError:
pass
@QtCore.Slot()
def on_actionOpen_triggered(self):
filePath = QtGui.QFileDialog.getOpenFileName(
self, "Choose a file", os.path.expanduser("~"))
if filePath:
self.editor.openFile(filePath)
def onPanelCheckStateChanged(self):
action = self.sender()
action.panel.enabled = action.isChecked()
def onModeCheckStateChanged(self):
action = self.sender()
action.mode.enabled = action.isChecked()
def main():
app = QtGui.QApplication(sys.argv)
win = PythonEditorWindow()
win.show()
app.exec_()
if __name__ == "__main__":
main()
| Python | 0.000001 |
147a24ea0ba9da03b3774b7993e20e785776e027 | Use sys.nstates in stead of using A.shape[0] | control/passivity.py | control/passivity.py | '''
Author: Mark Yeatman
Date: May 15, 2022
'''
from . import statesp as ss
import numpy as np
import cvxopt as cvx
def is_passive(sys):
'''
Indicates if a linear time invarient system is passive
Constructs a linear matrix inequality and a feasibility optimization
such that is a solution exists, the system is passive.
The source for the algorithm is:
McCourt, Michael J., and Panos J. Antsaklis. "Demonstrating passivity and dissipativity using computational methods." ISIS 8 (2013).
'''
A = sys.A
B = sys.B
C = sys.C
D = sys.D
def make_LMI_matrix(P):
V = np.vstack((
np.hstack((A.T @ P + P@A, P@B)),
np.hstack((B.T@P, np.zeros_like(D))))
)
return V
P = np.zeros_like(A)
matrix_list = []
state_space_size = sys.nstates
for i in range(0, state_space_size):
for j in range(0, state_space_size):
if j <= i:
P = P*0.0
P[i, j] = 1.0
P[j, i] = 1.0
matrix_list.append(make_LMI_matrix(P).flatten())
coefficents = np.vstack(matrix_list).T
constants = -np.vstack((
np.hstack((np.zeros_like(A), - C.T)),
np.hstack((- C, -D - D.T)))
)
number_of_opt_vars = int(
(state_space_size**2-state_space_size)/2 + state_space_size)
c = cvx.matrix(0.0, (number_of_opt_vars, 1))
# crunch feasibility solution
sol = cvx.solvers.sdp(c,
Gs=[cvx.matrix(coefficents)],
hs=[cvx.matrix(constants)])
return (sol["x"] is not None)
| '''
Author: Mark Yeatman
Date: May 15, 2022
'''
from . import statesp as ss
import numpy as np
import cvxopt as cvx
def is_passive(sys):
'''
Indicates if a linear time invarient system is passive
Constructs a linear matrix inequality and a feasibility optimization
such that is a solution exists, the system is passive.
The source for the algorithm is:
McCourt, Michael J., and Panos J. Antsaklis. "Demonstrating passivity and dissipativity using computational methods." ISIS 8 (2013).
'''
A = sys.A
B = sys.B
C = sys.C
D = sys.D
def make_LMI_matrix(P):
V = np.vstack((
np.hstack((A.T @ P + P@A, P@B)),
np.hstack((B.T@P, np.zeros_like(D))))
)
return V
P = np.zeros_like(A)
matrix_list = []
state_space_size = A.shape[0]
for i in range(0, state_space_size):
for j in range(0, state_space_size):
if j <= i:
P = P*0.0
P[i, j] = 1.0
P[j, i] = 1.0
matrix_list.append(make_LMI_matrix(P).flatten())
coefficents = np.vstack(matrix_list).T
constants = -np.vstack((
np.hstack((np.zeros_like(A), - C.T)),
np.hstack((- C, -D - D.T)))
)
number_of_opt_vars = int(
(state_space_size**2-state_space_size)/2 + state_space_size)
c = cvx.matrix(0.0, (number_of_opt_vars, 1))
# crunch feasibility solution
sol = cvx.solvers.sdp(c,
Gs=[cvx.matrix(coefficents)],
hs=[cvx.matrix(constants)])
return (sol["x"] is not None)
| Python | 0.000008 |
b8e556871ff4aff9b85c67cc010814a0e6f60386 | Add new constants and change existing file names. | const.py | const.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This module defines constant values for the ScrambleSuit protocol.
While some values can be changed, in general they should not. If you do not
obey, be at least careful because the protocol could easily break.
"""
# Length of the HMAC used to authenticate the ticket.
HMAC_KEY_LENGTH = 32
# Length of the AES key used to encrypt the ticket.
AES_KEY_LENGTH = 16
# FIXME - Directory where long-lived information is stored.
DATA_DIRECTORY = "/tmp/"
# Divisor (in seconds) for the UNIX epoch used to defend against replay
# attacks.
EPOCH_GRANULARITY = 3600
# Flags which can be set in a ScrambleSuit protocol message.
FLAG_PAYLOAD = (1 << 0)
FLAG_NEW_TICKET = (1 << 1)
FLAG_CONFIRM_TICKET = (1 << 2)
FLAG_PRNG_SEED = (1 << 3)
# Length of ScrambleSuit's header in bytes.
HDR_LENGTH = 16 + 2 + 2 + 1
# Length of the HMAC-SHA256-128 in bytes.
HMAC_LENGTH = 16
# Key rotation time for session ticket keys in seconds.
KEY_ROTATION_TIME = 60 * 60 * 24 * 7
# File where session ticket keys are stored.
KEY_STORE = DATA_DIRECTORY + "ticket_keys.pickle"
# Marker used to easily locate the HMAC authenticating handshake messages in
# bytes.
MARKER_LENGTH = 16
# Key length for the master key in bytes.
MASTER_KEY_LENGTH = 32
# The maximum amount of padding to be appended to handshake data.
MAX_PADDING_LENGTH = 4096
# Length of ScrambleSuit's MTU in bytes.
MTU = 1460
# Maximum payload unit of a ScrambleSuit message in bytes.
MPU = MTU - HDR_LENGTH
# Length of a UniformDH public key.
PUBLIC_KEY_LENGTH = 192
# Length of the PRNG seed used to generate probability distributions.
PRNG_SEED_LENGTH = 32
# Files which hold the replay dictionaries.
UNIFORMDH_REPLAY_FILE = DATA_DIRECTORY + "uniformdh_replay_dict.pickle"
TICKET_REPLAY_FILE = DATA_DIRECTORY + "ticket_replay_dict.pickle"
# File which holds the server's state information.
SERVER_STATE_FILE = DATA_DIRECTORY + "server_state.pickle"
# Life time of session tickets in seconds.
SESSION_TICKET_LIFETIME = 60 * 60 * 24 * 7
# SHA256's digest length in bytes.
SHA256_DIGEST_LENGTH = 32
# The length of the UniformDH shared secret in bytes.
SHARED_SECRET_LENGTH = 32
# States which are used for the protocol state machine.
ST_WAIT_FOR_AUTH = 0
ST_CONNECTED = 1
# File which holds our session ticket.
# FIXME - multiple session tickets for multiple servers must be supported.
TICKET_FILE = DATA_DIRECTORY + "session_ticket.pickle"
# Length of a session ticket in bytes.
TICKET_LENGTH = 112
# The protocol name which is used in log messages.
TRANSPORT_NAME = "ScrambleSuit"
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This module defines constant values for the ScrambleSuit protocol.
While some values can be changed, in general they should not. If you do not
obey, be at least careful because the protocol could easily break.
"""
# FIXME - Directory where long-lived information is stored.
DATA_DIRECTORY = "/tmp/"
# Divisor (in seconds) for the UNIX epoch used to defend against replay
# attacks.
EPOCH_GRANULARITY = 3600
# Flags which can be set in a ScrambleSuit protocol message.
FLAG_PAYLOAD = (1 << 0)
FLAG_NEW_TICKET = (1 << 1)
FLAG_CONFIRM_TICKET = (1 << 2)
# Length of ScrambleSuit's header in bytes.
HDR_LENGTH = 16 + 2 + 2 + 1
# Length of the HMAC-SHA256-128 in bytes.
HMAC_LENGTH = 16
# Key rotation time for session ticket keys in seconds.
KEY_ROTATION_TIME = 60 * 60 * 24 * 7
# File where session ticket keys are stored.
KEY_STORE = DATA_DIRECTORY + "ticket_keys.bin"
# Marker used to easily locate the HMAC authenticating handshake messages in
# bytes.
MARKER_LENGTH = 16
# Key length for the master key in bytes.
MASTER_KEY_LENGTH = 32
# The maximum amount of padding to be appended to handshake data.
MAX_PADDING_LENGTH = 4096
# Length of ScrambleSuit's MTU in bytes.
MTU = 1460
# Maximum payload unit of a ScrambleSuit message in bytes.
MPU = MTU - HDR_LENGTH
# Length of a UniformDH public key.
PUBLIC_KEY_LENGTH = 192
# Files which hold the replay dictionaries.
UNIFORMDH_REPLAY_FILE = DATA_DIRECTORY + "uniformdh_replay_dict.pickle"
TICKET_REPLAY_FILE = DATA_DIRECTORY + "ticket_replay_dict.pickle"
# Life time of session tickets in seconds.
SESSION_TICKET_LIFETIME = 60 * 60 * 24 * 7
# SHA256's digest length in bytes.
SHA256_DIGEST_LENGTH = 32
# The length of the UniformDH shared secret in bytes.
SHARED_SECRET_LENGTH = 32
# States which are used for the protocol state machine.
ST_WAIT_FOR_AUTH = 0
ST_CONNECTED = 1
# File which holds our session ticket.
# FIXME - multiple session tickets for multiple servers must be supported.
TICKET_FILE = DATA_DIRECTORY + "session_ticket.bin"
# Length of a session ticket in bytes.
TICKET_LENGTH = 112
# The protocol name which is used in log messages.
TRANSPORT_NAME = "ScrambleSuit"
| Python | 0 |
84bcae49475d0d0ce0c14d671b363c488d93bb9f | Add skip reason | fmriprep/workflows/bold/tests/test_util.py | fmriprep/workflows/bold/tests/test_util.py | ''' Testing module for fmriprep.workflows.bold.util '''
import pytest
import os
import numpy as np
from nipype.utils.filemanip import fname_presuffix
from nilearn.image import load_img
from ..util import init_bold_reference_wf
def symmetric_overlap(img1, img2):
mask1 = load_img(img1).get_data() > 0
mask2 = load_img(img2).get_data() > 0
total1 = np.sum(mask1)
total2 = np.sum(mask2)
overlap = np.sum(mask1 & mask2)
return overlap / np.sqrt(total1 * total2)
@pytest.mark.skipif(not os.getenv('FMRIPREP_REGRESSION_SOURCE') or
not os.getenv('FMRIPREP_REGRESSION_TARGETS'),
reason='FMRIPREP_REGRESSION_{SOURCE,TARGETS} env vars not set')
@pytest.mark.parametrize('input_fname,expected_fname', [
(os.path.join(os.getenv('FMRIPREP_REGRESSION_SOURCE', ''),
base_fname),
fname_presuffix(base_fname, suffix='_mask', use_ext=True,
newpath=os.getenv('FMRIPREP_REGRESSION_TARGETS', '')))
for base_fname in (
'ds000116/sub-12_task-visualoddballwithbuttonresponsetotargetstimuli_run-02_bold.nii.gz',
# 'ds000133/sub-06_ses-post_task-rest_run-01_bold.nii.gz',
# 'ds000140/sub-32_task-heatpainwithregulationandratings_run-02_bold.nii.gz',
# 'ds000157/sub-23_task-passiveimageviewing_bold.nii.gz',
# 'ds000210/sub-06_task-rest_run-01_echo-1_bold.nii.gz',
# 'ds000210/sub-06_task-rest_run-01_echo-2_bold.nii.gz',
# 'ds000210/sub-06_task-rest_run-01_echo-3_bold.nii.gz',
# 'ds000216/sub-03_task-rest_echo-1_bold.nii.gz',
# 'ds000216/sub-03_task-rest_echo-2_bold.nii.gz',
# 'ds000216/sub-03_task-rest_echo-3_bold.nii.gz',
# 'ds000216/sub-03_task-rest_echo-4_bold.nii.gz',
# 'ds000237/sub-03_task-MemorySpan_acq-multiband_run-01_bold.nii.gz',
# 'ds000237/sub-06_task-MemorySpan_acq-multiband_run-01_bold.nii.gz',
)
])
def test_masking(input_fname, expected_fname):
bold_reference_wf = init_bold_reference_wf(enhance_t2=True)
bold_reference_wf.inputs.inputnode.bold_file = input_fname
res = bold_reference_wf.run()
combine_masks = [node for node in res.nodes if node.name.endswith('combine_masks')][0]
overlap = symmetric_overlap(expected_fname,
combine_masks.result.outputs.out_file)
assert overlap > 0.95, input_fname
| ''' Testing module for fmriprep.workflows.bold.util '''
import pytest
import os
import numpy as np
from nipype.utils.filemanip import fname_presuffix
from nilearn.image import load_img
from ..util import init_bold_reference_wf
def symmetric_overlap(img1, img2):
mask1 = load_img(img1).get_data() > 0
mask2 = load_img(img2).get_data() > 0
total1 = np.sum(mask1)
total2 = np.sum(mask2)
overlap = np.sum(mask1 & mask2)
return overlap / np.sqrt(total1 * total2)
@pytest.mark.skipif(not os.getenv('FMRIPREP_REGRESSION_SOURCE') or
not os.getenv('FMRIPREP_REGRESSION_TARGETS'))
@pytest.mark.parametrize('input_fname,expected_fname', [
(os.path.join(os.getenv('FMRIPREP_REGRESSION_SOURCE', ''),
base_fname),
fname_presuffix(base_fname, suffix='_mask', use_ext=True,
newpath=os.getenv('FMRIPREP_REGRESSION_TARGETS', '')))
for base_fname in (
'ds000116/sub-12_task-visualoddballwithbuttonresponsetotargetstimuli_run-02_bold.nii.gz',
# 'ds000133/sub-06_ses-post_task-rest_run-01_bold.nii.gz',
# 'ds000140/sub-32_task-heatpainwithregulationandratings_run-02_bold.nii.gz',
# 'ds000157/sub-23_task-passiveimageviewing_bold.nii.gz',
# 'ds000210/sub-06_task-rest_run-01_echo-1_bold.nii.gz',
# 'ds000210/sub-06_task-rest_run-01_echo-2_bold.nii.gz',
# 'ds000210/sub-06_task-rest_run-01_echo-3_bold.nii.gz',
# 'ds000216/sub-03_task-rest_echo-1_bold.nii.gz',
# 'ds000216/sub-03_task-rest_echo-2_bold.nii.gz',
# 'ds000216/sub-03_task-rest_echo-3_bold.nii.gz',
# 'ds000216/sub-03_task-rest_echo-4_bold.nii.gz',
# 'ds000237/sub-03_task-MemorySpan_acq-multiband_run-01_bold.nii.gz',
# 'ds000237/sub-06_task-MemorySpan_acq-multiband_run-01_bold.nii.gz',
)
])
def test_masking(input_fname, expected_fname):
bold_reference_wf = init_bold_reference_wf(enhance_t2=True)
bold_reference_wf.inputs.inputnode.bold_file = input_fname
res = bold_reference_wf.run()
combine_masks = [node for node in res.nodes if node.name.endswith('combine_masks')][0]
overlap = symmetric_overlap(expected_fname,
combine_masks.result.outputs.out_file)
assert overlap > 0.95, input_fname
| Python | 0.000001 |
2a030ce151cdb6eaaa3933bd7f958edf658ab209 | Make the parent directory part of the Python path for custom management commands to work. | manage.py | manage.py | #!/usr/bin/env python
import os
import sys
sys.path.append(os.path.abspath(".."))
from django.core.management import execute_manager
try:
import settings # Assumed to be in the same directory.
except ImportError:
import sys
sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
sys.exit(1)
if __name__ == "__main__":
execute_manager(settings)
| #!/usr/bin/env python
from django.core.management import execute_manager
try:
import settings # Assumed to be in the same directory.
except ImportError:
import sys
sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
sys.exit(1)
if __name__ == "__main__":
execute_manager(settings)
| Python | 0 |
0feb5947af0dacc53ba624723593dd88b0b4653a | Fix shop creation | byceps/services/shop/shop/service.py | byceps/services/shop/shop/service.py | """
byceps.services.shop.shop.service
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2018 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from typing import Optional
from ....database import db
from ....typing import PartyID
from .models import Shop as DbShop
from .transfer.models import Shop, ShopID
def create_shop(party_id: PartyID) -> Shop:
"""Create a shop."""
shop = DbShop(party_id, party_id)
db.session.add(shop)
db.session.commit()
return _db_entity_to_shop(shop)
def find_shop(shop_id: ShopID) -> Optional[Shop]:
"""Return the shop with that id, or `None` if not found."""
shop = DbShop.query.get(shop_id)
return _db_entity_to_shop(shop)
def _db_entity_to_shop(shop: DbShop) -> Shop:
return Shop(
shop.id,
shop.party_id,
)
| """
byceps.services.shop.shop.service
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2018 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from typing import Optional
from ....database import db
from ....typing import PartyID
from .models import Shop as DbShop
from .transfer.models import Shop, ShopID
def create_shop(party_id: PartyID) -> Shop:
"""Create a shop."""
shop = DbShop(party_id)
db.session.add(shop)
db.session.commit()
return _db_entity_to_shop(shop)
def find_shop(shop_id: ShopID) -> Optional[Shop]:
"""Return the shop with that id, or `None` if not found."""
shop = DbShop.query.get(shop_id)
return _db_entity_to_shop(shop)
def _db_entity_to_shop(shop: DbShop) -> Shop:
return Shop(
shop.id,
shop.party_id,
)
| Python | 0.000001 |
c0e903c3dab9fea0594d023ab9c049ca408bd9a4 | Cover text: outlined | cover.py | cover.py |
from PIL import Image, ImageDraw, ImageFont
from io import BytesIO
import textwrap
def make_cover(title, author, width=600, height=800, fontname="Helvetica", fontsize=40, bgcolor=(120, 20, 20), textcolor=(255, 255, 255), wrapat=30):
img = Image.new("RGBA", (width, height), bgcolor)
draw = ImageDraw.Draw(img)
title = textwrap.fill(title, wrapat)
author = textwrap.fill(author, wrapat)
font = ImageFont.truetype(font=fontname, size=fontsize)
title_size = draw.textsize(title, font=font)
draw_text_outlined(draw, ((width - title_size[0]) / 2, 100), title, textcolor, font=font)
# draw.text(((width - title_size[0]) / 2, 100), title, textcolor, font=font)
font = ImageFont.truetype(font=fontname, size=fontsize - 2)
author_size = draw.textsize(author, font=font)
draw_text_outlined(draw, ((width - author_size[0]) / 2, 100 + title_size[1] + 70), author, textcolor, font=font)
output = BytesIO()
img.save(output, "PNG")
output.name = 'cover.png'
# writing left the cursor at the end of the file, so reset it
output.seek(0)
return output
def draw_text_outlined(draw, xy, text, fill=None, font=None, anchor=None):
x, y = xy
# Outline
draw.text((x - 1, y), text=text, fill=(0, 0, 0), font=font, anchor=anchor)
draw.text((x + 1, y), text=text, fill=(0, 0, 0), font=font, anchor=anchor)
draw.text((x, y - 1), text=text, fill=(0, 0, 0), font=font, anchor=anchor)
draw.text((x, y + 1), text=text, fill=(0, 0, 0), font=font, anchor=anchor)
# Fill
draw.text(xy, text=text, fill=fill, font=font, anchor=anchor)
if __name__ == '__main__':
f = make_cover('Test of a Title which is quite long and will require multiple lines', 'Some Dude')
with open('output.png', 'wb') as out:
out.write(f.read())
|
from PIL import Image, ImageDraw, ImageFont
from io import BytesIO
import textwrap
def make_cover(title, author, width=600, height=800, fontname="Helvetica", fontsize=40, bgcolor=(120, 20, 20), textcolor=(255, 255, 255), wrapat=30):
img = Image.new("RGBA", (width, height), bgcolor)
draw = ImageDraw.Draw(img)
title = textwrap.fill(title, wrapat)
author = textwrap.fill(author, wrapat)
font = ImageFont.truetype(font=fontname, size=fontsize)
title_size = draw.textsize(title, font=font)
draw.text(((width - title_size[0]) / 2, 100), title, textcolor, font=font)
font = ImageFont.truetype(font=fontname, size=fontsize - 2)
author_size = draw.textsize(author, font=font)
draw.text(((width - author_size[0]) / 2, 100 + title_size[1] + 70), author, textcolor, font=font)
draw = ImageDraw.Draw(img)
output = BytesIO()
img.save(output, "PNG")
output.name = 'cover.png'
# writing left the cursor at the end of the file, so reset it
output.seek(0)
return output
if __name__ == '__main__':
f = make_cover('Test of a Title which is quite long and will require multiple lines', 'Some Dude')
with open('output.png', 'wb') as out:
out.write(f.read())
| Python | 0.999509 |
99bfdd1f038865a3558e212777b0a5641d87c170 | Add report types to NEA. | inspectors/nea.py | inspectors/nea.py | #!/usr/bin/env python
import datetime
import logging
import os
import re
from urllib.parse import urljoin
from bs4 import BeautifulSoup
from utils import utils, inspector
# http://arts.gov/oig
# Oldest report: 2005
# options:
# standard since/year options for a year range to fetch from.
# report_id: only bother to process a single report
#
# Notes for IG's web team:
#
AUDIT_REPORTS_URL = "http://arts.gov/oig/reports/audits"
SPECIAL_REVIEWS_URL = "http://arts.gov/oig/reports/specials"
SEMIANNUAL_REPORTS_URL = "http://arts.gov/oig/reports/semi-annual"
PEER_REVIEWS_URL = "http://arts.gov/oig/reports/external-peer-reviews"
FISMA_REPORTS_URL = "http://arts.gov/oig/reports/fisma"
REPORT_URLS = {
"audit": AUDIT_REPORTS_URL,
"evaluation": SPECIAL_REVIEWS_URL,
"semiannual_report": SEMIANNUAL_REPORTS_URL,
"peer_review": PEER_REVIEWS_URL,
"fisma": FISMA_REPORTS_URL,
}
def run(options):
year_range = inspector.year_range(options)
only_report_id = options.get('report_id')
# Pull the reports
for report_type, url in REPORT_URLS.items():
doc = BeautifulSoup(utils.download(url))
results = doc.select("div.field-item li")
for result in results:
report = report_from(result, url, report_type, year_range)
if report:
# debugging convenience: can limit to single report
if only_report_id and (report['report_id'] != only_report_id):
continue
inspector.save_report(report)
def report_from(result, landing_url, report_type, year_range):
link = result.find("a")
if not link:
return
title = link.text
report_url = urljoin(landing_url, link.get('href'))
report_filename = report_url.split("/")[-1]
report_id, _ = os.path.splitext(report_filename)
estimated_date = False
try:
published_on_text = title.split("-")[-1].split("–")[-1].strip()
published_on = datetime.datetime.strptime(published_on_text, '%B %d, %Y')
except ValueError:
# For reports where we can only find the year, set them to Nov 1st of that year
try:
published_on_year = int(result.find_previous("h3").text.strip())
except AttributeError:
published_on_year = int(re.search('(\d+)', title).group())
published_on = datetime.datetime(published_on_year, 11, 1)
estimated_date = True
if published_on.year not in year_range:
logging.debug("[%s] Skipping, not in requested range." % report_url)
return
report = {
'inspector': 'nea',
'inspector_url': 'http://arts.gov/oig',
'agency': 'nea',
'agency_name': 'National Endowment for the Arts',
'type': report_type,
'report_id': report_id,
'url': report_url,
'title': title,
'published_on': datetime.datetime.strftime(published_on, "%Y-%m-%d"),
}
if estimated_date:
report['estimated_date'] = estimated_date
return report
utils.run(run) if (__name__ == "__main__") else None
| #!/usr/bin/env python
import datetime
import logging
import os
import re
from urllib.parse import urljoin
from bs4 import BeautifulSoup
from utils import utils, inspector
# http://arts.gov/oig
# Oldest report: 2005
# options:
# standard since/year options for a year range to fetch from.
# report_id: only bother to process a single report
#
# Notes for IG's web team:
#
AUDIT_REPORTS_URL = "http://arts.gov/oig/reports/audits"
SPECIAL_REVIEWS_URL = "http://arts.gov/oig/reports/specials"
SEMIANNUAL_REPORTS_URL = "http://arts.gov/oig/reports/semi-annual"
PEER_REVIEWS_URL = "http://arts.gov/oig/reports/external-peer-reviews"
FISMA_REPORTS_URL = "http://arts.gov/oig/reports/fisma"
REPORT_URLS = [
AUDIT_REPORTS_URL,
SPECIAL_REVIEWS_URL,
SEMIANNUAL_REPORTS_URL,
PEER_REVIEWS_URL,
FISMA_REPORTS_URL,
]
def run(options):
year_range = inspector.year_range(options)
only_report_id = options.get('report_id')
# Pull the reports
for url in REPORT_URLS:
doc = BeautifulSoup(utils.download(url))
results = doc.select("div.field-item li")
for result in results:
report = report_from(result, url, year_range)
if report:
# debugging convenience: can limit to single report
if only_report_id and (report['report_id'] != only_report_id):
continue
inspector.save_report(report)
def report_from(result, landing_url, year_range):
link = result.find("a")
if not link:
return
title = link.text
report_url = urljoin(landing_url, link.get('href'))
report_filename = report_url.split("/")[-1]
report_id, _ = os.path.splitext(report_filename)
estimated_date = False
try:
published_on_text = title.split("-")[-1].split("–")[-1].strip()
published_on = datetime.datetime.strptime(published_on_text, '%B %d, %Y')
except ValueError:
# For reports where we can only find the year, set them to Nov 1st of that year
try:
published_on_year = int(result.find_previous("h3").text.strip())
except AttributeError:
published_on_year = int(re.search('(\d+)', title).group())
published_on = datetime.datetime(published_on_year, 11, 1)
estimated_date = True
if published_on.year not in year_range:
logging.debug("[%s] Skipping, not in requested range." % report_url)
return
report = {
'inspector': 'nea',
'inspector_url': 'http://arts.gov/oig',
'agency': 'nea',
'agency_name': 'National Endowment for the Arts',
'report_id': report_id,
'url': report_url,
'title': title,
'published_on': datetime.datetime.strftime(published_on, "%Y-%m-%d"),
}
if estimated_date:
report['estimated_date'] = estimated_date
return report
utils.run(run) if (__name__ == "__main__") else None
| Python | 0 |
91a8c5312c58edff070915fc5d182b35f60ef0fa | allow for error recovery in cases where a file sync fails. | sync-dropbox-to-ftp.py | sync-dropbox-to-ftp.py | #!/usr/bin/env python3
"""The initial options file should look like:
{
"state": { "cursor": null, "left": [] },
"options": {
"ftp": {
"auth": { "host": "FTP_HOST", "user": "FTP_USER", "passwd": "FTP_PASSWORD" },
"path": "FTP_DIRECTORY_NAME"
},
"dropbox": {
"auth": { "access_token": "ACCESS_TOKEN" },
"path": "DROPBOX_DIRECTORY_NAME"
}
}
}
"""
import argparse
import contextlib
import ftplib
import json
import os.path
import sys
import urllib.parse
import urllib.request
class Dropbox:
def __init__(self, access_token):
self._access_token = access_token
def _urlopen(self, url, data=None, headers={}):
headers['Authorization'] = 'Bearer ' + self._access_token
if data is not None:
data = urllib.parse.urlencode(data).encode()
request = urllib.request.Request(url=url, data=data, headers=headers)
return urllib.request.urlopen(request, cadefault=True)
def get_added_files(self, path, cursor):
# http.client.HTTPConnection.debuglevel = 1
data = {'path_prefix': path}
deltas = {'has_more': True}
while deltas['has_more']:
if cursor:
data['cursor'] = cursor
deltas = json.loads(
self._urlopen(url='https://api.dropbox.com/1/delta', data=data)
.read().decode())
added_files = [entry[0] for entry in deltas['entries'] if entry[1]]
cursor = deltas['cursor']
return added_files, cursor
def get_file(self, path):
return self._urlopen('https://api-content.dropbox.com/1/files/auto/' + path)
@contextlib.contextmanager
def Ftp(host, user, passwd):
with ftplib.FTP(host=host) as ftp:
ftp.login(user=user, passwd=passwd)
class Actions:
def __init__(self, ftp):
self._ftp = ftp
def upload(self, path, f, callback=lambda num_bytes: None):
num_bytes = [0]
def cb(buff):
num_bytes[0] += len(buff)
callback(num_bytes[0])
self._ftp.storbinary('STOR ' + os.path.join(self._ftp.pwd(), path), f, callback=cb)
yield Actions(ftp)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--conf", default="~/.sync-dropbox-to-ftp.conf")
parser.add_argument("--noupdate", action="store_true", default=False,
help="Do not update the configuration file.")
args = parser.parse_args()
conf_file = os.path.expanduser(args.conf)
with open(conf_file) as f:
conf = json.load(f)
options = conf['options']
state = conf['state']
db = Dropbox(**options['dropbox']['auth'])
left, state['left'] = state['left'], []
if not left:
left, state['cursor'] = db.get_added_files(options['dropbox']['path'], state['cursor'])
with Ftp(**options['ftp']['auth']) as ftp:
for path in left:
try:
def display(num_bytes):
print('{}: Uploaded {} bytes...'.format(path, num_bytes), end='\r', file=sys.stderr, flush=True)
display(0)
ftp.upload(os.path.join(options['ftp']['path'], os.path.basename(path)),
db.get_file(path),
callback=display)
except Exception as e:
print('\n{}: Failed with {}'.format(path, e), file=sys.stderr, flush=True)
state['left'].append(path)
else:
print('\n{}: Done'.format(path), file=sys.stderr, flush=True)
print(path, flush=True)
if not args.noupdate:
with open(conf_file, 'w') as f:
json.dump(conf, f, indent=2)
| #!/usr/bin/env python3
"""The initial options file should look like:
{
"state": { "cursor": null },
"options": {
"ftp": {
"auth": { "host": "FTP_HOST", "user": "FTP_USER", "passwd": "FTP_PASSWORD" },
"path": "FTP_DIRECTORY_NAME"
},
"dropbox": {
"auth": { "access_token": "ACCESS_TOKEN" },
"path": "DROPBOX_DIRECTORY_NAME"
}
}
}
"""
import argparse
import contextlib
import ftplib
import json
import os.path
import sys
import urllib.parse
import urllib.request
class Dropbox:
def __init__(self, access_token):
self._access_token = access_token
def _urlopen(self, url, data=None, headers={}):
headers['Authorization'] = 'Bearer ' + self._access_token
if data is not None:
data = urllib.parse.urlencode(data).encode()
request = urllib.request.Request(url=url, data=data, headers=headers)
return urllib.request.urlopen(request, cadefault=True)
def get_added_files(self, path, cursor):
# http.client.HTTPConnection.debuglevel = 1
data = {'path_prefix': path}
deltas = {'has_more': True}
while deltas['has_more']:
if cursor:
data['cursor'] = cursor
deltas = json.loads(
self._urlopen(url='https://api.dropbox.com/1/delta', data=data)
.read().decode())
added_files = [entry[0] for entry in deltas['entries'] if entry[1]]
cursor = deltas['cursor']
return added_files, cursor
def get_file(self, path):
return self._urlopen('https://api-content.dropbox.com/1/files/auto/' + path)
@contextlib.contextmanager
def Ftp(host, user, passwd):
with ftplib.FTP(host=host) as ftp:
ftp.login(user=user, passwd=passwd)
class Actions:
def __init__(self, ftp):
self._ftp = ftp
def upload(self, path, f, callback=lambda num_bytes: None):
num_bytes = [0]
def cb(buff):
num_bytes[0] += len(buff)
callback(num_bytes[0])
self._ftp.storbinary('STOR ' + os.path.join(self._ftp.pwd(), path), f, callback=cb)
yield Actions(ftp)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--conf", default="~/.sync-dropbox-to-ftp.conf")
parser.add_argument("--noupdate", action="store_true", default=False,
help="Do not update the configuration file.")
args = parser.parse_args()
conf_file = os.path.expanduser(args.conf)
with open(conf_file) as f:
conf = json.load(f)
options = conf['options']
state = conf['state']
db = Dropbox(**options['dropbox']['auth'])
added_files, state['cursor'] = db.get_added_files(options['dropbox']['path'], state['cursor'])
with Ftp(**options['ftp']['auth']) as ftp:
for path in added_files:
def display(num_bytes):
print('{}: Uploaded {} bytes...'.format(path, num_bytes),
end='\r', file=sys.stderr, flush=True)
display(0)
ftp.upload(os.path.join(options['ftp']['path'], os.path.basename(path)),
db.get_file(path),
callback=display)
print(path)
if not args.noupdate:
with open(conf_file, 'w') as f:
json.dump(conf, f, indent=2)
| Python | 0 |
5f15ad2da19cc3872b5e6fbbaa5db8b902cef720 | Revert "we don't want this commit" | manage.py | manage.py | #!/usr/bin/env python
"""
Usage: manage.py {lms|cms} [--settings env] ...
Run django management commands. Because edx-platform contains multiple django projects,
the first argument specifies which project to run (cms [Studio] or lms [Learning Management System]).
By default, those systems run in with a settings file appropriate for development. However,
by passing the --settings flag, you can specify what environment specific settings file to use.
Any arguments not understood by this manage.py will be passed to django-admin.py
"""
import os
import sys
import importlib
from argparse import ArgumentParser
def parse_args():
"""Parse edx specific arguments to manage.py"""
parser = ArgumentParser()
subparsers = parser.add_subparsers(title='system', description='edX service to run')
lms = subparsers.add_parser(
'lms',
help='Learning Management System',
add_help=False,
usage='%(prog)s [options] ...'
)
lms.add_argument('-h', '--help', action='store_true', help='show this help message and exit')
lms.add_argument(
'--settings',
help="Which django settings module to use under lms.envs. If not provided, the DJANGO_SETTINGS_MODULE "
"environment variable will be used if it is set, otherwise it will default to lms.envs.dev")
lms.add_argument(
'--service-variant',
choices=['lms', 'lms-xml', 'lms-preview'],
default='lms',
help='Which service variant to run, when using the aws environment')
lms.set_defaults(
help_string=lms.format_help(),
settings_base='lms/envs',
default_settings='lms.envs.dev',
startup='lms.startup',
)
cms = subparsers.add_parser(
'cms',
help='Studio',
add_help=False,
usage='%(prog)s [options] ...'
)
cms.add_argument(
'--settings',
help="Which django settings module to use under cms.envs. If not provided, the DJANGO_SETTINGS_MODULE "
"environment variable will be used if it is set, otherwise it will default to cms.envs.dev")
cms.add_argument('-h', '--help', action='store_true', help='show this help message and exit')
cms.set_defaults(
help_string=cms.format_help(),
settings_base='cms/envs',
default_settings='cms.envs.dev',
service_variant='cms',
startup='cms.startup',
)
edx_args, django_args = parser.parse_known_args()
if edx_args.help:
print "edX:"
print edx_args.help_string
return edx_args, django_args
if __name__ == "__main__":
edx_args, django_args = parse_args()
if edx_args.settings:
os.environ["DJANGO_SETTINGS_MODULE"] = edx_args.settings_base.replace('/', '.') + "." + edx_args.settings
else:
os.environ.setdefault("DJANGO_SETTINGS_MODULE", edx_args.default_settings)
os.environ.setdefault("SERVICE_VARIANT", edx_args.service_variant)
if edx_args.help:
print "Django:"
# This will trigger django-admin.py to print out its help
django_args.append('--help')
startup = importlib.import_module(edx_args.startup)
startup.run()
from django.core.management import execute_from_command_line
execute_from_command_line([sys.argv[0]] + django_args)
| #!/usr/bin/env python
"""
Usage: manage.py {lms|cms} [--settings env] ...
Run django management commands. Because edx-platform contains multiple django projects,
the first argument specifies which project to run (cms [Studio] or lms [Learning Management System]).
By default, those systems run in with a settings file appropriate for development. However,
by passing the --settings flag, you can specify what environment specific settings file to use.
Any arguments not understood by this manage.py will be passed to django-admin.py
"""
import os
import sys
import importlib
from argparse import ArgumentParser
def parse_args():
"""Parse edx specific arguments to manage.py"""
parser = ArgumentParser()
subparsers = parser.add_subparsers(title='system', description='edX service to run')
lms = subparsers.add_parser(
'lms',
help='Learning Management System',
add_help=False,
usage='%(prog)s [options] ...'
)
lms.add_argument('-h', '--help', action='store_true', help='show this help message and exit')
lms.add_argument(
'--settings',
help="Which django settings module to use under lms.envs. If not provided, the DJANGO_SETTINGS_MODULE "
"environment variable will be used if it is set, otherwise it will default to lms.envs.dev")
lms.add_argument(
'--service-variant',
choices=['lms', 'lms-xml', 'lms-preview'],
default='lms',
help='Which service variant to run, when using the aws environment')
lms.set_defaults(
help_string=lms.format_help(),
settings_base='lms/envs',
default_settings='lms.envs.dev',
startup='lms.startup',
)
cms = subparsers.add_parser(
'cms',
help='Studio',
add_help=False,
usage='%(prog)s [options] ...'
)
cms.add_argument(
'--settings',
help="Which django settings module to use under cms.envs. If not provided, the DJANGO_SETTINGS_MODULE "
"environment variable will be used if it is set, otherwise it will default to cms.envs.dev")
cms.add_argument('-h', '--help', action='store_true', help='show this help message and exit')
cms.set_defaults(
help_string=cms.format_help(),
settings_base='cms/envs',
default_settings='cms.envs.dev',
service_variant='cms',
startup='cms.startup',
)
edx_args, django_args = parser.parse_known_args()
if edx_args.help:
print "edX:"
print edx_args.help_string
return edx_args, django_args
if __name__ == "__main__":
edx_args, django_args = parse_args()
if edx_args.settings:
os.environ["DJANGO_SETTINGS_MODULE"] = edx_args.settings_base.replace('/', '.') + "." + edx_args.settings
else:
os.environ.setdefault("DJANGO_SETTINGS_MODULE", edx_args.default_settings)
os.environ.setdefault("SERVICE_VARIANT", edx_args.service_variant)
if edx_args.help:
print "Django:"
# This will trigger django-admin.py to print out its help
django_args.append('--help')
startup = importlib.import_module(edx_args.startup)
startup.run()
from django.core.management import execute_from_command_line
# this is a commited changed,that we dont want
execute_from_command_line([sys.argv[0]] + django_args)
| Python | 0 |
8c233868e82a6828d21574b0d488699c1c7b1443 | Update test_ValueType.py | cairis/cairis/test/test_ValueType.py | cairis/cairis/test/test_ValueType.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
import os
import json
import BorgFactory
from Borg import Borg
from ValueTypeParameters import ValueTypeParameters
class ValueTypeTest(unittest.TestCase):
def setUp(self):
BorgFactory.initialise()
f = open(os.environ['CAIRIS_SRC'] + '/test/valuetypes.json')
d = json.load(f)
f.close()
self.iVtypes = d['valuetypes']
def testValueType(self):
ivt1 = ValueTypeParameters(self.iVtypes[0]["theName"], self.iVtypes[0]["theDescription"], self.iVtypes[0]["theType"])
ivt2 = ValueTypeParameters(self.iVtypes[1]["theName"], self.iVtypes[1]["theDescription"], self.iVtypes[1]["theType"])
b = Borg()
b.dbProxy.addValueType(ivt1)
b.dbProxy.addValueType(ivt2)
oVtypes = b.dbProxy.getValueTypes()
ovt1 = oVtypes[self.iVtypes[0]["theName"]]
self.assertEqual(ivt1.name(), ovt1.name())
self.assertEqual(ivt1.description(),ovt1.description())
self.assertEqual(ivt1.type(),ovt1.type())
ovt2 = oVtypes[self.iVtypes[1]["theName"]]
self.assertEqual(ivt2.name(), ovt2.name())
self.assertEqual(ivt2.description(),ovt2.description())
self.assertEqual(ivt2.type(),ovt2.type())
b.dbProxy.deleteVulnerabilityType(ovt1.id())
b.dbProxy.deleteThreatType(ovt2.id())
def tearDown(self):
b = Borg()
b.dbProxy.close()
if __name__ == '__main__':
unittest.main()
| # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
import os
import json
import BorgFactory
from Borg import Borg
from ValueTypeParameters import ValueTypeParameters
class ValueTypeTest(unittest.TestCase):
def setUp(self):
BorgFactory.initialise()
f = open(os.environ['CAIRIS_SRC'] + '/test/valuetypes.json')
d = json.load(f)
f.close()
self.iVtypes = d['valuetypes']
def testValueType(self):
ivt1 = ValueTypeParameters(self.iVtypes[0]["theName"], self.iVtypes[0]["theDescription"], self.iVtypes[0]["vulnerability_type"])
ivt2 = ValueTypeParameters(self.iVtypes[1]["theName"], self.iVtypes[1]["theDescription"], self.iVtypes[1]["threat_type"])
b = Borg()
b.dbProxy.addValueType(ivt1)
b.dbProxy.addValueType(ivt2)
oVtypes = b.dbProxy.getValueTypes()
ovt1 = oVtypes[self.iVtypes[0]["theName"]]
self.assertEqual(ivt1.name(), ovt1.name())
self.assertEqual(ivt1.description(),ovt1.description())
self.assertEqual(ivt1.type(),ovt1.type())
ovt2 = oVtypes[self.iVtypes[1]["theName"]]
self.assertEqual(ivt2.name(), ovt2.name())
self.assertEqual(ivt2.description(),ovt2.description())
self.assertEqual(ivt2.type(),ovt2.type())
b.dbProxy.deleteVulnerabilityType(ovt1.id())
b.dbProxy.deleteThreatType(ovt2.id())
def tearDown(self):
b = Borg()
b.dbProxy.close()
if __name__ == '__main__':
unittest.main()
| Python | 0.000001 |
278c7817120370c22cad2e56471e7d1673312e3e | Use functools.wraps rather than manually setting __name__ | curry.py | curry.py | from functools import wraps
from inspect import signature, isclass
import sys
import unittest
def get_arg_count(fun):
if isclass(fun):
return len(signature(fun.__call__).parameters)
else:
return len(signature(fun).parameters)
def curry(fun):
arg_count = get_arg_count(fun)
@wraps(fun)
def curried(*old_args, **old_kwargs):
args_store = list(old_args)
kwargs_store = old_kwargs
@wraps(fun)
def _inner(*new_args, **new_kwargs):
nonlocal args_store, kwargs_store
new_args = args_store + list(new_args)
kwargs_store.update(new_kwargs)
args_store = new_args
if len(args_store) + len(kwargs_store) == arg_count:
return fun(*args_store, **kwargs_store)
else:
return _inner
return _inner
return curried
class CurryTest(unittest.TestCase):
def test_two_args(self):
add = curry(lambda a, b: a + b)
self.assertEqual(3, add(1)(2))
def test_three_args(self):
add3 = curry(lambda a, b, c: a + b + c)
self.assertEqual(6, add3(1)(2)(3))
def test_args_dont_persist(self):
add = curry(lambda a, b: a + b)
add1 = add(1)
add2 = add(2)
self.assertEqual(2, add1(1))
self.assertEqual(3, add2(1))
def test_mutable_args(self):
def concat(a, b):
ret = []
ret.extend(a)
ret.extend(b)
return ret
concat = curry(concat)
self.assertEqual([1, 2, 3, 4], concat([1, 2])([3, 4]))
def test_builtin(self):
add_1_to_each = curry(map)(lambda x: x + 1)
self.assertEqual([2, 3, 4, 5],
list(add_1_to_each([1, 2, 3, 4])))
def test_positional_kwargs(self):
add_default = curry(lambda a, b=10: a + b)
self.assertEqual(3, add_default(1)(2))
def test_kwargs(self):
@curry
def add(a, *, b):
return a + b
self.assertEqual(12, add(2)(b=10))
def test_preserve_name(self):
def add(a, b): return a + b
add = curry(add)
self.assertEqual('add', add.__name__)
self.assertEqual('add', add(1).__name__)
if __name__ == '__main__':
unittest.main()
| from inspect import signature, isclass
import sys
import unittest
def get_arg_count(fun):
if isclass(fun):
return len(signature(fun.__call__).parameters)
else:
return len(signature(fun).parameters)
def curry(fun):
arg_count = get_arg_count(fun)
def curried(*old_args, **old_kwargs):
args_store = list(old_args)
kwargs_store = old_kwargs
def _inner(*new_args, **new_kwargs):
nonlocal args_store, kwargs_store
new_args = args_store + list(new_args)
kwargs_store.update(new_kwargs)
args_store = new_args
_inner.__name__ = fun.__name__
if len(args_store) + len(kwargs_store) == arg_count:
return fun(*args_store, **kwargs_store)
else:
return _inner
_inner.__name__ = fun.__name__
return _inner
curried.__name__ = fun.__name__
return curried
class CurryTest(unittest.TestCase):
def test_two_args(self):
add = curry(lambda a, b: a + b)
self.assertEqual(3, add(1)(2))
def test_three_args(self):
add3 = curry(lambda a, b, c: a + b + c)
self.assertEqual(6, add3(1)(2)(3))
def test_args_dont_persist(self):
add = curry(lambda a, b: a + b)
add1 = add(1)
add2 = add(2)
self.assertEqual(2, add1(1))
self.assertEqual(3, add2(1))
def test_mutable_args(self):
def concat(a, b):
ret = []
ret.extend(a)
ret.extend(b)
return ret
concat = curry(concat)
self.assertEqual([1, 2, 3, 4], concat([1, 2])([3, 4]))
def test_builtin(self):
add_1_to_each = curry(map)(lambda x: x + 1)
self.assertEqual([2, 3, 4, 5],
list(add_1_to_each([1, 2, 3, 4])))
def test_positional_kwargs(self):
add_default = curry(lambda a, b=10: a + b)
self.assertEqual(3, add_default(1)(2))
def test_kwargs(self):
@curry
def add(a, *, b):
return a + b
self.assertEqual(12, add(2)(b=10))
def test_preserve_name(self):
def add(a, b): return a + b
add = curry(add)
self.assertEqual('add', add.__name__)
self.assertEqual('add', add(1).__name__)
if __name__ == '__main__':
unittest.main()
| Python | 0 |
7cac8f8ba591315d68e223503c4e93f976c8d89d | Set default race and class without extra database queries | characters/views.py | characters/views.py | from django.shortcuts import get_object_or_404, redirect, render
from characters.forms import CharacterForm
from characters.models import Character, Class, Race
def index(request):
all_characters = Character.objects.all()
context = {'all_characters': all_characters}
return render(request, 'characters/index.html', context)
def view_character(request, character_id):
character = get_object_or_404(Character, pk=character_id)
context = {'character': character}
return render(request, 'characters/view_character.html', context)
def create_character(request):
form = CharacterForm(request.POST or None)
if request.method == 'POST' and form.is_valid():
character = Character(
name=request.POST['name'],
background=request.POST['background'],
race_id=1,
cclass_id=1
)
character.save()
return redirect('characters:view', character_id=character.id)
context = {'form': form}
return render(request, 'characters/create_character.html', context)
| from django.shortcuts import get_object_or_404, redirect, render
from characters.forms import CharacterForm
from characters.models import Character, Class, Race
def index(request):
all_characters = Character.objects.all()
context = {'all_characters': all_characters}
return render(request, 'characters/index.html', context)
def view_character(request, character_id):
character = get_object_or_404(Character, pk=character_id)
context = {'character': character}
return render(request, 'characters/view_character.html', context)
def create_character(request):
form = CharacterForm(request.POST or None)
if request.method == 'POST' and form.is_valid():
race = Race.objects.get(id=1)
cclass = Class.objects.get(id=1)
character = Character(
name=request.POST['name'],
background=request.POST['background'],
race=race,
cclass=cclass
)
character.save()
return redirect('characters:view', character_id=character.id)
context = {'form': form}
return render(request, 'characters/create_character.html', context)
| Python | 0 |
daabf57935c9bec91ee4ce0bcd4713790fe928ea | use celery | daily.py | daily.py | from totalimpactwebapp.user import User
from totalimpactwebapp import db
import datetime
import tasks
"""
requires these env vars be set in this environment:
DATABASE_URL
"""
def page_query(q):
offset = 0
while True:
r = False
for elem in q.limit(100).offset(offset):
r = True
yield elem
offset += 100
if not r:
break
def add_profile_deets_for_everyone():
for user in page_query(User.query.order_by(User.url_slug.asc())):
print user.url_slug
tasks.add_profile_deets.delay(user)
def deduplicate_everyone():
for user in page_query(User.query.order_by(User.url_slug.asc())):
print user.url_slug
removed_tiids = tasks.deduplicate.delay(user)
def put_linked_account_users_on_queue():
i = 0
# now = datetime.datetime.utcnow().isoformat()
now = "2013-06-24"
# for user in page_query(User.query.filter(User.next_refresh < now).order_by(User.next_refresh.asc())):
# for user in page_query(User.query.filter(User.next_refresh <= now)):
# for user in page_query(User.query):
# linked_accounts_to_sync = {
# "figshare": user.figshare_id,
# "github": user.github_id,
# "orcid": user.orcid_id,
# "slideshare": user.slideshare_id
# }
# has_linked_account = [account for account in linked_accounts_to_sync if linked_accounts_to_sync[account]]
# if has_linked_account:
# i += 1
# print u"{i} user {url_slug} has linked account: {has_linked_account} {next_refresh} ".format(
# i=i, url_slug=user.url_slug, has_linked_account=has_linked_account, next_refresh=user.next_refresh)
# for account in has_linked_account:
# tiids = update_from_linked_account.delay(user, account)
db.create_all()
add_profile_deets_for_everyone()
| from totalimpactwebapp.user import User
from totalimpactwebapp import db
import datetime
import tasks
"""
requires these env vars be set in this environment:
DATABASE_URL
"""
def page_query(q):
offset = 0
while True:
r = False
for elem in q.limit(100).offset(offset):
r = True
yield elem
offset += 100
if not r:
break
def add_profile_deets_for_everyone():
for user in page_query(User.query.order_by(User.url_slug.asc())):
print user.url_slug
# tasks.add_profile_deets.delay(user)
tasks.add_profile_deets(user)
def deduplicate_everyone():
for user in page_query(User.query.order_by(User.url_slug.asc())):
print user.url_slug
removed_tiids = tasks.deduplicate.delay(user)
def put_linked_account_users_on_queue():
i = 0
# now = datetime.datetime.utcnow().isoformat()
now = "2013-06-24"
# for user in page_query(User.query.filter(User.next_refresh < now).order_by(User.next_refresh.asc())):
# for user in page_query(User.query.filter(User.next_refresh <= now)):
# for user in page_query(User.query):
# linked_accounts_to_sync = {
# "figshare": user.figshare_id,
# "github": user.github_id,
# "orcid": user.orcid_id,
# "slideshare": user.slideshare_id
# }
# has_linked_account = [account for account in linked_accounts_to_sync if linked_accounts_to_sync[account]]
# if has_linked_account:
# i += 1
# print u"{i} user {url_slug} has linked account: {has_linked_account} {next_refresh} ".format(
# i=i, url_slug=user.url_slug, has_linked_account=has_linked_account, next_refresh=user.next_refresh)
# for account in has_linked_account:
# tiids = update_from_linked_account.delay(user, account)
db.create_all()
add_profile_deets_for_everyone()
| Python | 0.000618 |
a2cc8a5e6009bda68edf85a432d9a8ec002e99a1 | Fix #80 | adapter/__init__.py | adapter/__init__.py | import sys
PY2 = sys.version_info[0] == 2
if PY2:
is_string = lambda v: isinstance(v, basestring)
# python2-based LLDB accepts utf8-encoded ascii strings only.
to_lldb_str = lambda s: s.encode('utf8', 'backslashreplace') if isinstance(s, unicode) else s
from_lldb_str = lambda s: s.decode('utf8', 'replace')
xrange = xrange
else:
is_string = lambda v: isinstance(v, str)
to_lldb_str = str
from_lldb_str = str
xrange = range
import adapter.main
| import sys
PY2 = sys.version_info[0] == 2
if PY2:
is_string = lambda v: isinstance(v, basestring)
to_lldb_str = lambda s: s.encode('utf8', 'backslashreplace')
from_lldb_str = lambda s: s.decode('utf8', 'replace')
xrange = xrange
else:
is_string = lambda v: isinstance(v, str)
to_lldb_str = str
from_lldb_str = str
xrange = range
import adapter.main
| Python | 0.000001 |
2930d355421ed3804d5c675fad20c82f27066d7e | Support numpy 1.9 | chainer/functions/array/broadcast.py | chainer/functions/array/broadcast.py | import six
from chainer import cuda
from chainer import function
from chainer.utils import type_check
def _backward_one(x, g):
if g is None:
xp = cuda.get_array_module(x)
return xp.zeros_like(x)
if g.ndim != x.ndim:
g = g.sum(axis=tuple(range(g.ndim - x.ndim)))
# An input variable is always an array, not a scalar.
# We need to convert a scalar value to a zero-dim array.
xp = cuda.get_array_module(x)
if xp.isscalar(g):
g = xp.array(g)
axis = tuple(i for i, sx in enumerate(x.shape) if sx == 1)
if len(axis) > 0:
return g.sum(keepdims=True, axis=axis)
else:
return g
class Broadcast(function.Function):
"""Function that broadcasts given arrays."""
def check_type_forward(self, in_types):
type_check.expect(in_types.size() > 0)
shapes = [t.eval().shape for t in in_types]
r_shapes = [s[::-1] for s in shapes]
r_filled = six.moves.zip_longest(*r_shapes, fillvalue=1)
for ss in r_filled:
d = max(ss)
if not all(s == d or s == 1 for s in ss):
expect = 'each dimension has the same size or is 1'
actual = 'shapes: ' + ', '.join(map(str, shapes))
raise type_check.InvalidType(expect, actual)
def forward(self, xs):
xp = cuda.get_array_module(*xs)
return tuple(xp.broadcast_arrays(*xs))
def backward(self, xs, grads):
return tuple(_backward_one(x, g) for x, g in six.moves.zip(xs, grads))
def broadcast(*args):
"""Broadcast given variables.
Args:
args (Variables): Variables to be broadcasted.
Returns:
``tuple``: Tuple of :class:`~chainer.Variable` objects which are
broadcasted from given arguments.
"""
return Broadcast()(*args)
class BroadcastTo(function.Function):
"""Function that broadcasts an array to a new shape."""
def __init__(self, shape):
shape = tuple(shape)
self._shape = shape
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 1)
ndim = type_check.Variable(len(self._shape), 'len(shape)')
type_check.expect(in_types[0].ndim <= ndim)
shape = in_types[0].shape.eval()
for i in range(len(shape)):
j = -i - 1
if shape[j] == self._shape[j] or shape[j] == 1:
continue
expect = 'in_type[0].shape[%d] == %d' % (j, self._shape[j])
if self._shape[j] != 1:
expect += ' or in_type[0].shape[%d] == 1' % j
actual = 'in_type[0].shape: %s' % str(shape)
raise type_check.InvalidType(expect, actual)
def forward(self, xs):
xp = cuda.get_array_module(*xs)
x = xs[0]
if hasattr(xp, 'broadcast_to'):
return xp.broadcast_to(x, self._shape),
else:
# numpy 1.9 doesn't support broadcast_to method
dummy = xp.empty(self._shape)
bx, _ = xp.broadcast_arrays(x, dummy)
return bx,
def backward(self, xs, grads):
return tuple(_backward_one(x, g) for x, g in six.moves.zip(xs, grads))
def broadcast_to(x, shape):
return BroadcastTo(shape)(x)
| import six
from chainer import cuda
from chainer import function
from chainer.utils import type_check
def _backward_one(x, g):
if g is None:
xp = cuda.get_array_module(x)
return xp.zeros_like(x)
if g.ndim != x.ndim:
g = g.sum(axis=tuple(range(g.ndim - x.ndim)))
# An input variable is always an array, not a scalar.
# We need to convert a scalar value to a zero-dim array.
xp = cuda.get_array_module(x)
if xp.isscalar(g):
g = xp.array(g)
axis = tuple(i for i, sx in enumerate(x.shape) if sx == 1)
if len(axis) > 0:
return g.sum(keepdims=True, axis=axis)
else:
return g
class Broadcast(function.Function):
"""Function that broadcasts given arrays."""
def check_type_forward(self, in_types):
type_check.expect(in_types.size() > 0)
shapes = [t.eval().shape for t in in_types]
r_shapes = [s[::-1] for s in shapes]
r_filled = six.moves.zip_longest(*r_shapes, fillvalue=1)
for ss in r_filled:
d = max(ss)
if not all(s == d or s == 1 for s in ss):
expect = 'each dimension has the same size or is 1'
actual = 'shapes: ' + ', '.join(map(str, shapes))
raise type_check.InvalidType(expect, actual)
def forward(self, xs):
xp = cuda.get_array_module(*xs)
return tuple(xp.broadcast_arrays(*xs))
def backward(self, xs, grads):
return tuple(_backward_one(x, g) for x, g in six.moves.zip(xs, grads))
def broadcast(*args):
"""Broadcast given variables.
Args:
args (Variables): Variables to be broadcasted.
Returns:
``tuple``: Tuple of :class:`~chainer.Variable` objects which are
broadcasted from given arguments.
"""
return Broadcast()(*args)
class BroadcastTo(function.Function):
"""Function that broadcasts an array to a new shape."""
def __init__(self, shape):
shape = tuple(shape)
self._shape = shape
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 1)
ndim = type_check.Variable(len(self._shape), 'len(shape)')
type_check.expect(in_types[0].ndim <= ndim)
shape = in_types[0].shape.eval()
for i in range(len(shape)):
j = -i - 1
if shape[j] == self._shape[j] or shape[j] == 1:
continue
expect = 'in_type[0].shape[%d] == %d' % (j, self._shape[j])
if self._shape[j] != 1:
expect += ' or in_type[0].shape[%d] == 1' % j
actual = 'in_type[0].shape: %s' % str(shape)
raise type_check.InvalidType(expect, actual)
def forward(self, xs):
xp = cuda.get_array_module(*xs)
x = xs[0]
return xp.broadcast_to(x, self._shape),
def backward(self, xs, grads):
return tuple(_backward_one(x, g) for x, g in six.moves.zip(xs, grads))
def broadcast_to(x, shape):
return BroadcastTo(shape)(x)
| Python | 0.00004 |
dc50a4ec058f9893e87a069bc64e4715ecfa0bea | Add initial status code assertion | haas_rest_test/plugins/assertions.py | haas_rest_test/plugins/assertions.py | # -*- coding: utf-8 -*-
# Copyright (c) 2014 Simon Jagoe
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the 3-clause BSD license. See the LICENSE.txt file for details.
from __future__ import absolute_import, unicode_literals
from jsonschema.exceptions import ValidationError
import jsonschema
from ..exceptions import YamlParseError
class StatusCodeAssertion(object):
_schema = {
'$schema': 'http://json-schema.org/draft-04/schema#',
'title': 'Assertion on status code ',
'description': 'Test case markup for Haas Rest Test',
'type': 'object',
'properties': {
'expected': {
'type': 'integer',
},
},
'required': ['expected']
}
def __init__(self, expected_status):
super(StatusCodeAssertion, self).__init__()
self.expected_status = expected_status
@classmethod
def from_dict(cls, data):
try:
jsonschema.validate(data, cls._schema)
except ValidationError as e:
raise YamlParseError(str(e))
return cls(expected_status=data['expected'])
def run(self, case, response):
case.fail()
| # -*- coding: utf-8 -*-
# Copyright (c) 2014 Simon Jagoe
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the 3-clause BSD license. See the LICENSE.txt file for details.
from __future__ import absolute_import, unicode_literals
class StatusCodeAssertion(object):
_schema = {
}
def __init__(self, valid_codes):
super(StatusCodeAssertion, self).__init__()
self.valid_codes = valid_codes
@classmethod
def from_dict(cls, data):
# FIXME: Validate input with jsonschema
return cls(valid_codes=data['expected'])
| Python | 0.000004 |
b9ea36d80ec256988a772e621eb91481cff5e464 | Bump version to 0.3 | cicoclient/shell.py | cicoclient/shell.py | # Copyright Red Hat, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import sys
import os
from cliff.app import App
from cliff.commandmanager import CommandManager
class CicoCli(App):
"""
CLI interface boilerplate with cliff
"""
def __init__(self):
super(CicoCli, self).__init__(
description='CLI interface to admin.ci.centos.org',
version='0.3',
command_manager=CommandManager('cico.cli'),
deferred_help=True,
)
def build_option_parser(self, description, version):
parser = super(CicoCli, self).build_option_parser(description, version)
# Global arguments
parser.add_argument(
'--endpoint',
metavar='<endpoint>',
help='Endpoint to the admin.ci.centos.org service.\n'
' Defaults to: http://admin.ci.centos.org:8080/',
default='http://admin.ci.centos.org:8080/'
)
parser.add_argument(
'--api-key',
metavar='<api-key>',
help='API key to admin.ci.centos.org service. Defaults to'
' environment variable for CICO_API_KEY.',
default=os.getenv('CICO_API_KEY', None)
)
return parser
def initialize_app(self, argv):
self.LOG.debug('initialize_app')
def prepare_to_run_command(self, cmd):
self.LOG.debug('prepare_to_run_command %s', cmd.__class__.__name__)
def clean_up(self, cmd, result, err):
self.LOG.debug('clean_up %s', cmd.__class__.__name__)
if err:
self.LOG.debug('got an error: %s', err)
def main(argv=sys.argv[1:]):
cicocli = CicoCli()
return cicocli.run(argv)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| # Copyright Red Hat, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import sys
import os
from cliff.app import App
from cliff.commandmanager import CommandManager
class CicoCli(App):
"""
CLI interface boilerplate with cliff
"""
def __init__(self):
super(CicoCli, self).__init__(
description='CLI interface to admin.ci.centos.org',
version='0.2',
command_manager=CommandManager('cico.cli'),
deferred_help=True,
)
def build_option_parser(self, description, version):
parser = super(CicoCli, self).build_option_parser(description, version)
# Global arguments
parser.add_argument(
'--endpoint',
metavar='<endpoint>',
help='Endpoint to the admin.ci.centos.org service.\n'
' Defaults to: http://admin.ci.centos.org:8080/',
default='http://admin.ci.centos.org:8080/'
)
parser.add_argument(
'--api-key',
metavar='<api-key>',
help='API key to admin.ci.centos.org service. Defaults to'
' environment variable for CICO_API_KEY.',
default=os.getenv('CICO_API_KEY', None)
)
return parser
def initialize_app(self, argv):
self.LOG.debug('initialize_app')
def prepare_to_run_command(self, cmd):
self.LOG.debug('prepare_to_run_command %s', cmd.__class__.__name__)
def clean_up(self, cmd, result, err):
self.LOG.debug('clean_up %s', cmd.__class__.__name__)
if err:
self.LOG.debug('got an error: %s', err)
def main(argv=sys.argv[1:]):
cicocli = CicoCli()
return cicocli.run(argv)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| Python | 0 |
561de1d124289058eafde34547e8fc773c3e9793 | Rename strips to d_strips | board.py | board.py |
import direction_strips as ds_m
from pente_exceptions import *
from defines import *
class Board():
def __init__(self, size, clone_it=False):
self.size = size
if not clone_it:
self.set_to_empty()
def set_to_empty(self):
self.d_strips = []
self.d_strips.append(ds_m.EDirectionStrips(self.size))
self.d_strips.append(ds_m.SEDirectionStrips(self.size))
self.d_strips.append(ds_m.SDirectionStrips(self.size))
self.d_strips.append(ds_m.SWDirectionStrips(self.size))
def key(self):
k = 0
estrips = self.d_strips[0]
for s in estrips.strips:
k += s
k *= 4 ** self.size
return k
def get_direction_strips(self):
return self.d_strips
def clone(self):
new_board = Board(self.size, clone_it=True)
new_board.d_strips = [s.clone() for s in self.d_strips]
return new_board
def __repr__(self):
size = self.size
rep = '\n'
for j in range(size-1,-1,-1):
line = [ ['.','B','W'][self.d_strips[0].get_occ((i,j))] for i in range(size) ]
rep = rep + ' '.join(line) + '\n'
return rep
def get_size(self):
return self.size
def off_board(self, pos):
x,y = pos
size = self.size
return x < 0 or \
x >= size or \
y < 0 or \
y >= size
def get_occ(self, pos):
if self.off_board(pos):
raise OffBoardException
colour = self.d_strips[0].get_occ(pos)
return colour
def set_occ(self, pos, colour):
if self.off_board(pos):
raise OffBoardException
for s in self.d_strips:
# We maintain the board position in four ways, update them all
s.set_occ(pos, colour)
|
import direction_strips as ds_m
from pente_exceptions import *
from defines import *
class Board():
def __init__(self, size, clone_it=False):
self.size = size
if not clone_it:
self.set_to_empty()
def set_to_empty(self):
self.strips = [] # TODO Rename to d_strips
self.strips.append(ds_m.EDirectionStrips(self.size))
self.strips.append(ds_m.SEDirectionStrips(self.size))
self.strips.append(ds_m.SDirectionStrips(self.size))
self.strips.append(ds_m.SWDirectionStrips(self.size))
def key(self):
return tuple(self.strips[0].strips)
def get_direction_strips(self):
return self.strips
def clone(self):
new_board = Board(self.size, clone_it=True)
new_board.strips = [s.clone() for s in self.strips]
return new_board
def __repr__(self):
size = self.size
rep = '\n'
for j in range(size-1,-1,-1):
line = [ ['.','B','W'][self.strips[0].get_occ((i,j))] for i in range(size) ]
rep = rep + ' '.join(line) + '\n'
return rep
def get_size(self):
return self.size
def off_board(self, pos):
x,y = pos
size = self.size
return x < 0 or \
x >= size or \
y < 0 or \
y >= size
def get_occ(self, pos):
if self.off_board(pos):
raise OffBoardException
colour_new = self.strips[0].get_occ(pos)
return colour_new
def set_occ(self, pos, colour):
if self.off_board(pos):
raise OffBoardException
for s in self.strips:
# We maintain the board position in four ways, update them all
s.set_occ(pos, colour)
| Python | 0.999681 |
62e9510fe2fbe3186c7c817a5c287322a65b1dc9 | Fix linearPotential import to new package structure | galpy/potential/IsothermalDiskPotential.py | galpy/potential/IsothermalDiskPotential.py | ###############################################################################
# IsothermalDiskPotential.py: class that implements the one-dimensional
# self-gravitating isothermal disk
###############################################################################
import numpy
from .linearPotential import linearPotential, _APY_LOADED
if _APY_LOADED:
from astropy import units
class IsothermalDiskPotential(linearPotential):
"""Class representing the one-dimensional self-gravitating isothermal disk
.. math::
\\rho(x) = \\mathrm{amp}\\,\\mathrm{sech}^2\\left(\\frac{x}{2H}\\right)
where the scale height :math:`H^2 = \\sigma^2/[8\\pi G \\,\\mathrm{amp}]`. The parameter to setup the disk is the velocity dispersion :math:`\\sigma`.
"""
def __init__(self,amp=1.,sigma=0.1,ro=None,vo=None):
"""
NAME:
__init__
PURPOSE:
Initialize an IsothermalDiskPotential
INPUT:
amp - an overall amplitude
sigma - velocity dispersion (can be a Quantity)
OUTPUT:
instance
HISTORY:
2018-04-11 - Written - Bovy (UofT)
"""
linearPotential.__init__(self,amp=amp,ro=ro,vo=vo)
if _APY_LOADED and isinstance(sigma,units.Quantity):
sigma= sigma.to(units.km/units.s).value/self._vo
self._sigma2= sigma**2.
self._H= sigma/numpy.sqrt(8.*numpy.pi*self._amp)
def _evaluate(self,x,t=0.):
return 2.*self._sigma2*numpy.log(numpy.cosh(0.5*x/self._H))
def _force(self,x,t=0.):
return -self._sigma2*numpy.tanh(0.5*x/self._H)/self._H
| ###############################################################################
# IsothermalDiskPotential.py: class that implements the one-dimensional
# self-gravitating isothermal disk
###############################################################################
import numpy
from galpy.util import bovy_conversion
from galpy.potential_src.linearPotential import linearPotential, _APY_LOADED
if _APY_LOADED:
from astropy import units
class IsothermalDiskPotential(linearPotential):
"""Class representing the one-dimensional self-gravitating isothermal disk
.. math::
\\rho(x) = \\mathrm{amp}\\,\\mathrm{sech}^2\\left(\\frac{x}{2H}\\right)
where the scale height :math:`H^2 = \\sigma^2/[8\\pi G \\,\\mathrm{amp}]`. The parameter to setup the disk is the velocity dispersion :math:`\\sigma`.
"""
def __init__(self,amp=1.,sigma=0.1,ro=None,vo=None):
"""
NAME:
__init__
PURPOSE:
Initialize an IsothermalDiskPotential
INPUT:
amp - an overall amplitude
sigma - velocity dispersion (can be a Quantity)
OUTPUT:
instance
HISTORY:
2018-04-11 - Written - Bovy (UofT)
"""
linearPotential.__init__(self,amp=amp,ro=ro,vo=vo)
if _APY_LOADED and isinstance(sigma,units.Quantity):
sigma= sigma.to(units.km/units.s).value/self._vo
self._sigma2= sigma**2.
self._H= sigma/numpy.sqrt(8.*numpy.pi*self._amp)
def _evaluate(self,x,t=0.):
return 2.*self._sigma2*numpy.log(numpy.cosh(0.5*x/self._H))
def _force(self,x,t=0.):
return -self._sigma2*numpy.tanh(0.5*x/self._H)/self._H
| Python | 0.000022 |
1e012f6fc25e2be5bc55b7cae9f04a3a33ac86e5 | use static url for ckeditor media serving | ckeditor/widgets.py | ckeditor/widgets.py | from django import forms
from django.conf import settings
from django.core.urlresolvers import reverse
from django.template.loader import render_to_string
from django.utils.safestring import mark_safe
from django.utils.html import conditional_escape
from django.utils.encoding import force_unicode
from django.utils import simplejson
from django.core.exceptions import ImproperlyConfigured
from django.forms.util import flatatt
json_encode = simplejson.JSONEncoder().encode
DEFAULT_CONFIG = {
'skin': 'django',
'toolbar': 'Full',
'height': 291,
'width': 835,
'filebrowserWindowWidth': 940,
'filebrowserWindowHeight': 725,
}
class CKEditorWidget(forms.Textarea):
"""
Widget providing CKEditor for Rich Text Editing.
Supports direct image uploads and embed.
"""
class Media:
try:
js = (
settings.STATIC_URL + 'ckeditor/ckeditor/ckeditor.js',
)
except AttributeError:
raise ImproperlyConfigured("django-ckeditor requires \
CKEDITOR_MEDIA_PREFIX setting. This setting specifies a \
URL prefix to the ckeditor JS and CSS media (not \
uploaded media). Make sure to use a trailing slash: \
CKEDITOR_MEDIA_PREFIX = '/media/ckeditor/'")
def __init__(self, config_name='default', *args, **kwargs):
super(CKEditorWidget, self).__init__(*args, **kwargs)
# Setup config from defaults.
self.config = DEFAULT_CONFIG.copy()
# Try to get valid config from settings.
configs = getattr(settings, 'CKEDITOR_CONFIGS', None)
if configs != None:
if isinstance(configs, dict):
# Make sure the config_name exists.
if config_name in configs:
config = configs[config_name]
# Make sure the configuration is a dictionary.
if not isinstance(config, dict):
raise ImproperlyConfigured('CKEDITOR_CONFIGS["%s"] \
setting must be a dictionary type.' % \
config_name)
# Override defaults with settings config.
self.config.update(config)
else:
raise ImproperlyConfigured("No configuration named '%s' \
found in your CKEDITOR_CONFIGS setting." % \
config_name)
else:
raise ImproperlyConfigured('CKEDITOR_CONFIGS setting must be a\
dictionary type.')
def render(self, name, value, attrs={}):
if value is None:
value = ''
final_attrs = self.build_attrs(attrs, name=name)
self.config['filebrowserUploadUrl'] = reverse('ckeditor_upload')
self.config['filebrowserBrowseUrl'] = reverse('ckeditor_browse')
return mark_safe(render_to_string('ckeditor/widget.html', {
'final_attrs': flatatt(final_attrs),
'value': conditional_escape(force_unicode(value)),
'id': final_attrs['id'],
'config': json_encode(self.config)
})
)
| from django import forms
from django.conf import settings
from django.core.urlresolvers import reverse
from django.template.loader import render_to_string
from django.utils.safestring import mark_safe
from django.utils.html import conditional_escape
from django.utils.encoding import force_unicode
from django.utils import simplejson
from django.core.exceptions import ImproperlyConfigured
from django.forms.util import flatatt
json_encode = simplejson.JSONEncoder().encode
DEFAULT_CONFIG = {
'skin': 'django',
'toolbar': 'Full',
'height': 291,
'width': 835,
'filebrowserWindowWidth': 940,
'filebrowserWindowHeight': 725,
}
class CKEditorWidget(forms.Textarea):
"""
Widget providing CKEditor for Rich Text Editing.
Supports direct image uploads and embed.
"""
class Media:
try:
js = (
settings.CKEDITOR_MEDIA_PREFIX + 'ckeditor/ckeditor.js',
)
except AttributeError:
raise ImproperlyConfigured("django-ckeditor requires \
CKEDITOR_MEDIA_PREFIX setting. This setting specifies a \
URL prefix to the ckeditor JS and CSS media (not \
uploaded media). Make sure to use a trailing slash: \
CKEDITOR_MEDIA_PREFIX = '/media/ckeditor/'")
def __init__(self, config_name='default', *args, **kwargs):
super(CKEditorWidget, self).__init__(*args, **kwargs)
# Setup config from defaults.
self.config = DEFAULT_CONFIG.copy()
# Try to get valid config from settings.
configs = getattr(settings, 'CKEDITOR_CONFIGS', None)
if configs != None:
if isinstance(configs, dict):
# Make sure the config_name exists.
if config_name in configs:
config = configs[config_name]
# Make sure the configuration is a dictionary.
if not isinstance(config, dict):
raise ImproperlyConfigured('CKEDITOR_CONFIGS["%s"] \
setting must be a dictionary type.' % \
config_name)
# Override defaults with settings config.
self.config.update(config)
else:
raise ImproperlyConfigured("No configuration named '%s' \
found in your CKEDITOR_CONFIGS setting." % \
config_name)
else:
raise ImproperlyConfigured('CKEDITOR_CONFIGS setting must be a\
dictionary type.')
def render(self, name, value, attrs={}):
if value is None:
value = ''
final_attrs = self.build_attrs(attrs, name=name)
self.config['filebrowserUploadUrl'] = reverse('ckeditor_upload')
self.config['filebrowserBrowseUrl'] = reverse('ckeditor_browse')
return mark_safe(render_to_string('ckeditor/widget.html', {
'final_attrs': flatatt(final_attrs),
'value': conditional_escape(force_unicode(value)),
'id': final_attrs['id'],
'config': json_encode(self.config)
})
)
| Python | 0 |
b927fe276af848b6c9a4653e04421a739e63037c | remove unused import | test/test_graphprot.py | test/test_graphprot.py | from scripttest import TestFileEnvironment
import re
# from filecmp import cmp
bindir = "graphprot/"
script = "graphprot_seqmodel"
# test file environment
datadir = "test/"
testdir = "test/testenv_graphprot_seqmodel/"
# directories relative to test file environment
bindir_rel = "../../" + bindir
datadir_rel = "../../" + datadir
env = TestFileEnvironment(testdir)
def test_invocation_no_params():
"Call without parameters should return usage information."
call = bindir_rel + script
run = env.run(
call,
expect_error=True)
assert run.returncode == 2
assert re.match("usage", run.stderr), "stderr should contain usage information: {}".format(run.stderr)
def test_simple_fit():
"Train a model on 10 positive and 10 negative sequences using default paramters."
outfile = "test_simple_fit.model"
call = bindir_rel + script + " -vvv fit -p {} -n {} --output-dir ./ --model-file {} --n-iter 1".format(
datadir_rel + "PARCLIP_MOV10_Sievers_10seqs.train.positives.fa",
datadir_rel + "PARCLIP_MOV10_Sievers_10seqs.train.negatives.fa",
outfile
)
# ../../graphprot/graphprot_seqmodel -vvv fit -p ../../test/PARCLIP_MOV10_Sievers_100seqs.train.positives.fa -n ../../test/PARCLIP_MOV10_Sievers_100seqs.train.negatives.fa --output-dir ./ --model-file test_simple_fit.model --n-iter 1
env.run(call)
call = bindir_rel + script + " -vvv estimate -p {} -n {} --output-dir ./ --model-file {} --cross-validation".format(
datadir_rel + "PARCLIP_MOV10_Sievers_10seqs.train.positives.fa",
datadir_rel + "PARCLIP_MOV10_Sievers_10seqs.train.negatives.fa",
outfile
)
# ../../graphprot/graphprot_seqmodel -vvv estimate -p ../../test/PARCLIP_MOV10_Sievers_1kseqs.train.positives.fa -n ../../test/PARCLIP_MOV10_Sievers_1kseqs.train.negatives.fa --output-dir ./ --model-file test_simple_fit.model --cross-validation
run = env.run(
call,
expect_stderr=True,
)
stdout = open(testdir + "test_simple_fit_estimate.out", "w")
stdout.write(run.stdout)
| from scripttest import TestFileEnvironment
import re
import os
# from filecmp import cmp
bindir = "graphprot/"
script = "graphprot_seqmodel"
# test file environment
datadir = "test/"
testdir = "test/testenv_graphprot_seqmodel/"
# directories relative to test file environment
bindir_rel = "../../" + bindir
datadir_rel = "../../" + datadir
env = TestFileEnvironment(testdir)
def test_invocation_no_params():
"Call without parameters should return usage information."
call = bindir_rel + script
run = env.run(
call,
expect_error=True)
assert run.returncode == 2
assert re.match("usage", run.stderr), "stderr should contain usage information: {}".format(run.stderr)
def test_simple_fit():
"Train a model on 10 positive and 10 negative sequences using default paramters."
outfile = "test_simple_fit.model"
call = bindir_rel + script + " -vvv fit -p {} -n {} --output-dir ./ --model-file {} --n-iter 1".format(
datadir_rel + "PARCLIP_MOV10_Sievers_10seqs.train.positives.fa",
datadir_rel + "PARCLIP_MOV10_Sievers_10seqs.train.negatives.fa",
outfile
)
# ../../graphprot/graphprot_seqmodel -vvv fit -p ../../test/PARCLIP_MOV10_Sievers_100seqs.train.positives.fa -n ../../test/PARCLIP_MOV10_Sievers_100seqs.train.negatives.fa --output-dir ./ --model-file test_simple_fit.model --n-iter 1
env.run(call)
call = bindir_rel + script + " -vvv estimate -p {} -n {} --output-dir ./ --model-file {} --cross-validation".format(
datadir_rel + "PARCLIP_MOV10_Sievers_10seqs.train.positives.fa",
datadir_rel + "PARCLIP_MOV10_Sievers_10seqs.train.negatives.fa",
outfile
)
# ../../graphprot/graphprot_seqmodel -vvv estimate -p ../../test/PARCLIP_MOV10_Sievers_1kseqs.train.positives.fa -n ../../test/PARCLIP_MOV10_Sievers_1kseqs.train.negatives.fa --output-dir ./ --model-file test_simple_fit.model --cross-validation
run = env.run(
call,
expect_stderr=True,
)
stdout = open(testdir + "test_simple_fit_estimate.out", "w")
stdout.write(run.stdout)
| Python | 0.000001 |
c04bd5e52b4a516e31f98231cc6ccb7853040d2b | fix bad middleware arg | corehq/middleware.py | corehq/middleware.py | import logging
import os
import datetime
from django.conf import settings
try:
import psutil
except ImportError:
psutil = None
# this isn't OR specific, but we like it to be included
OPENROSA_ACCEPT_LANGUAGE = "HTTP_ACCEPT_LANGUAGE"
OPENROSA_VERSION_HEADER = "HTTP_X_OPENROSA_VERSION"
OPENROSA_DATE_HEADER = "HTTP_DATE"
OPENROSA_HEADERS = [OPENROSA_VERSION_HEADER, OPENROSA_DATE_HEADER, OPENROSA_ACCEPT_LANGUAGE]
class OpenRosaMiddleware(object):
"""
Middleware to support OpenRosa request/response standards compliance
https://bitbucket.org/javarosa/javarosa/wiki/OpenRosaRequest
"""
def __init__(self):
pass
def process_request(self, request):
# if there's a date header specified add that to the request
# as a first class property
or_headers = {}
for header in OPENROSA_HEADERS:
if header in request.META:
or_headers[header] = request.META[header]
request.openrosa_headers = or_headers
def process_response(self, request, response):
response[OPENROSA_VERSION_HEADER] = settings.OPENROSA_VERSION
return response
profile_logger = logging.getLogger('profile_middleware')
class MemoryUsageMiddleware(object):
"""
Stolen and modified from http://stackoverflow.com/a/12254394/8207
This is a pretty poor, blunt tool and is not recommended to be treated as definitive truth.
"""
_psutil_installed = None
def _check_psutil(self):
if self._psutil_installed is None:
if psutil is None:
profile_logger.warning('Install dev-requirements (psutil) in order to use MemoryUsageMiddleware')
self._psutil_installed = False
else:
self._psutil_installed = True
return self._psutil_installed
def process_request(self, request):
if self._check_psutil():
request._profile_memory = psutil.Process(os.getpid()).get_memory_info()
def process_response(self, request, response):
if self._check_psutil() and hasattr(request, '_profile_memory'):
mem = psutil.Process(os.getpid()).get_memory_info()
diff = (mem.rss - request._profile_memory.rss) / 1024
profile_logger.info('{} memory usage {} KB'.format(request.path, diff))
return response
class TimingMiddleware(object):
def process_request(self, request):
request._profile_starttime = datetime.datetime.utcnow()
def process_response(self, request, response):
if hasattr(request, '_profile_starttime'):
duration = datetime.datetime.utcnow() - request._profile_starttime
profile_logger.info('{} time {}'.format(request.path, duration), extra={'duration': duration})
return response
| import logging
import os
import datetime
from django.conf import settings
try:
import psutil
except ImportError:
psutil = None
# this isn't OR specific, but we like it to be included
OPENROSA_ACCEPT_LANGUAGE = "HTTP_ACCEPT_LANGUAGE"
OPENROSA_VERSION_HEADER = "HTTP_X_OPENROSA_VERSION"
OPENROSA_DATE_HEADER = "HTTP_DATE"
OPENROSA_HEADERS = [OPENROSA_VERSION_HEADER, OPENROSA_DATE_HEADER, OPENROSA_ACCEPT_LANGUAGE]
class OpenRosaMiddleware(object):
"""
Middleware to support OpenRosa request/response standards compliance
https://bitbucket.org/javarosa/javarosa/wiki/OpenRosaRequest
"""
def __init__(self):
pass
def process_request(self, request):
# if there's a date header specified add that to the request
# as a first class property
or_headers = {}
for header in OPENROSA_HEADERS:
if header in request.META:
or_headers[header] = request.META[header]
request.openrosa_headers = or_headers
def process_response(self, request, response):
response[OPENROSA_VERSION_HEADER] = settings.OPENROSA_VERSION
return response
profile_logger = logging.getLogger('profile_middleware')
class MemoryUsageMiddleware(object):
"""
Stolen and modified from http://stackoverflow.com/a/12254394/8207
This is a pretty poor, blunt tool and is not recommended to be treated as definitive truth.
"""
_psutil_installed = None
def _check_psutil(self):
if self._psutil_installed is None:
if psutil is None:
profile_logger.warning('Install dev-requirements (psutil) in order to use MemoryUsageMiddleware')
self._psutil_installed = False
else:
self._psutil_installed = True
return self._psutil_installed
def process_request(self, request):
if self._check_psutil():
request._profile_memory = psutil.Process(os.getpid()).get_memory_info()
def process_response(self, request, response):
if self._check_psutil() and hasattr(request, '_profile_memory'):
mem = psutil.Process(os.getpid()).get_memory_info()
diff = (mem.rss - request._profile_memory.rss) / 1024
profile_logger.info('{} memory usage {} KB'.format(request.path, diff))
return response
class TimingMiddleware(object):
def process_request(self, request):
request._profile_starttime = datetime.datetime.utcnow()
def process_response(self, request, response):
if hasattr(request, '_profile_starttime'):
duration = datetime.datetime.utcnow() - request._profile_starttime
profile_logger.info('{} time {}'.format(request.path, duration), extras={'duration': duration})
return response
| Python | 0.000169 |
56f24e52f961da414f0610e6bd815812b4a14ef6 | Update utils.py | classifier/utils.py | classifier/utils.py | # coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Utility functions for LaserTagger."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
from typing import Iterator, Mapping, Sequence, Text, Tuple
import tensorflow as tf
def get_token_list(text):
"""Returns a list of tokens.
This function expects that the tokens in the text are separated by space
character(s). Example: "ca n't , touch". This is the case at least for the
public DiscoFuse and WikiSplit datasets.
Args:
text: String to be split into tokens.
"""
return text.split()
def yield_sources_and_targets_meaning(input_file):
"""Reads and yields source lists and targets from the input file.
Args:
input_file: Path to the input file.
Yields:
Tuple with (list of source texts, target text).
"""
with tf.io.gfile.GFile(input_file) as f:
for line in f:
if len(line.rstrip('\n').split('\t')) == 3:
source, summary, score = line.rstrip('\n').split('\t')
yield [source], summary, score
def yield_sources_and_targets_grammar(input_file):
"""Reads and yields source lists and targets from the input file.
Args:
input_file: Path to the input file.
Yields:
Tuple with (list of source texts, target text).
"""
with tf.io.gfile.GFile(input_file) as f:
for line in f:
source, score = line.rstrip('\n').split('\t')
yield [source], None, score
| # coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Utility functions for LaserTagger."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
from typing import Iterator, Mapping, Sequence, Text, Tuple
import tensorflow as tf
def get_token_list(text):
"""Returns a list of tokens.
This function expects that the tokens in the text are separated by space
character(s). Example: "ca n't , touch". This is the case at least for the
public DiscoFuse and WikiSplit datasets.
Args:
text: String to be split into tokens.
"""
return text.split()
def yield_sources_and_targets_meaning(input_file):
"""Reads and yields source lists and targets from the input file.
Args:
input_file: Path to the input file.
Yields:
Tuple with (list of source texts, target text).
"""
with tf.io.gfile.GFile(input_file) as f:
for line in f:
source, summary, score = line.rstrip('\n').split('\t')
yield [source], summary, score
def yield_sources_and_targets_grammar(input_file):
"""Reads and yields source lists and targets from the input file.
Args:
input_file: Path to the input file.
Yields:
Tuple with (list of source texts, target text).
"""
with tf.io.gfile.GFile(input_file) as f:
for line in f:
source, score = line.rstrip('\n').split('\t')
yield [source], None, score
| Python | 0.000001 |
c28112624b3c735c610f756a6bff1497e9516c64 | Revise naive alg to more intuitive one: separate checking index and value | alg_find_peak_1D.py | alg_find_peak_1D.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def find_peak_naive(arr):
"""Find peak by naive iteration.
Time complexity: O(n).
"""
for i in range(len(arr)):
if i == 0:
if arr[i] > arr[i + 1]:
return arr[i]
elif i == (len(arr) - 1):
if arr[i] > arr[i - 1]:
return arr[i]
else:
if arr[i] > arr[i - 1] and arr[i] > arr[i + 1]:
return arr[i]
def find_peak(arr):
"""Find peak by divide-end-conquer algorithm.
Time complexity: O(logn).
"""
if len(arr) == 1:
return arr[0]
else:
mid = len(arr) // 2
if arr[mid] < arr[mid - 1]:
return find_peak(arr[:mid-1])
elif arr[mid] < arr[mid + 1]:
return find_peak(arr[mid+1:])
else:
return arr[mid]
def main():
import time
# Array with peak 4.
arr = [0, 1, 4, 3, 2]
# Find peak by naive version.
time_start = time.time()
peak = find_peak_naive(arr)
time_run = time.time() - time_start
print('Peak: {}'.format(peak))
print('Time for find_peak_naive(): {}'.format(time_run))
# Find peak by divide-end-conquer algorithm.
time_start = time.time()
peak = find_peak(arr)
time_run = time.time() - time_start
print('Peak: {}'.format(peak))
print('Time for find_peak_naive(): {}'.format(time_run))
if __name__ == '__main__':
main()
| from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def find_peak_naive(arr):
"""Find peak by naive iteration.
Time complexity: O(n).
"""
for i in range(len(arr)):
if i == 0 and arr[i] > arr[i + 1]:
return arr[i]
elif i == (len(arr) - 1) and arr[i] > arr[i - 1]:
return arr[i]
elif (0 < i < (len(arr) - 1) and
arr[i] > arr[i - 1] and arr[i] > arr[i + 1]):
return arr[i]
else:
pass
def find_peak(arr):
"""Find peak by divide-end-conquer algorithm.
Time complexity: O(logn).
"""
if len(arr) == 1:
return arr[0]
else:
mid = len(arr) // 2
if arr[mid] < arr[mid - 1]:
return find_peak(arr[:mid-1])
elif arr[mid] < arr[mid + 1]:
return find_peak(arr[mid+1:])
else:
return arr[mid]
def main():
import time
# Array with peak 4.
arr = [0, 1, 4, 3, 2]
# Find peak by naive version.
time_start = time.time()
peak = find_peak_naive(arr)
time_run = time.time() - time_start
print('Peak: {}'.format(peak))
print('Time for find_peak_naive(): {}'.format(time_run))
# Find peak by divide-end-conquer algorithm.
time_start = time.time()
peak = find_peak(arr)
time_run = time.time() - time_start
print('Peak: {}'.format(peak))
print('Time for find_peak_naive(): {}'.format(time_run))
if __name__ == '__main__':
main()
| Python | 0.000016 |
8a44ec213272536d59d16118e2a13c533299242e | fix missing = in SystemCSerializer | hwt/serializer/systemC/serializer.py | hwt/serializer/systemC/serializer.py | from jinja2.environment import Environment
from jinja2.loaders import PackageLoader
from hwt.hdlObjects.types.enum import Enum
from hwt.hdlObjects.types.enumVal import EnumVal
from hwt.serializer.generic.serializer import GenericSerializer
from hwt.serializer.serializerClases.nameScope import LangueKeyword
from hwt.serializer.systemC.keywords import SYSTEMC_KEYWORDS
from hwt.serializer.systemC.statements import SystemCSerializer_statements
from hwt.serializer.systemC.type import SystemCSerializer_type
from hwt.serializer.systemC.value import SystemCSerializer_value
from hwt.serializer.utils import maxStmId
from hwt.synthesizer.param import evalParam
class SystemCSerializer(GenericSerializer, SystemCSerializer_value, SystemCSerializer_type, SystemCSerializer_statements):
"""
Serialized used to convert HWT design to SystemC code
"""
fileExtension = '.cpp'
_keywords_dict = {kw: LangueKeyword() for kw in SYSTEMC_KEYWORDS}
env = Environment(loader=PackageLoader('hwt', 'serializer/systemC/templates'))
moduleTmpl = env.get_template('module.cpp')
mehtodTmpl = env.get_template("method.cpp")
ifTmpl = env.get_template("if.cpp")
switchStm = env.get_template("switch.cpp")
@classmethod
def comment(cls, comentStr):
return "/* %s */" % comentStr
@classmethod
def PortItem(cls, pi, ctx):
d = cls.DIRECTION(pi.direction)
return "sc_%s<%s> %s;" % (d,
cls.HdlType(pi._dtype, ctx),
pi.name)
@classmethod
def DIRECTION(cls, d):
return d.name.lower()
@classmethod
def Architecture(cls, arch, ctx):
variables = []
procs = []
extraTypes = set()
extraTypes_serialized = []
scope = ctx.scope
childCtx = ctx.withIndent()
arch.variables.sort(key=lambda x: x.name)
arch.processes.sort(key=lambda x: (x.name, maxStmId(x)))
arch.componentInstances.sort(key=lambda x: x._name)
ports = list(map(lambda pi: cls.PortItem(pi, childCtx), arch.entity.ports))
for v in arch.variables:
t = v._dtype
# if type requires extra definition
if isinstance(t, Enum) and t not in extraTypes:
extraTypes.add(v._dtype)
extraTypes_serialized.append(cls.HdlType(t, scope, declaration=True))
v.name = scope.checkedName(v.name, v)
variables.append(v)
def serializeVar(v):
dv = evalParam(v.defaultVal)
if isinstance(dv, EnumVal):
dv = "%s.%s" % (dv._dtype.name, dv.val)
else:
dv = cls.Value(dv, None)
return v.name, cls.HdlType(v._dtype), dv
for p in arch.processes:
procs.append(cls.HWProcess(p, childCtx))
constants = []
return cls.moduleTmpl.render(
name=arch.getEntityName(),
constants=constants,
ports=ports,
signals=list(map(serializeVar, variables)),
extraTypes=extraTypes_serialized,
processes=procs,
processObjects=arch.processes,
componentInstances=arch.componentInstances,
)
| from jinja2.environment import Environment
from jinja2.loaders import PackageLoader
from hwt.hdlObjects.types.enum import Enum
from hwt.hdlObjects.types.enumVal import EnumVal
from hwt.serializer.generic.serializer import GenericSerializer
from hwt.serializer.serializerClases.nameScope import LangueKeyword
from hwt.serializer.systemC.keywords import SYSTEMC_KEYWORDS
from hwt.serializer.systemC.statements import SystemCSerializer_statements
from hwt.serializer.systemC.type import SystemCSerializer_type
from hwt.serializer.systemC.value import SystemCSerializer_value
from hwt.serializer.utils import maxStmId
from hwt.synthesizer.param import evalParam
class SystemCSerializer(GenericSerializer, SystemCSerializer_value, SystemCSerializer_type, SystemCSerializer_statements):
"""
Serialized used to convert HWT design to SystemC code
"""
fileExtension = '.cpp'
_keywords_dict = {kw: LangueKeyword() for kw in SYSTEMC_KEYWORDS}
env = Environment(loader=PackageLoader('hwt', 'serializer/systemC/templates'))
moduleTmpl = env.get_template('module.cpp')
mehtodTmpl = env.get_template("method.cpp")
ifTmpl = env.get_template("if.cpp")
switchStm env.get_template("switch.cpp")
@classmethod
def comment(cls, comentStr):
return "/* %s */" % comentStr
@classmethod
def PortItem(cls, pi, ctx):
d = cls.DIRECTION(pi.direction)
return "sc_%s<%s> %s;" % (d,
cls.HdlType(pi._dtype, ctx),
pi.name)
@classmethod
def DIRECTION(cls, d):
return d.name.lower()
@classmethod
def Architecture(cls, arch, ctx):
variables = []
procs = []
extraTypes = set()
extraTypes_serialized = []
scope = ctx.scope
childCtx = ctx.withIndent()
arch.variables.sort(key=lambda x: x.name)
arch.processes.sort(key=lambda x: (x.name, maxStmId(x)))
arch.componentInstances.sort(key=lambda x: x._name)
ports = list(map(lambda pi: cls.PortItem(pi, childCtx), arch.entity.ports))
for v in arch.variables:
t = v._dtype
# if type requires extra definition
if isinstance(t, Enum) and t not in extraTypes:
extraTypes.add(v._dtype)
extraTypes_serialized.append(cls.HdlType(t, scope, declaration=True))
v.name = scope.checkedName(v.name, v)
variables.append(v)
def serializeVar(v):
dv = evalParam(v.defaultVal)
if isinstance(dv, EnumVal):
dv = "%s.%s" % (dv._dtype.name, dv.val)
else:
dv = cls.Value(dv, None)
return v.name, cls.HdlType(v._dtype), dv
for p in arch.processes:
procs.append(cls.HWProcess(p, childCtx))
constants = []
return cls.moduleTmpl.render(
name=arch.getEntityName(),
constants=constants,
ports=ports,
signals=list(map(serializeVar, variables)),
extraTypes=extraTypes_serialized,
processes=procs,
processObjects=arch.processes,
componentInstances=arch.componentInstances,
)
| Python | 0.001621 |
dbec204b242ab643de162046ba73dca32043c6c2 | Implement __getattr__ to reduce code | space-age/space_age.py | space-age/space_age.py | class SpaceAge(object):
YEARS = {"on_earth": 1,
"on_mercury": 0.2408467,
"on_venus": 0.61519726,
"on_mars": 1.8808158,
"on_jupiter": 11.862615,
"on_saturn": 29.447498,
"on_uranus": 84.016846,
"on_neptune": 164.79132}
def __init__(self, seconds):
self.seconds = seconds
@property
def years(self):
return self.seconds/31557600
def __getattr__(self, on_planet):
if on_planet in SpaceAge.YEARS:
return lambda: round(self.years/SpaceAge.YEARS[on_planet], 2)
else:
raise AttributeError
| class SpaceAge(object):
def __init__(self, seconds):
self.seconds = seconds
@property
def years(self):
return self.seconds/31557600
def on_earth(self):
return round(self.years, 2)
def on_mercury(self):
return round(self.years/0.2408467, 2)
def on_venus(self):
return round(self.years/0.6151976, 2)
def on_mars(self):
return round(self.years/1.8808158, 2)
def on_jupiter(self):
return round(self.years/11.862615, 2)
def on_saturn(self):
return round(self.years/29.447498, 2)
def on_uranus(self):
return round(self.years/84.016846, 2)
def on_neptune(self):
return round(self.years/164.79132, 2)
| Python | 0.99731 |
a28bb36aeb887d11b9cf8391e03264a81b40b84a | add base entity construct | sparc/entity/entity.py | sparc/entity/entity.py | from BTrees.OOBTree import OOBTree
from zope.annotation.interfaces import IAnnotations
from zope.annotation.interfaces import IAnnotatable
from zope.annotation.interfaces import IAttributeAnnotatable
from zope.component import adapts
from zope.component.factory import Factory
from zope.interface import implements
from zope.schema import getFields
from zope.schema.fieldproperty import FieldProperty
from interfaces import IIdentified
from interfaces import IEntity
from interfaces import IOwner
from interfaces import IUrlReference
from interfaces import IKeyphraseTags
class BaseSchemaObject(object):
def __init__(self, **kwargs):
for k, v in kwargs.iteritems():
setattr(self, k, v)
class SparcEntity(object):
"""A basic Sparc entity"""
implements(IEntity, IAttributeAnnotatable)
def __init__(self, **kwargs):
self.id = kwargs['id'] # required
if 'name' in kwargs: self.name = kwargs['name']
if 'description' in kwargs: self.description = kwargs['description']
if 'details' in kwargs: self.details = kwargs['details']
#IEntity
id = FieldProperty(IIdentified['id'])
def getId(self):
return self.id
name = FieldProperty(IEntity['name'])
description = FieldProperty(IEntity['description'])
details = FieldProperty(IEntity['details'])
sparcEntityFactory = Factory(SparcEntity)
class SparcEntityOwnerForAnnotableObjects(object):
implements(IOwner)
adapts(IAnnotatable)
def __init__(self, context):
self.context = context
self.annotations = IAnnotations(context).\
setdefault('IOwner', OOBTree())
if 'owner' not in self.annotations:
self.annotations['owner'] = None
@property
def owner(self):
return self.annotations['owner']
@owner.setter
def owner(self, value):
getFields(IOwner)['owner'].validate(value)
self.annotations['owner'] = value
class SparcEntityUrlForAnnotableObjects(object):
implements(IUrlReference)
adapts(IAnnotatable)
def __init__(self, context):
self.context = context
self.annotations = IAnnotations(context).\
setdefault('IUrlReference', OOBTree())
if 'url' not in self.annotations:
self.annotations['url'] = None
@property
def url(self):
return self.annotations['url']
@url.setter
def url(self, value):
getFields(IUrlReference)['url'].validate(value)
self.annotations['url'] = value
class SparcEntityKeyphraseTagsForAnnotableObjects(object):
implements(IKeyphraseTags)
adapts(IAnnotatable)
def __init__(self, context):
self.context = context
self.annotations = IAnnotations(context).\
setdefault('IKeyphraseTags', OOBTree())
if 'tags' not in self.annotations:
self.annotations['tags'] = set()
@property
def tags(self):
return self.annotations['tags']
@tags.setter
def tags(self, value):
getFields(IKeyphraseTags)['tags'].validate(value)
self.annotations['tags'] = value | from BTrees.OOBTree import OOBTree
from zope.annotation.interfaces import IAnnotations
from zope.annotation.interfaces import IAnnotatable
from zope.annotation.interfaces import IAttributeAnnotatable
from zope.component import adapts
from zope.component.factory import Factory
from zope.interface import implements
from zope import schema
from zope.schema import getFields
from zope.schema.fieldproperty import FieldProperty
from interfaces import IIdentified
from interfaces import IEntity
from interfaces import IOwner
from interfaces import IUrlReference
from interfaces import IKeyphraseTags
class SparcEntity(object):
"""A basic Sparc entity"""
implements(IEntity, IAttributeAnnotatable)
def __init__(self, **kwargs):
self.id = kwargs['id'] # required
if 'name' in kwargs: self.name = kwargs['name']
if 'description' in kwargs: self.description = kwargs['description']
if 'details' in kwargs: self.details = kwargs['details']
#IEntity
id = FieldProperty(IIdentified['id'])
def getId(self):
return self.id
name = FieldProperty(IEntity['name'])
description = FieldProperty(IEntity['description'])
details = FieldProperty(IEntity['details'])
sparcEntityFactory = Factory(SparcEntity)
class SparcEntityOwnerForAnnotableObjects(object):
implements(IOwner)
adapts(IAnnotatable)
def __init__(self, context):
self.context = context
self.annotations = IAnnotations(context).\
setdefault('IOwner', OOBTree())
if 'owner' not in self.annotations:
self.annotations['owner'] = None
@property
def owner(self):
return self.annotations['owner']
@owner.setter
def owner(self, value):
getFields(IOwner)['owner'].validate(value)
self.annotations['owner'] = value
class SparcEntityUrlForAnnotableObjects(object):
implements(IUrlReference)
adapts(IAnnotatable)
def __init__(self, context):
self.context = context
self.annotations = IAnnotations(context).\
setdefault('IUrlReference', OOBTree())
if 'url' not in self.annotations:
self.annotations['url'] = None
@property
def url(self):
return self.annotations['url']
@url.setter
def url(self, value):
getFields(IUrlReference)['url'].validate(value)
self.annotations['url'] = value
class SparcEntityKeyphraseTagsForAnnotableObjects(object):
implements(IKeyphraseTags)
adapts(IAnnotatable)
def __init__(self, context):
self.context = context
self.annotations = IAnnotations(context).\
setdefault('IKeyphraseTags', OOBTree())
if 'tags' not in self.annotations:
self.annotations['tags'] = set()
@property
def tags(self):
return self.annotations['tags']
@tags.setter
def tags(self, value):
getFields(IKeyphraseTags)['tags'].validate(value)
self.annotations['tags'] = value | Python | 0.000001 |
2732ac448d6a31678629324dc47f89a33ecd261b | Manage service choicefield display in inline form | billjobs/admin.py | billjobs/admin.py | from django import forms
from django.db.models import Q
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from .models import Bill, BillLine, Service, UserProfile
class BillLineInlineForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(BillLineInlineForm, self).__init__(*args, **kwargs)
if self.instance.id:
self.fields['service'].queryset = Service.objects.filter(Q(is_available=True) | Q(name=self.instance.service.name))
print(self.fields['service'].choices)
else:
self.fields['service'].queryset = Service.objects.filter(is_available=True)
class Meta:
model = BillLine
fields = ('service', 'quantity', 'total')
class BillLineInline(admin.TabularInline):
model = BillLine
extra = 1
form = BillLineInlineForm
class BillAdmin(admin.ModelAdmin):
readonly_fields = ('number', 'billing_date', 'amount')
inlines = [BillLineInline]
list_display = ('__str__', 'coworker_name', 'amount', 'billing_date',
'isPaid', 'pdf_file_url')
list_editable = ('isPaid',)
list_filter = ('isPaid', )
search_fields = ('user__first_name', 'user__last_name', 'number')
def formfield_for_foreignkey(self, db_field, request=None, **kwargs):
field = super(BillAdmin, self).formfield_for_foreignkey(
db_field, request, **kwargs)
if db_field.rel.to == User:
field.initial = request.user.id
field.label_from_instance = self.get_user_label
return field
def get_user_label(self, user):
name = user.get_full_name()
username = user.username
return (name and name != username and '%s (%s)' % (name, username)
or username)
def pdf_file_url(self, obj):
return '<a href="%s">%s.pdf</a>' % (reverse('generate-pdf',
kwargs={'bill_id': obj.id}), obj.number)
pdf_file_url.allow_tags = True
class UserProfileAdmin(admin.StackedInline):
model = UserProfile
class UserAdmin(UserAdmin):
inlines = (UserProfileAdmin, )
fieldsets = (
(None, {
'fields': ('username', 'password')
}),
(_('Personal info'), {
'fields': ('first_name', 'last_name', 'email')
}),
(_('Permissions'), {
'classes': ('collapse',),
'fields': ('is_active', 'is_staff', 'is_superuser',
'groups', 'user_permissions')
}),
(_('Important dates'), {
'classes': ('collapse',),
'fields': ('last_login', 'date_joined')
})
)
list_display = ('username', 'get_full_name', 'email')
class ServiceAdmin(admin.ModelAdmin):
model = Service
list_display = ('__str__', 'price', 'is_available')
list_editable = ('is_available',)
list_filter = ('is_available',)
admin.site.register(Bill, BillAdmin)
admin.site.register(Service, ServiceAdmin)
# User have to be unregistered
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
| from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from .models import Bill, BillLine, Service, UserProfile
class BillLineInline(admin.TabularInline):
model = BillLine
extra = 1
class BillAdmin(admin.ModelAdmin):
readonly_fields = ('number', 'billing_date', 'amount')
inlines = [BillLineInline]
list_display = ('__str__', 'coworker_name', 'amount', 'billing_date',
'isPaid', 'pdf_file_url')
list_editable = ('isPaid',)
list_filter = ('isPaid', )
search_fields = ('user__first_name', 'user__last_name', 'number')
def formfield_for_foreignkey(self, db_field, request=None, **kwargs):
field = super(BillAdmin, self).formfield_for_foreignkey(
db_field, request, **kwargs)
if db_field.rel.to == User:
field.initial = request.user.id
field.label_from_instance = self.get_user_label
return field
def get_user_label(self, user):
name = user.get_full_name()
username = user.username
return (name and name != username and '%s (%s)' % (name, username)
or username)
def pdf_file_url(self, obj):
return '<a href="%s">%s.pdf</a>' % (reverse('generate-pdf',
kwargs={'bill_id': obj.id}), obj.number)
pdf_file_url.allow_tags = True
class UserProfileAdmin(admin.StackedInline):
model = UserProfile
class UserAdmin(UserAdmin):
inlines = (UserProfileAdmin, )
fieldsets = (
(None, {
'fields': ('username', 'password')
}),
(_('Personal info'), {
'fields': ('first_name', 'last_name', 'email')
}),
(_('Permissions'), {
'classes': ('collapse',),
'fields': ('is_active', 'is_staff', 'is_superuser',
'groups', 'user_permissions')
}),
(_('Important dates'), {
'classes': ('collapse',),
'fields': ('last_login', 'date_joined')
})
)
list_display = ('username', 'get_full_name', 'email')
class ServiceAdmin(admin.ModelAdmin):
model = Service
list_display = ('__str__', 'price', 'is_available')
list_editable = ('is_available',)
list_filter = ('is_available',)
admin.site.register(Bill, BillAdmin)
admin.site.register(Service, ServiceAdmin)
# User have to be unregistered
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
| Python | 0 |
8b275ccb96b8fe3c2c3919e11f08e988219a1e14 | Add the process PID in the logs | src/diamond/utils/log.py | src/diamond/utils/log.py | # coding=utf-8
import logging
import logging.config
import sys
import os
class DebugFormatter(logging.Formatter):
def __init__(self, fmt=None):
if fmt is None:
fmt = ('%(created)s\t' +
'[%(processName)s:%(process)d:%(levelname)s]\t' +
'%(message)s')
self.fmt_default = fmt
self.fmt_prefix = fmt.replace('%(message)s', '')
logging.Formatter.__init__(self, fmt)
def format(self, record):
self._fmt = self.fmt_default
if record.levelno in [logging.ERROR, logging.CRITICAL]:
self._fmt = ''
self._fmt += self.fmt_prefix
self._fmt += '%(message)s'
self._fmt += '\n'
self._fmt += self.fmt_prefix
self._fmt += '%(pathname)s:%(lineno)d'
return logging.Formatter.format(self, record)
def setup_logging(configfile, stdout=False):
log = logging.getLogger('diamond')
if stdout:
log.setLevel(logging.DEBUG)
streamHandler = logging.StreamHandler(sys.stdout)
streamHandler.setFormatter(DebugFormatter())
streamHandler.setLevel(logging.DEBUG)
log.addHandler(streamHandler)
else:
try:
if sys.version_info >= (2, 6):
logging.config.fileConfig(configfile,
disable_existing_loggers=False)
else:
# python <= 2.5 does not have disable_existing_loggers
# default was to always disable them, in our case we want to
# keep any logger created by handlers
logging.config.fileConfig(configfile)
for logger in logging.root.manager.loggerDict.values():
logger.disabled = 0
except Exception, e:
sys.stderr.write("Error occurs when initialize logging: ")
sys.stderr.write(str(e))
sys.stderr.write(os.linesep)
return log
| # coding=utf-8
import logging
import logging.config
import sys
import os
class DebugFormatter(logging.Formatter):
def __init__(self, fmt=None):
if fmt is None:
fmt = '%(created)s\t[%(processName)s:%(levelname)s]\t%(message)s'
self.fmt_default = fmt
self.fmt_prefix = fmt.replace('%(message)s', '')
logging.Formatter.__init__(self, fmt)
def format(self, record):
self._fmt = self.fmt_default
if record.levelno in [logging.ERROR, logging.CRITICAL]:
self._fmt = ''
self._fmt += self.fmt_prefix
self._fmt += '%(message)s'
self._fmt += '\n'
self._fmt += self.fmt_prefix
self._fmt += '%(pathname)s:%(lineno)d'
return logging.Formatter.format(self, record)
def setup_logging(configfile, stdout=False):
log = logging.getLogger('diamond')
if stdout:
log.setLevel(logging.DEBUG)
streamHandler = logging.StreamHandler(sys.stdout)
streamHandler.setFormatter(DebugFormatter())
streamHandler.setLevel(logging.DEBUG)
log.addHandler(streamHandler)
else:
try:
if sys.version_info >= (2, 6):
logging.config.fileConfig(configfile,
disable_existing_loggers=False)
else:
# python <= 2.5 does not have disable_existing_loggers
# default was to always disable them, in our case we want to
# keep any logger created by handlers
logging.config.fileConfig(configfile)
for logger in logging.root.manager.loggerDict.values():
logger.disabled = 0
except Exception, e:
sys.stderr.write("Error occurs when initialize logging: ")
sys.stderr.write(str(e))
sys.stderr.write(os.linesep)
return log
| Python | 0.000001 |
d2917cb2131b9b08fa6457b195606fcc0220eef1 | Fix X509 construcor | ctypescrypto/x509.py | ctypescrypto/x509.py | from ctypes import c_void_p
from ctypescrypto.bio import Membio
from ctypescrypto.pkey import PKey
from ctypescrypto.oid import Oid
from ctypescrypto.exception import LibCryptoError
from ctypescrypto import libcrypto
class X509Error(LibCryptoError):
pass
class X509Name:
def __init__(self,ptr):
self.ptr=ptr
def __del__(self):
libcrypto.X509_NAME_free(self.ptr)
def __str__(self):
b=Membio()
libcrypto.X509_NAME_print_ex(b.bio,self.ptr,0,PRING_FLAG)
return str(b).decode("utf-8")
def __len__(self):
return libcrypto.X509_NAME_entry_count(self.ptr)
def __getattr__(self,key):
if isinstance(key,Oid):
# Return list of strings
raise NotImpemented
elif isinstance(key,int):
# Return OID, sting tuple
raise NotImplemented
else:
raise TypeError("X509 name can be indexed with oids and numbers only")
def __setattr__(self,key,val):
pass
class X509_extlist:
def __init__(self,ptr):
self.ptr=ptr
def __del__(self):
libcrypto.X509_NAME_free(self.ptr)
def __str__(self):
raise NotImplemented
def __len__(self):
return libcrypto.X509_NAME_entry_count(self.ptr)
def __getattr__(self,key):
raise NotImplemented
def __setattr__(self,key,val):
raise NotImplemented
class X509:
def __init__(self,data=None,ptr=None,format="PEM"):
if ptr is not None:
if data is not None:
raise TypeError("Cannot use data and ptr simultaneously")
self.cert = ptr
elif data is None:
raise TypeError("data argument is required")
else:
b=Membio(data)
if format == "PEM":
self.cert=libcrypto.PEM_read_bio_X509(b.bio,None,None,None)
else:
self.cert=libcrypto.d2i_X509_bio(b.bio,None)
if self.cert is None:
raise X509Error("error reading certificate")
def __del__(self):
libcrypto.X509_free(self.cert)
def __str__(self):
""" Returns der string of the certificate """
b=Membio()
if libcrypto.i2d_X509_bio(b.bio,self.cert)==0:
raise X509Error("error serializing certificate")
@property
def pubkey(self):
"""EVP PKEy object of certificate public key"""
return PKey(ptr=libcrypto.X509_get_pubkey(self.cert,False))
def verify(self,key):
""" Verify self on given issuer key """
@property
def subject(self):
""" X509Name for certificate subject name """
return X509Name(libcrypto.X509_get_subject_name(self.cert))
@property
def issuer(self):
""" X509Name for certificate issuer name """
return X509Name(libcrypto.X509_get_issuer_name(self.cert))
@property
def serial(self):
""" Serial number of certificate as integer """
return
@property
def startDate(self):
""" Certificate validity period start date """
raise NotImplemented
@property
def endDate(self):
""" Certificate validity period end date """
raise NotImplemented
def extensions(self):
raise NotImplemented
| from ctypes import c_void_p
from ctypescrypto.bio import Membio
from ctypescrypto.pkey import PKey
from ctypescrypto.oid import Oid
from ctypescrypto.exception import LibCryptoError
from ctypescrypto import libcrypto
class X509Error(LibCryptoError):
pass
class X509Name:
def __init__(self,ptr):
self.ptr=ptr
def __del__(self):
libcrypto.X509_NAME_free(self.ptr)
def __str__(self):
b=Membio()
libcrypto.X509_NAME_print_ex(b.bio,self.ptr,0,PRING_FLAG)
return str(b).decode("utf-8")
def __len__(self):
return libcrypto.X509_NAME_entry_count(self.ptr)
def __getattr__(self,key):
if isinstance(key,Oid):
# Return list of strings
raise NotImpemented
elif isinstance(key,int):
# Return OID, sting tuple
raise NotImplemented
else:
raise TypeError("X509 name can be indexed with oids and numbers only")
def __setattr__(self,key,val):
pass
class X509_extlist:
def __init__(self,ptr):
self.ptr=ptr
def __del__(self):
libcrypto.X509_NAME_free(self.ptr)
def __str__(self):
raise NotImplemented
def __len__(self):
return libcrypto.X509_NAME_entry_count(self.ptr)
def __getattr__(self,key):
raise NotImplemented
def __setattr__(self,key,val):
raise NotImplemented
class X509:
def __init__(self,data=None,ptr=None,format="PEM"):
if ptr is not None:
if data is not None:
raise TypeError("Cannot use data and ptr simultaneously")
self.cert = ptr
elif data is None:
raise TypeError("data argument is required")
b=Membio(data)
if format == "PEM":
self.cert=libcrypto.PEM_read_bio_X509(b.bio,None,None,None)
else:
self.cert=libcrypto.d2i_X509_bio(b.bio,None)
if self.cert is None:
raise X509Error("error reading certificate")
def __del__(self):
libcrypto.X509_free(self.cert)
def __str__(self):
""" Returns der string of the certificate """
b=Membio()
if libcrypto.i2d_X509_bio(b.bio,self.cert)==0:
raise X509Error("error serializing certificate")
@property
def pubkey(self):
"""EVP PKEy object of certificate public key"""
return PKey(ptr=libcrypto.X509_get_pubkey(self.cert,False))
def verify(self,key):
""" Verify self on given issuer key """
@property
def subject(self):
""" X509Name for certificate subject name """
return X509Name(libcrypto.X509_get_subject_name(self.cert))
@property
def issuer(self):
""" X509Name for certificate issuer name """
return X509Name(libcrypto.X509_get_issuer_name(self.cert))
@property
def serial(self):
""" Serial number of certificate as integer """
return
@property
def startDate(self):
""" Certificate validity period start date """
raise NotImplemented
@property
def endDate(self):
""" Certificate validity period end date """
raise NotImplemented
def extensions(self):
raise NotImplemented
| Python | 0.000018 |
419db3c559d836d6ba77c758212a55051e6c8fab | Add FIXME for module as function arg | test_obj_dict_tools.py | test_obj_dict_tools.py | from obj_dict_tools import *
from nose.tools import raises
@dict_fields(['name', 'size'])
class Simple:
def __init__(self, name=None, size=None):
self.name = name
self.size = size
@dict_fields(['first', 'second'])
class Pair:
def __init__(self, first=None, second=None):
self.first = first
self.second = second
def test_simple_class_to_dict():
s = Simple('foo', 100)
d = to_dict(s)
assert d['__class__'] == 'Simple'
assert d['name'] == 'foo'
assert d['size'] == 100
def test_simple_class_from_dict():
d = {'__class__': 'Simple', 'name': 'foo', 'size': 100}
# FIXME: Explicitly passing in a module is undesirable.
s = from_dict(d, globals())
assert isinstance(s, Simple)
assert s.name == 'foo'
assert s.size == 100
def test_null_fields_to_dict():
p = Pair()
d = to_dict(p)
assert d['__class__'] == 'Pair'
assert not 'first' in d
assert not 'second' in d
def test_list_to_dict():
ss = [Simple('foo', 100), Simple('bar', 200)]
d = to_dict(ss)
assert len(d) == 2
assert d[0]['__class__'] == 'Simple'
assert d[0]['name'] == 'foo'
assert d[0]['size'] == 100
assert d[1]['__class__'] == 'Simple'
assert d[1]['name'] == 'bar'
assert d[1]['size'] == 200
def test_list_field_to_dict():
p = Pair([1, 2, 3, 4, 5], Simple('b', 200))
d = to_dict(p)
assert d['__class__'] == 'Pair'
assert len(d['first']) == 5
assert d['second']['__class__'] == 'Simple'
assert d['second']['name'] == 'b'
assert d['second']['size'] == 200
@raises(Exception)
def test_decorator_rejects_underscore_prefixes():
@dict_fields(['_p'])
class bad_attribute_defined:
pass
| from obj_dict_tools import *
from nose.tools import raises
@dict_fields(['name', 'size'])
class Simple:
def __init__(self, name=None, size=None):
self.name = name
self.size = size
@dict_fields(['first', 'second'])
class Pair:
def __init__(self, first=None, second=None):
self.first = first
self.second = second
def test_simple_class_to_dict():
s = Simple('foo', 100)
d = to_dict(s)
assert d['__class__'] == 'Simple'
assert d['name'] == 'foo'
assert d['size'] == 100
def test_simple_class_from_dict():
d = {'__class__': 'Simple', 'name': 'foo', 'size': 100}
s = from_dict(d, globals())
assert isinstance(s, Simple)
assert s.name == 'foo'
assert s.size == 100
def test_null_fields_to_dict():
p = Pair()
d = to_dict(p)
assert d['__class__'] == 'Pair'
assert not 'first' in d
assert not 'second' in d
def test_list_to_dict():
ss = [Simple('foo', 100), Simple('bar', 200)]
d = to_dict(ss)
assert len(d) == 2
assert d[0]['__class__'] == 'Simple'
assert d[0]['name'] == 'foo'
assert d[0]['size'] == 100
assert d[1]['__class__'] == 'Simple'
assert d[1]['name'] == 'bar'
assert d[1]['size'] == 200
def test_list_field_to_dict():
p = Pair([1, 2, 3, 4, 5], Simple('b', 200))
d = to_dict(p)
assert d['__class__'] == 'Pair'
assert len(d['first']) == 5
assert d['second']['__class__'] == 'Simple'
assert d['second']['name'] == 'b'
assert d['second']['size'] == 200
@raises(Exception)
def test_decorator_rejects_underscore_prefixes():
@dict_fields(['_p'])
class bad_attribute_defined:
pass
| Python | 0 |
b6f57d8aaeff8f85c89c381b972113810a468412 | use lat, lon | models.py | models.py | from random import choice, randint
import urllib2, json
from pushbullet import PushBullet
from os import environ
'''
Checks if the current login attempt is a security threat or not.
Performs the required action in each case
'''
def is_safe(form, ip, geocoded_ip, mandrill):
ip = ip
latitude = form.get('latitude', None)
longitude = form.get('longitude', None)
os = form.get('os', None)
mobile = form.get('isMobile', None)
browser = form.get('browser', None)
if latitude == None and longitude == None:
latitude = geocoded_ip['lat']
longitude = geocoded_ip['lon']
safety_status = choice(range(-1, 2))
auth_code = '%06d' % randint(0,999999)
if safety_status < 1:
send_push("Confirm your access", "Suspicious access detected from IP %s, confirm with code %s" % (ip, auth_code))
send_mail(mandrill, 'zen@itram.es', latitude, longitude, ip, auth_code)
return {
'safety_code': safety_status,
'token': auth_code,
'debug': [ip, latitude, longitude, os, mobile, browser]
}# send SMS, mail...
def send_push(message, body, lat=40.4086, lon=-3.6922, pushbullet_token=environ.get('PUSHBULLET_TOKEN')):
""" Sends a foo location to Pushbullet """
pb = PushBullet(pushbullet_token)
success, push = pb.push_link("Login from suspicious location detected now!", "http://maps.google.com/maps?&z=10&q=%f,+%f&ll=%f+%f" % (lat, lon, lat, lon), "A suspicious login has appeared, try to guess who is it")
return success
def send_mail(mandrill, to, latitude, longitude, ip, safety_code):
gmaps_uri = "http://maps.googleapis.com/maps/api/staticmap?center=%s,%s&zoom=15&size=400x400&markers=color:red%%7Clabel:S%%7C%s,%s&sensor=true" % (latitude, longitude, latitude, longitude)
mandrill.send_email(
from_email='someone@yourdomain.com',
subject='[LogLock] Suspicious login attempt detected',
to=[{'email': to}],
html='''
An access attempt has been logged from a suspicious location:
<p><img src="%s" /></p>
<p>IP address: %s</p>
Please confirm it is you with the following code: <b>%s</b>
''' %(gmaps_uri, ip, safety_code)
)
def geocode_ip(ip_addr):
""" Geocodes a given IP Address """
data = json.load(urllib2.urlopen("http://ip-api.com/json/%s" % ip_addr))
print "Geocoded data: %s" % data
return data
| from random import choice, randint
import urllib2, json
from pushbullet import PushBullet
from os import environ
'''
Checks if the current login attempt is a security threat or not.
Performs the required action in each case
'''
def is_safe(form, ip, geocoded_ip, mandrill):
ip = ip
latitude = form.get('latitude', None)
longitude = form.get('longitude', None)
os = form.get('os', None)
mobile = form.get('isMobile', None)
browser = form.get('browser', None)
if latitude == None and longitude == None:
latitude = geocoded_ip['latitude']
longitude = geocoded_ip['longitude']
safety_status = choice(range(-1, 2))
auth_code = '%06d' % randint(0,999999)
if safety_status < 1:
send_push("Confirm your access", "Suspicious access detected from IP %s, confirm with code %s" % (ip, auth_code))
send_mail(mandrill, 'zen@itram.es', latitude, longitude, ip, auth_code)
return {
'safety_code': safety_status,
'token': auth_code,
'debug': [ip, latitude, longitude, os, mobile, browser]
}# send SMS, mail...
def send_push(message, body, lat=40.4086, lon=-3.6922, pushbullet_token=environ.get('PUSHBULLET_TOKEN')):
""" Sends a foo location to Pushbullet """
pb = PushBullet(pushbullet_token)
success, push = pb.push_link("Login from suspicious location detected now!", "http://maps.google.com/maps?&z=10&q=%f,+%f&ll=%f+%f" % (lat, lon, lat, lon), "A suspicious login has appeared, try to guess who is it")
return success
def send_mail(mandrill, to, latitude, longitude, ip, safety_code):
gmaps_uri = "http://maps.googleapis.com/maps/api/staticmap?center=%s,%s&zoom=15&size=400x400&markers=color:red%%7Clabel:S%%7C%s,%s&sensor=true" % (latitude, longitude, latitude, longitude)
mandrill.send_email(
from_email='someone@yourdomain.com',
subject='[LogLock] Suspicious login attempt detected',
to=[{'email': to}],
html='''
An access attempt has been logged from a suspicious location:
<p><img src="%s" /></p>
<p>IP address: %s</p>
Please confirm it is you with the following code: <b>%s</b>
''' %(gmaps_uri, ip, safety_code)
)
def geocode_ip(ip_addr):
""" Geocodes a given IP Address """
data = json.load(urllib2.urlopen("http://ip-api.com/json/%s" % ip_addr))
print "Geocoded data: %s" % data
return data
| Python | 0.000137 |
4ffef0e45bf6581d3ec0ce67836fc106cf5e689f | complete rough draft of client script | client/heartbeat.py | client/heartbeat.py | # Heartbeat Client for SPARCS Services
# Version 0.0.1 - 2016-09-18
import json
import psutil
import pprint
import time
import requests
NETWORK_REPORT = False
SERVICE_NAME = ''
SERVICE_KEY = ''
API_ENDPOINT = 'https://'
# get cpu info
# - user, system, idle (1 sec)
def get_cpu():
cpu = psutil.cpu_times_percent(interval=1, percpu=False)
info = {
'user': cpu.user,
'system': cpu.system,
'idle': cpu.idle,
}
return info
# get memory info
# - virtual (total, available, used)
# - swap (total, used)
def get_mem():
virt_mem = psutil.virtual_memory()
swap_mem = psutil.swap_memory()
info = {
'virtual': {
'total': virt_mem.total,
'available': virt_mem.available,
'used': virt_mem.used,
},
'swap': {
'total': swap_mem.total,
'used': swap_mem.used,
},
}
return info
# get disk info
# - devide (mountpoint, fstype, total, used)
def get_disk():
info = {}
disk_list = psutil.disk_partitions()
for disk in disk_list:
usage = psutil.disk_usage(disk.mountpoint)
info[disk.device] = {
'mountpoint': disk.mountpoint,
'fstype': disk.fstype,
'total': usage.total,
'used': usage.used,
}
return info
# get network info
# - bytes (sent, recv) (1 sec)
# - packet (sent, recv) (1 sec)
def get_net():
info = {
'bytes_sent': 0,
'bytes_recv': 0,
'packets_sent': 0,
'packets_recv': 0,
}
c1 = psutil.net_io_counters()
time.sleep(1)
c2 = psutil.net_io_counters()
info['bytes_sent'] = c2.bytes_sent - c1.bytes_sent
info['bytes_recv'] = c2.bytes_recv - c1.bytes_recv
info['packets_sent'] = c2.packets_sent - c1.packets_sent
info['packets_recv'] = c2.packets_recv - c1.packets_recv
return info
# get process info
# - name (top 5 cpu usages)
# - name (top 5 mem usages)
def get_proc():
proc_list = []
for p in psutil.process_iter():
try:
proc_list.append({
'name': p.name(),
'cpu': p.cpu_percent(),
'mem': p.memory_percent(),
})
except:
pass
def top_n(n, l, key):
return map(lambda x: x['name'],
list(reversed(sorted(l, key=key)))[:n])
info = {
'top_cpu': top_n(5, proc_list, lambda x: x['cpu']),
'top_mem': top_n(5, proc_list, lambda x: x['mem']),
}
return info
# get system info
# - boot time
def get_sys():
info = {
'boot_time': psutil.boot_time()
}
return info
# report info to server
def report(info):
payload = {
'server': {
'name': SERVICE_NAME,
'key': SERVICE_KEY,
},
'info': info,
'errors': {},
}
for i in range(3):
timestamp = int(time.time())
try:
req = requests.post(API_ENDPOINT, data=json.dumps(payload))
resp = req.json()
if 'success' in resp:
return resp['success']
else:
error = resp['error'] if 'error' in resp else 'unknown'
payload['errors'][timestamp] = error
except Exception as e:
payload['errors'][timestamp] = str(e)
return False
# our main routine
def main():
info = {
'cpu': get_cpu(),
'mem': get_mem(),
'disk': get_disk(),
'net': get_net(),
'proc': get_proc(),
'sys': get_sys(),
}
if NETWORK_REPORT:
success = report(info)
# ME: if fail, what should we do?
# ???: nothing except just eating popcorn
else:
pprint.pprint(info)
if __name__ == '__main__':
main()
| # Heartbeat Client for SPARCS Services
# Version 0.0.1 - 2016-09-18
import json
import psutil
import time
import requests
SERVICE_NAME = ''
SERVICE_KEY = ''
API_ENDPOINT = 'https://'
# get cpu info
# - user, system, idle (3 sec)
def get_cpu():
pass
# get memory info
# - virtual (total, available, used)
# - swap (total, used)
def get_mem():
pass
# get disk info
# - devide (mountpoint, fstype, total, used)
def get_disk():
pass
# get network info
# - bytes (sent, recv) (3 sec)
# - packet (sent, recv) (3 sec)
def get_net():
pass
# get process info
# - name (top 5 cpu usages)
# - name (top 5 mem usages)
def get_proc():
pass
# get system info
# - boot time
def get_sys():
pass
# our main routine
def main():
info = {
'cpu': get_cpu(),
'mem': get_mem(),
'disk': get_disk(),
'net': get_net(),
'proc': get_proc(),
'sys': get_sys(),
}
payload = {
'server': {
'name': SERVICE_NAME,
'key': SERVICE_KEY,
},
'info': info,
'errors': {},
}
for i in range(3):
try:
req = requests.post(API_ENDPOINT, data=json.dumps(payload))
resp = req.json()
if resp['success']:
return
except Exception as e:
timestamp = int(time.time())
payload['errors'][time.time()] = str(e)
# ME: what should we do?
# ???: nothing except just eating popcorn
if __name__ == '__main__':
main()
| Python | 0 |
a0f96b2b25d309c8934ffe9a197f3d66c9097b52 | replace phone to email for privacy | models.py | models.py | from app import db
from datetime import datetime
class Profile(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(80), unique=True)
gender = db.Column(db.String(1))
age = db.Column(db.Integer())
email = db.Column(db.String(50), unique=True)
description = db.Column(db.String(300))
date = db.Column(db.DateTime, default = datetime.utcnow)
# get profile by id : Profile.query.get(id)
# get profile by param : Profile.query.filter_by(name = "").all()
| from app import db
from datetime import datetime
class Profile(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(80), unique=True)
gender = db.Column(db.String(1))
age = db.Column(db.Integer())
description = db.Column(db.String(300))
date = db.Column(db.DateTime, default = datetime.utcnow)
# get profile by id : Profile.query.get(id)
# get profile by param : Profile.query.filter_by(name = "").all()
| Python | 0.000189 |
9c8bde1e57ad2e70c7b3b9188bff9b90e85434e6 | Send email and push | models.py | models.py | from random import choice, randint
import urllib2, json
from pushbullet import PushBullet
from os import environ
'''
Checks if the current login attempt is a security threat or not.
Performs the required action in each case
'''
def is_safe(form, ip, mandrill):
ip = ip
latitude = form.get('latitude', None)
longitude = form.get('longitude', None)
os = form.get('os', None)
mobile = form.get('isMobile', None)
browser = form.get('browser', None)
# check against our database
safety_status = choice(range(-1, 2))
if safety_status < 1:
auth_code = '%06d' % randint(0,999999)
send_push("Confirm your access", "Suspicious access detected from IP %s, confirm with code %s" % (ip, auth_code))
send_mail(mandrill, 'zen@itram.es', latitude, longitude, ip, auth_code)
return {
'safety_code': safety_status,
'token': auth_code,
'debug': [ip, latitude, longitude, os, mobile, browser]
}# send SMS, mail...
def send_push(message, body, lat=40.4086, lon=-3.6922, pushbullet_token=environ.get('PUSHBULLET_TOKEN')):
""" Sends a foo location to Pushbullet """
pb = PushBullet(pushbullet_token)
success, push = pb.push_link("Login from suspicious location detected now!", "http://maps.google.com/maps?&z=10&q=%f,+%f&ll=%f+%f" % (lat, lon, lat, lon), "A suspicious login has appeared, try to guess who is it")
return success
def send_mail(mandrill, to, latitude, longitude, ip, safety_code):
gmaps_uri = "http://maps.googleapis.com/maps/api/staticmap?center=%s,%s&zoom=15&size=400x400&markers=color:red%%7Clabel:S%%7C%s,%s&sensor=true" % (latitude, longitude, latitude, longitude)
mandrill.send_email(
from_email='someone@yourdomain.com',
subject='[LogLock] Suspicious login attempt detected',
to=[{'email': to}],
html='''
An access attempt has been logged from a suspicious location:
<p><img src="%s" /></p>
<p>IP address: %s</p>
Please confirm it is you with the following code: <b>%s</b>
''' %(gmaps_uri, ip, safety_code)
)
def geocode_ip(ip_addr):
""" Geocodes a given IP Address """
data = json.load(urllib2.urlopen("http://ip-api.com/json/%s" % ip_addr))
print "Geocoded data: %s" % data
return data
| from random import choice
import urllib2, json
from pushbullet import PushBullet
'''
Checks if the current login attempt is a security threat or not.
Performs the required action in each case
'''
def is_safe(form, ip, mandrill):
ip = ip
geo = form.get('geo', None)
os = form.get('os', None)
browser = form.get('browser', None)
# check against our database
safety_status = choice(range(-1, 2))
return {
'safety_code': safety_status,
'token': 'fake_token',
'debug': [ip, geo, os, browser]} # send SMS, mail...
def send_push(pushbullet_token, message, lat=40.4086, lon=-3.6922):
""" Sends a foo location to Pushbullet """
pb = PushBullet(pushbullet_token)
success, push = pb.push_link("Login from suspicious location detected now!", "http://maps.google.com/maps?&z=10&q=%f,+%f&ll=%f+%f" % (lat, lon, lat, lon), "A suspicious login has appeared, try to guess who is it")
return success
def send_mail(mandrill, to):
mandrill.send_email(
from_email='someone@yourdomain.com',
subject='Blocked suspicious login attempt @twitter',
to=[{'email': to}],
text='''An attack has been detected and blocked (LND=>NY login with 5h difference).
Authorize this access by [...]'''
)
def geocode_ip(ip_addr):
""" Geocodes a given IP Address """
data = json.load(urllib2.urlopen("http://ip-api.com/json/%s" % ip_addr))
print "Geocoded data: %s" % data
return data
| Python | 0 |
ea8cbcaf41f01a46390882fbc99e6e14d70a49d1 | Create an API auth token for every newly created user | src/mmw/apps/user/models.py | src/mmw/apps/user/models.py | # -*- coding: utf-8 -*-
from django.conf import settings
from django.contrib.auth.models import User
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from rest_framework.authtoken.models import Token
@receiver(post_save, sender=settings.AUTH_USER_MODEL)
def create_auth_token(sender, instance=None, created=False, **kwargs):
"""
Create an auth token for every newly created user.
"""
if created:
Token.objects.create(user=instance)
class ItsiUserManager(models.Manager):
def create_itsi_user(self, user, itsi_id):
itsi_user = self.create(user=user, itsi_id=itsi_id)
return itsi_user
class ItsiUser(models.Model):
user = models.OneToOneField(User, primary_key=True)
itsi_id = models.IntegerField()
objects = ItsiUserManager()
def __unicode__(self):
return unicode(self.user.username)
| # -*- coding: utf-8 -*-
from django.contrib.auth.models import User
from django.db import models
class ItsiUserManager(models.Manager):
def create_itsi_user(self, user, itsi_id):
itsi_user = self.create(user=user, itsi_id=itsi_id)
return itsi_user
class ItsiUser(models.Model):
user = models.OneToOneField(User, primary_key=True)
itsi_id = models.IntegerField()
objects = ItsiUserManager()
def __unicode__(self):
return unicode(self.user.username)
| Python | 0 |
8a304e7c09a2e6a01454d686a882a8e139be7a3d | make async dbus call, return result in deferred method | dbus-tools/dbus-send.py | dbus-tools/dbus-send.py | ###############################################################################
# Copyright 2012 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import sys, dbus, json
from twisted.internet import glib2reactor
# Configure the twisted mainloop to be run inside the glib mainloop.
# This must be done before importing the other twisted modules
glib2reactor.install()
from twisted.internet import reactor, defer
from autobahn.websocket import listenWS
from autobahn.wamp import exportRpc, WampServerFactory, WampServerProtocol
from dbus.mainloop.glib import DBusGMainLoop
import gobject
gobject.threads_init()
from dbus import glib
glib.init_threads()
# enable debug log
from twisted.python import log
log.startLogging(sys.stdout)
###############################################################################
class DbusSendService:
@exportRpc
def dbusSend(self, list):
if len(list) < 5:
raise Exception("Error: expected arguments: bus, destination, object, interface, message, [args])")
if list[0] == "session":
bus = dbus.SessionBus()
elif list[0] == "system":
bus = dbus.SystemBus()
else:
raise Exception("Error: invalid bus: %s" % list[0])
# parse JSON arg list
args = []
if len(list) == 6:
args = json.loads(list[5])
# get dbus proxy
object = bus.get_object(list[1], list[2])
method = object.get_dbus_method(list[4], list[3])
# deferred reply to return dbus results
self.request = defer.Deferred()
# dbus method async call
method(*args, reply_handler=self.dbusSuccess, error_handler=self.dbusError)
return self.request
def dbusSuccess(self, *result):
# return JSON string result array
self.request.callback(json.dumps(result))
def dbusError(self, error):
# raise exception in the deferred reply context
self.request.addCallback(self.raiseError)
self.request.callback(error)
def raiseError(self, error):
raise Exception(error)
###############################################################################
class DbusSendServerProtocol(WampServerProtocol):
def onSessionOpen(self):
# create dbus-send service instance and register it for RPC.
self.dbusSendService = DbusSendService()
self.registerForRpc(self.dbusSendService)
###############################################################################
if __name__ == '__main__':
port = "9000"
if len(sys.argv) == 2:
port = sys.argv[1]
uri = "ws://localhost:" + port
factory = WampServerFactory(uri, debugWamp = True)
factory.protocol = DbusSendServerProtocol
factory.setProtocolOptions(allowHixie76 = True)
listenWS(factory)
DBusGMainLoop(set_as_default=True)
reactor.run()
| ###############################################################################
# Copyright 2012 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import sys, dbus, json
from twisted.internet import glib2reactor
# Configure the twisted mainloop to be run inside the glib mainloop.
# This must be done before importing the other twisted modules
glib2reactor.install()
from twisted.internet import reactor, defer
from autobahn.websocket import listenWS
from autobahn.wamp import exportRpc, WampServerFactory, WampServerProtocol
from dbus.mainloop.glib import DBusGMainLoop
import gobject
gobject.threads_init()
from dbus import glib
glib.init_threads()
# enable debug log
from twisted.python import log
log.startLogging(sys.stdout)
###############################################################################
class DbusSendService:
@exportRpc
def dbusSend(self, list):
if len(list) < 5:
raise Exception("Error: expected arguments: bus, destination, object, interface, message, [args])")
if list[0] == "session":
bus = dbus.SessionBus()
elif list[0] == "system":
bus = dbus.SystemBus()
else:
raise Exception("Error: invalid bus: %s" % list[0])
# parse JSON arg list
args = []
if len(list) == 6:
args = json.loads(list[5])
# get dbus proxy
object = bus.get_object(list[1], list[2])
method = object.get_dbus_method(list[4], list[3])
# defer dbus call
request = defer.Deferred()
request.addCallback(self.dbusCallback)
reactor.callLater(0, request.callback, (method, args))
return request
def dbusCallback(self, list):
# call dbus method
result = list[0](*list[1])
# return JSON string result
return json.dumps(result)
###############################################################################
class DbusSendServerProtocol(WampServerProtocol):
def onSessionOpen(self):
# create dbus-send service instance and register it for RPC.
self.dbusSendService = DbusSendService()
self.registerForRpc(self.dbusSendService)
###############################################################################
if __name__ == '__main__':
port = "9000"
if len(sys.argv) == 2:
port = sys.argv[1]
uri = "ws://localhost:" + port
factory = WampServerFactory(uri, debugWamp = True)
factory.protocol = DbusSendServerProtocol
factory.setProtocolOptions(allowHixie76 = True)
listenWS(factory)
DBusGMainLoop(set_as_default=True)
reactor.run()
| Python | 0.000001 |
1bbfce6c64debb9f9377d1a912604f32ace9dca5 | Update runsegment.py | bin/runsegment.py | bin/runsegment.py | #!/usr/bin/python
import os
import numpy as np
import shutil
import common
from segment import normalizefile, segmentfile
def runAll(args):
print('\n\n\nYou have requested to normalize and segment bincounts files')
print('\tWARNING:')
print('\t\tIF USING ANY REFERENCES OTHER THAN THOSE I PROVIDE I CANNOT GUARANTEE RESULT ACCURACY')
print('\n')
#Set up environment#
args.CountDirectory = common.fixDirName(args.CountDirectory)
lowessDir = os.path.dirname(args.CountDirectory[:-1]) + '/LowessBinCounts/'
segmentDir = os.path.dirname(args.CountDirectory[:-1]) + '/Segments/'
tempDir = os.path.dirname(args.CountDirectory[:-1]) + '/Temp/'
if args.output:
lowessDir = common.fixDirName(args.output) + 'LowessBinCounts/'
segmentDir = common.fixDirName(args.output) + 'Segments/'
common.makeDir(lowessDir)
if not args.normalizeonly:
common.makeDir(segmentDir)
common.makeDir(tempDir)
sampleFiles = common.getSampleList(args.CountDirectory, args.samples, 'bincounts')
info = common.importInfoFile(args.infofile, args.columns, 'normalize')
if args.infofile:
refArray = info
else:
thisDtype = info
refArray = np.array(
[ (os.path.basename(x)[:-14], 'unk', 1,) for x in sampleFiles],
dtype=thisDtype)
sampleDict = {x: [y for y in sampleFiles if x == os.path.basename(y)[:len(x)]][0] for x in refArray['name']}
#Run normalization for all samples#
methodDict = {x: False for x in np.unique(refArray['method'])}
# methodDict['NA'] = False
sampleNormMethodDict = {x: 'NA' for x in methodDict}
if not args.gconly:
for i in methodDict:
refSlice = refArray[(refArray['method'] == i) & (refArray['cells'] == 1)]
print refSlice
methodSamples = [sampleDict[x] for x in refSlice['name']]
print methodSamples
methodDict[i] = normalizefile.runMakeMethodRef(args.species, methodSamples, i, lowessDir)
print methodDict
if methodDict[i] != False:
for j in refSlice['name']:
sampleNormMethodDict[j] = i
print methodDict
print sampleNormMethodDict
raise SystemExit
#run multiprocessing for gc (+ method) correction
normArgs = [(args.species, sampleDict[x], methodDict[sampleNormMethodDict[x]], lowessDir + x + '.lowess.txt') for x in sampleDict.keys()]
common.daemon(normalizefile.runNormalizeOne, normArgs, 'normalize bincount files')
print('\nNormalization complete\n\n\n')
# if args.normalizeonly:
# shutil.rmtree(tempDir[:-1])
# return 0
#Run CBS for all samples#
if not args.normalizeonly:
segArgs = [(x, args.species, tempDir, lowessDir, segmentDir) for x in refArray['name']]
common.daemon(segmentfile.segmentOne, segArgs, 'segment bincount data')
shutil.rmtree(tempDir[:-1])
print('\nSegmentation complete\n\n\n')
| #!/usr/bin/python
import os
import numpy as np
import shutil
import common
from segment import normalizefile, segmentfile
def runAll(args):
print('\n\n\nYou have requested to normalize and segment bincounts files')
print('\tWARNING:')
print('\t\tIF USING ANY REFERENCES OTHER THAN THOSE I PROVIDE I CANNOT GUARANTEE RESULT ACCURACY')
print('\n')
#Set up environment#
args.CountDirectory = common.fixDirName(args.CountDirectory)
lowessDir = os.path.dirname(args.CountDirectory[:-1]) + '/LowessBinCounts/'
segmentDir = os.path.dirname(args.CountDirectory[:-1]) + '/Segments/'
tempDir = os.path.dirname(args.CountDirectory[:-1]) + '/Temp/'
if args.output:
lowessDir = common.fixDirName(args.output) + 'LowessBinCounts/'
segmentDir = common.fixDirName(args.output) + 'Segments/'
common.makeDir(lowessDir)
if not args.normalizeonly:
common.makeDir(segmentDir)
common.makeDir(tempDir)
sampleFiles = common.getSampleList(args.CountDirectory, args.samples, 'bincounts')
info = common.importInfoFile(args.infofile, args.columns, 'normalize')
if args.infofile:
refArray = info
else:
thisDtype = info
refArray = np.array(
[ (os.path.basename(x)[:-14], 'unk', 1,) for x in sampleFiles],
dtype=thisDtype)
sampleDict = {x: [y for y in sampleFiles if x == os.path.basename(y)[:len(x)]][0] for x in refArray['name']}
#Run normalization for all samples#
methodDict = {x: False for x in np.unique(refArray['method'])}
methodDict['NA'] = False
print methodDict
sampleNormMethodDict = {x: 'NA' for x in methodDict}
print sampleNormMethodDict
if not args.gconly:
for i in methodDict:
refSlice = refArray[(refArray['method'] == i) & (refArray['cells'] == 1)]
methodSamples = [sampleDict[x] for x in refSlice['name']]
methodDict[i] = normalizefile.runMakeMethodRef(args.species, methodSamples, i, lowessDir)
if methodDict[i] != False:
for j in refSlice['name']:
sampleNormMethodDict[j] = i
print methodDict
print sampleNormMethodDict
raise SystemExit
#run multiprocessing for gc (+ method) correction
normArgs = [(args.species, sampleDict[x], methodDict[sampleNormMethodDict[x]], lowessDir + x + '.lowess.txt') for x in sampleDict.keys()]
common.daemon(normalizefile.runNormalizeOne, normArgs, 'normalize bincount files')
print('\nNormalization complete\n\n\n')
# if args.normalizeonly:
# shutil.rmtree(tempDir[:-1])
# return 0
#Run CBS for all samples#
if not args.normalizeonly:
segArgs = [(x, args.species, tempDir, lowessDir, segmentDir) for x in refArray['name']]
common.daemon(segmentfile.segmentOne, segArgs, 'segment bincount data')
shutil.rmtree(tempDir[:-1])
print('\nSegmentation complete\n\n\n')
| Python | 0.000001 |
4f27b87b9ee600c7c1c05ae9fece549b9c18e2a4 | Create default instance in middleware if not found. | speeches/middleware.py | speeches/middleware.py | from instances.models import Instance
class InstanceMiddleware:
"""This middleware sets request.instance to the default Instance for all
requests. This can be changed/overridden if you use SayIt in a way that
uses multiple instances."""
def process_request(self, request):
request.instance, _ = Instance.objects.get_or_create(label='default')
request.is_user_instance = (
request.user.is_authenticated() and
( request.instance in request.user.instances.all() or request.user.is_superuser )
)
| from instances.models import Instance
class InstanceMiddleware:
"""This middleware sets request.instance to the default Instance for all
requests. This can be changed/overridden if you use SayIt in a way that
uses multiple instances."""
def process_request(self, request):
request.instance = Instance.objects.get(label='default')
request.is_user_instance = (
request.user.is_authenticated() and
( request.instance in request.user.instances.all() or request.user.is_superuser )
)
| Python | 0 |
cb54c04049050d853f874e92c83061aad911a19a | Update qotd-parser.py | intelmq/bots/parsers/shadowserver/qotd-parser.py | intelmq/bots/parsers/shadowserver/qotd-parser.py | import csv
import StringIO
from intelmq.lib.bot import Bot, sys
from intelmq.lib.event import Event
from intelmq.bots import utils
class ShadowServerQotdParserBot(Bot):
def process(self):
report = self.receive_message()
if report:
report = report.strip()
columns = {
"timestamp": "source_time",
"ip": "source_ip",
"protocol" : "transport_protocol",
"port" : "source_port",
"hostname": "source_reverse_dns",
"tag" : "__IGNORE__",
"quote" : "__IGNORE__",
"asn": "source_asn",
"geo": "source_cc",
"region" : "source_region",
"city" : "source_city"
}
rows = csv.DictReader(StringIO.StringIO(report))
for row in rows:
event = Event()
for key, value in row.items():
key = columns[key]
if not value:
continue
value = value.strip()
if key is "__IGNORE__" or key is "__TDB__":
continue
event.add(key, value)
event.add('feed', 'shadowserver-qotd')
event.add('type', 'vulnerable service')
event.add('application_protocol', 'qotd')
event = utils.parse_source_time(event, "source_time")
event = utils.generate_observation_time(event, "observation_time")
event = utils.generate_reported_fields(event)
self.send_message(event)
self.acknowledge_message()
if __name__ == "__main__":
bot = ShadowServerQotdParserBot(sys.argv[1])
bot.start()
| import csv
import StringIO
from intelmq.lib.bot import Bot, sys
from intelmq.lib.event import Event
from intelmq.bots import utils
class ShadowServerQotdParserBot(Bot):
def process(self):
report = self.receive_message()
if report:
report = report.strip()
columns = {
"timestamp": "source_time",
"ip": "source_ip",
"protocol" : "transport_protocol",
"port" : "source_port",
"hostname": "source_reverse_dns",
"tag" : "__IGNORE__",
"quote" : "__IGNORE__",
"asn": "source_asn",
"geo": "source_cc",
"region" : "source_region",
"city" : "source_city"
}
rows = csv.DictReader(StringIO.StringIO(report))
for row in rows:
event = Event()
for key, value in row.items():
key = columns[key]
if not value:
continue
value = value.strip()
if key is "__IGNORE__" or key is "__TDB__":
continue
event.add(key, value)
event.add('feed', 'shadowserver-qotd')
event.add('type', 'vulnerable service')
event.add('protocol', 'qotd')
event = utils.parse_source_time(event, "source_time")
event = utils.generate_observation_time(event, "observation_time")
event = utils.generate_reported_fields(event)
self.send_message(event)
self.acknowledge_message()
if __name__ == "__main__":
bot = ShadowServerQotdParserBot(sys.argv[1])
bot.start()
| Python | 0.000001 |
2980c30a8de6cbdf5d22bb269c16f8c5ad499ba8 | Fix build output (dots on one line) | build.py | build.py | import re
import argparse
from utils import build_docker_image
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--nocache', action='store_true', default=False)
args = parser.parse_args()
content = ''
stream = build_docker_image(nocache=args.nocache)
for item in stream:
buff = item.get('stream', item.get('status'))
if not content or re.search('.+\[[. ]*$', content):
content += buff
if not re.search('.+\[[. ]*$', content):
print(content)
content = ''
if __name__ == '__main__':
main()
| import argparse
from utils import build_docker_image
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--nocache', action='store_true', default=False)
args = parser.parse_args()
for item in build_docker_image(nocache=args.nocache):
print item.values()[0]
if __name__ == '__main__':
main()
| Python | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.