commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
00203b7fbf8ed8f8728ce18838acb21eb6224723 | Disable unused code | flumotion/test/test_common_vfs.py | flumotion/test/test_common_vfs.py | # -*- Mode: Python; test-case-name: flumotion.test.test_common_planet -*-
# vi:si:et:sw=4:sts=4:ts=4
#
# Flumotion - a streaming media server
# Copyright (C) 2008 Fluendo, S.L. (www.fluendo.com).
# All rights reserved.
# This file may be distributed and/or modified under the terms of
# the GNU General Public License version 2 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE.GPL" in the source distribution for more information.
# Licensees having purchased or holding a valid Flumotion Advanced
# Streaming Server license may use this file in accordance with the
# Flumotion Advanced Streaming Server Commercial License Agreement.
# See "LICENSE.Flumotion" in the source distribution for more information.
# Headers in this file shall remain intact.
import errno
import os
from flumotion.common.interfaces import IDirectory
from flumotion.common.testsuite import TestCase
from flumotion.common.vfs import listDirectory
class VFSTest(TestCase):
def setUp(self):
self.path = os.path.dirname(__file__)
def testListDirectory(self):
try:
d = listDirectory(self.path)
except AssertionError:
# missing backends
return
def done(directory):
self.failUnless(IDirectory.providedBy(directory))
self.assertEqual(directory.filename,
os.path.basename(self.path))
self.assertEqual(directory.getPath(), self.path)
self.failUnless(directory.iconNames)
d.addCallback(done)
return d
| # -*- Mode: Python; test-case-name: flumotion.test.test_common_planet -*-
# vi:si:et:sw=4:sts=4:ts=4
#
# Flumotion - a streaming media server
# Copyright (C) 2008 Fluendo, S.L. (www.fluendo.com).
# All rights reserved.
# This file may be distributed and/or modified under the terms of
# the GNU General Public License version 2 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE.GPL" in the source distribution for more information.
# Licensees having purchased or holding a valid Flumotion Advanced
# Streaming Server license may use this file in accordance with the
# Flumotion Advanced Streaming Server Commercial License Agreement.
# See "LICENSE.Flumotion" in the source distribution for more information.
# Headers in this file shall remain intact.
import errno
import os
from flumotion.common.interfaces import IDirectory
from flumotion.common.testsuite import TestCase
from flumotion.common.vfs import listDirectory
class VFSTest(TestCase):
def setUp(self):
self.path = os.path.dirname(__file__)
try:
os.mkdir(os.path.join(self.path, 'access-denied'), 000)
except OSError, e:
if e.errno != errno.EEXIST:
raise
def tearDown(self):
os.rmdir(os.path.join(self.path, 'access-denied'))
def testListDirectory(self):
try:
d = listDirectory(self.path)
except AssertionError:
# missing backends
return
def done(directory):
self.failUnless(IDirectory.providedBy(directory))
self.assertEqual(directory.filename,
os.path.basename(self.path))
self.assertEqual(directory.getPath(), self.path)
self.failUnless(directory.iconNames)
d.addCallback(done)
return d
| Python | 0.000003 |
ce5f152a5769e90cb87a05a2bcc1beb837d6cdb4 | Simplify code | chainer/functions/pooling/pooling_2d.py | chainer/functions/pooling/pooling_2d.py | import collections
import numpy
from chainer import cuda
from chainer import function
from chainer.utils import conv
from chainer.utils import type_check
if cuda.cudnn_enabled:
cudnn = cuda.cudnn
libcudnn = cudnn.cudnn
_cudnn_version = libcudnn.getVersion()
def _check_cudnn_acceptable_type(x_dtype):
return _cudnn_version >= 3000 or x_dtype != numpy.float16
def _pair(x):
if isinstance(x, collections.Iterable):
return x
return x, x
class Pooling2D(function.Function):
"""Base class of pooling function over a set of 2d planes."""
def __init__(self, ksize, stride=None, pad=0, cover_all=True,
use_cudnn=True):
if stride is None:
stride = ksize
self.kh, self.kw = _pair(ksize)
self.sy, self.sx = _pair(stride)
self.ph, self.pw = _pair(pad)
self.cover_all = cover_all
self.use_cudnn = use_cudnn
def check_type_forward(self, in_types):
type_check.expect(
in_types.size() == 1,
in_types[0].dtype.kind == 'f',
in_types[0].ndim == 4
)
def forward_gpu(self, x):
# Implementation using cudnn
x = x[0]
n, c, h, w = x.shape
y_h = conv.get_conv_outsize(
h, self.kh, self.sy, self.ph, self.cover_all)
y_w = conv.get_conv_outsize(
w, self.kw, self.sx, self.pw, self.cover_all)
y = cuda.cupy.empty((n, c, y_h, y_w), dtype=x.dtype)
handle = cudnn.get_handle()
pool_desc = self.create_pool_desc()
x_desc = cudnn.create_tensor_descriptor(x)
y_desc = cudnn.create_tensor_descriptor(y)
oz_dtype = 'd' if x.dtype == 'd' else 'f'
one = numpy.array(1, dtype=oz_dtype).ctypes
zero = numpy.array(0, dtype=oz_dtype).ctypes
libcudnn.poolingForward(
handle, pool_desc.value, one.data, x_desc.value,
x.data.ptr, zero.data, y_desc.value, y.data.ptr)
self.y = y
return y,
def backward_gpu(self, x, gy):
# Implementation using cudnn
x = x[0]
handle = cudnn.get_handle()
pool_desc = self.create_pool_desc()
# Pooling of cuDNNv2 does not seem to support non-contiguous gradients
gy = cuda.cupy.ascontiguousarray(gy[0])
x_desc = cudnn.create_tensor_descriptor(x)
y_desc = cudnn.create_tensor_descriptor(gy)
oz_dtype = 'd' if x.dtype == 'd' else 'f'
one = numpy.array(1, dtype=oz_dtype).ctypes
zero = numpy.array(0, dtype=oz_dtype).ctypes
gx = cuda.cupy.empty_like(x)
libcudnn.poolingBackward(
handle, pool_desc.value, one.data, y_desc.value,
self.y.data.ptr, y_desc.value, gy.data.ptr, x_desc.value,
x.data.ptr, zero.data, x_desc.value, gx.data.ptr)
return gx,
def create_pool_desc(self):
raise NotImplementedError()
| import collections
import numpy
from chainer import cuda
from chainer import function
from chainer.utils import conv
from chainer.utils import type_check
if cuda.cudnn_enabled:
cudnn = cuda.cudnn
libcudnn = cudnn.cudnn
_cudnn_version = libcudnn.getVersion()
def _check_cudnn_acceptable_type(x_dtype):
return _cudnn_version >= 3000 or x_dtype != numpy.float16
def _pair(x):
if isinstance(x, collections.Iterable):
return x
return x, x
class Pooling2D(function.Function):
"""Base class of pooling function over a set of 2d planes."""
def __init__(self, ksize, stride=None, pad=0, cover_all=True,
use_cudnn=True):
if stride is None:
stride = ksize
self.kh, self.kw = _pair(ksize)
self.sy, self.sx = _pair(stride)
self.ph, self.pw = _pair(pad)
self.cover_all = cover_all
self.use_cudnn = use_cudnn
def check_type_forward(self, in_types):
type_check.expect(
in_types.size() == 1,
in_types[0].dtype.kind == 'f',
in_types[0].ndim == 4
)
def forward_gpu(self, x):
# Implementation using cudnn
n, c, h, w = x[0].shape
y_h = conv.get_conv_outsize(
h, self.kh, self.sy, self.ph, self.cover_all)
y_w = conv.get_conv_outsize(
w, self.kw, self.sx, self.pw, self.cover_all)
dtype = x[0].dtype
y = cuda.cupy.empty((n, c, y_h, y_w), dtype=dtype)
handle = cudnn.get_handle()
pool_desc = self.create_pool_desc()
x_desc = cudnn.create_tensor_descriptor(x[0])
y_desc = cudnn.create_tensor_descriptor(y)
oz_dtype = dtype if dtype != 'e' else 'f'
one = numpy.array(1, dtype=oz_dtype).ctypes
zero = numpy.array(0, dtype=oz_dtype).ctypes
libcudnn.poolingForward(
handle, pool_desc.value, one.data, x_desc.value,
x[0].data.ptr, zero.data, y_desc.value, y.data.ptr)
self.y = y
return y,
def backward_gpu(self, x, gy):
# Implementation using cudnn
handle = cudnn.get_handle()
pool_desc = self.create_pool_desc()
# Pooling of cuDNNv2 does not seem to support non-contiguous gradients
gy = cuda.cupy.ascontiguousarray(gy[0])
x_desc = cudnn.create_tensor_descriptor(x[0])
y_desc = cudnn.create_tensor_descriptor(gy)
dtype = x[0].dtype
oz_dtype = dtype if dtype != 'e' else 'f'
one = numpy.array(1, dtype=oz_dtype).ctypes
zero = numpy.array(0, dtype=oz_dtype).ctypes
gx = cuda.cupy.empty_like(x[0])
libcudnn.poolingBackward(
handle, pool_desc.value, one.data, y_desc.value,
self.y.data.ptr, y_desc.value, gy.data.ptr, x_desc.value,
x[0].data.ptr, zero.data, x_desc.value, gx.data.ptr)
return gx,
def create_pool_desc(self):
raise NotImplementedError()
| Python | 0.041259 |
8a010b6601ecf2eed216b3aa0b604a0985d06544 | Update chainer/training/extensions/__init__.py | chainer/training/extensions/__init__.py | chainer/training/extensions/__init__.py | # import classes and functions
from chainer.training.extensions._snapshot import snapshot # NOQA
from chainer.training.extensions._snapshot import snapshot_object # NOQA
from chainer.training.extensions.computational_graph import DumpGraph # NOQA
from chainer.training.extensions.evaluator import Evaluator # NOQA
from chainer.training.extensions.exponential_shift import ExponentialShift # NOQA
from chainer.training.extensions.fail_on_nonnumber import FailOnNonNumber # NOQA
from chainer.training.extensions.inverse_shift import InverseShift # NOQA
from chainer.training.extensions.linear_shift import LinearShift # NOQA
from chainer.training.extensions.log_report import LogReport # NOQA
from chainer.training.extensions.micro_average import MicroAverage # NOQA
from chainer.training.extensions.multistep_shift import MultistepShift # NOQA
from chainer.training.extensions.parameter_statistics import ParameterStatistics # NOQA
from chainer.training.extensions.plot_report import PlotReport # NOQA
from chainer.training.extensions.polynomial_shift import PolynomialShift # NOQA
from chainer.training.extensions.print_report import PrintReport # NOQA
from chainer.training.extensions.progress_bar import ProgressBar # NOQA
from chainer.training.extensions.step_shift import StepShift # NOQA
from chainer.training.extensions.value_observation import observe_lr # NOQA
from chainer.training.extensions.value_observation import observe_value # NOQA
from chainer.training.extensions.variable_statistics_plot import VariableStatisticsPlot # NOQA
from chainer.training.extensions.warmup_shift import WarmupShift # NOQA
# Alias
from chainer.training.extensions.computational_graph import DumpGraph as dump_graph # NOQA
| # import classes and functions
from chainer.training.extensions._snapshot import snapshot # NOQA
from chainer.training.extensions._snapshot import snapshot_object # NOQA
from chainer.training.extensions.computational_graph import DumpGraph # NOQA
from chainer.training.extensions.evaluator import Evaluator # NOQA
from chainer.training.extensions.exponential_shift import ExponentialShift # NOQA
from chainer.training.extensions.fail_on_nonnumber import FailOnNonNumber # NOQA
from chainer.training.extensions.inverse_shift import InverseShift # NOQA
from chainer.training.extensions.linear_shift import LinearShift # NOQA
from chainer.training.extensions.log_report import LogReport # NOQA
from chainer.training.extensions.micro_average import MicroAverage # NOQA
from chainer.training.extensions.multistep_shift import MultistepShift # NOQA
from chainer.training.extensions.parameter_statistics import ParameterStatistics # NOQA
from chainer.training.extensions.plot_report import PlotReport # NOQA
from chainer.training.extensions.polynomial_shift import PolynomialShift # NOQA
from chainer.training.extensions.print_report import PrintReport # NOQA
from chainer.training.extensions.progress_bar import ProgressBar # NOQA
from chainer.training.extensions.step_shift import StepShift # NOQA
from chainer.training.extensions.value_observation import observe_lr # NOQA
from chainer.training.extensions.value_observation import observe_value # NOQA
from chainer.training.extensions.variable_statistics_plot import VariableStatisticsPlot # NOQA
from chainer.training.extensions.warmup_shift import WarmupShift # NOQA
# Aliase
from chainer.training.extensions.computational_graph import DumpGraph as dump_graph # NOQA
| Python | 0 |
105dc001e5e0f2e1e02409cf77e5b31f0df30ffe | put on two lines | core/dbt/task/clean.py | core/dbt/task/clean.py | import os.path
import os
import shutil
from dbt.task.base import ProjectOnlyTask
from dbt.logger import GLOBAL_LOGGER as logger
class CleanTask(ProjectOnlyTask):
def __is_project_path(self, path):
proj_path = os.path.abspath('.')
return not os.path.commonprefix(
[proj_path, os.path.abspath(path)]
) == proj_path
def __is_protected_path(self, path):
"""
This function identifies protected paths, so as not to clean them.
"""
abs_path = os.path.abspath(path)
protected_paths = self.config.source_paths + \
self.config.test_paths + ['.']
protected_abs_paths = [os.path.abspath for p in protected_paths]
return abs_path in set(protected_abs_paths) or \
self.__is_project_path(abs_path)
def run(self):
"""
This function takes all the paths in the target file
and cleans the project paths that are not protected.
"""
for path in self.config.clean_targets:
logger.info("Checking {}/*".format(path))
if not self.__is_protected_path(path):
shutil.rmtree(path, True)
logger.info(" Cleaned {}/*".format(path))
else:
logger.info("ERROR: not cleaning {}/* because it is "
"protected".format(path))
logger.info("Finished cleaning all paths.")
| import os.path
import os
import shutil
from dbt.task.base import ProjectOnlyTask
from dbt.logger import GLOBAL_LOGGER as logger
class CleanTask(ProjectOnlyTask):
def __is_project_path(self, path):
proj_path = os.path.abspath('.')
return not os.path.commonprefix(
[proj_path, os.path.abspath(path)]
) == proj_path
def __is_protected_path(self, path):
"""
This function identifies protected paths, so as not to clean them.
"""
abs_path = os.path.abspath(path)
protected_paths = self.config.source_paths + \
self.config.test_paths + ['.']
protected_abs_paths = [os.path.abspath for p in protected_paths]
return abs_path in set(protected_abs_paths) or \
self.__is_project_path(abs_path)
def run(self):
"""
This function takes all the paths in the target file
and cleans the project paths that are not protected.
"""
for path in self.config.clean_targets:
logger.info("Checking {}/*".format(path))
if not self.__is_protected_path(path):
shutil.rmtree(path, True)
logger.info(" Cleaned {}/*".format(path))
else:
logger.info("ERROR: not cleaning {}/* because it is protected".format(path))
logger.info("Finished cleaning all paths.")
| Python | 0.000006 |
5860d28e0f8f08f1bf4ca2426c08a83b687f33f8 | Fix Python3 issue (#173) | mod/tools/node.py | mod/tools/node.py | """wrapper for node.js, only check_exists"""
import subprocess
name = 'node'
platforms = ['linux']
optional = True
not_found = 'node.js required for emscripten cross-compiling'
#------------------------------------------------------------------------------
def check_exists(fips_dir) :
try :
out = subprocess.check_output(['node', '--version'])
if not out.startswith(b'v') :
log.warn("this doesn't look like a proper node.js 'node'")
return False
return True
except (OSError, subprocess.CalledProcessError) :
return False
| """wrapper for node.js, only check_exists"""
import subprocess
name = 'node'
platforms = ['linux']
optional = True
not_found = 'node.js required for emscripten cross-compiling'
#------------------------------------------------------------------------------
def check_exists(fips_dir) :
try :
out = subprocess.check_output(['node', '--version'])
if not out.startswith('v') :
log.warn("this doesn't look like a proper node.js 'node'")
return False
return True
except (OSError, subprocess.CalledProcessError) :
return False
| Python | 0 |
1794fb8865241e22a5af30020111471ea00a6250 | check if you the plugins really need to be reloaded | InvenTree/plugin/admin.py | InvenTree/plugin/admin.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from django.apps import apps
import plugin.models as models
def plugin_update(queryset, new_status: bool):
"""general function for bulk changing plugins"""
apps_changed = False
# run through all plugins in the queryset as the save method needs to be overridden
for model in queryset:
if model.active is not new_status:
model.active = new_status
apps_changed = True
model.save(no_reload=True)
# reload plugins if they changed
if apps_changed:
app = apps.get_app_config('plugin')
app.reload_plugins()
@admin.action(description='Activate plugin(s)')
def plugin_activate(modeladmin, request, queryset):
"""activate a set of plugins"""
plugin_update(queryset, True)
@admin.action(description='Deactivate plugin(s)')
def plugin_deactivate(modeladmin, request, queryset):
"""deactivate a set of plugins"""
plugin_update(queryset, False)
class PluginConfigAdmin(admin.ModelAdmin):
"""Custom admin with restricted id fields"""
readonly_fields = ["key", "name", ]
list_display = ['key', 'name', 'active', ]
actions = [plugin_activate, plugin_deactivate, ]
admin.site.register(models.PluginConfig, PluginConfigAdmin)
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from django.apps import apps
import plugin.models as models
def plugin_update(queryset, new_status: bool):
"""general function for bulk changing plugins"""
for model in queryset:
model.active = new_status
model.save(no_reload=True)
app = apps.get_app_config('plugin')
app.reload_plugins()
@admin.action(description='Activate plugin(s)')
def plugin_activate(modeladmin, request, queryset):
"""activate a set of plugins"""
plugin_update(queryset, True)
@admin.action(description='Deactivate plugin(s)')
def plugin_deactivate(modeladmin, request, queryset):
"""deactivate a set of plugins"""
plugin_update(queryset, False)
class PluginConfigAdmin(admin.ModelAdmin):
"""Custom admin with restricted id fields"""
readonly_fields = ["key", "name", ]
list_display = ['key', 'name', 'active', ]
actions = [plugin_activate, plugin_deactivate, ]
admin.site.register(models.PluginConfig, PluginConfigAdmin)
| Python | 0 |
0682e3b4ce5a23683ac1bd7d68cb69e3df92cc99 | Fix bug when hyp_length == 0 | nematus/metrics/sentence_bleu.py | nematus/metrics/sentence_bleu.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
from math import exp
from operator import mul
from collections import defaultdict
from scorer import Scorer
from reference import Reference
class SentenceBleuScorer(Scorer):
"""
Scores SmoothedBleuReference objects.
"""
def __init__(self, argument_string):
"""
Initialises metric-specific parameters.
"""
Scorer.__init__(self, argument_string)
# use n-gram order of 4 by default
if not 'n' in self._arguments.keys():
self._arguments['n'] = 4
def set_reference(self, reference_tokens):
"""
Sets the reference against hypotheses are scored.
"""
self._reference = SentenceBleuReference(
reference_tokens,
self._arguments['n']
)
class SentenceBleuReference(Reference):
"""
Smoothed sentence-level BLEU as as proposed by Lin and Och (2004).
Implemented as described in (Chen and Cherry, 2014).
"""
def __init__(self, reference_tokens, n=4):
"""
@param reference the reference translation that hypotheses shall be
scored against. Must be an iterable of tokens (any
type).
@param n maximum n-gram order to consider.
"""
Reference.__init__(self, reference_tokens)
self.n = n
# preprocess reference
self._reference_length = len(self._reference_tokens)
self._reference_ngrams = self._get_ngrams(self._reference_tokens, self.n)
def _get_ngrams(self, tokens, max_n):
"""
Extracts all n-grams of order 1 up to (and including) @param max_n from
a list of @param tokens.
"""
n_grams = []
for n in range(1, max_n+1):
n_grams.append(defaultdict(int))
for n_gram in zip(*[tokens[i:] for i in range(n)]):
n_grams[n-1][n_gram] += 1
return n_grams
def score(self, hypothesis_tokens):
"""
Scores @param hypothesis against this reference.
@return the smoothed sentence-level BLEU score: 1.0 is best, 0.0 worst.
"""
def product(iterable):
return reduce(mul, iterable, 1)
def ngram_precisions(ref_ngrams, hyp_ngrams):
precisions = []
for n in range(1, self.n+1):
overlap = 0
for ref_ngram, ref_ngram_count in ref_ngrams[n-1].iteritems():
if ref_ngram in hyp_ngrams[n-1]:
overlap += min(ref_ngram_count, hyp_ngrams[n-1][ref_ngram])
hyp_length = len(hypothesis_tokens)-n+1
if n >= 2:
# smoothing as proposed by Lin and Och (2004),
# implemented as described in (Chen and Cherry, 2014)
overlap += 1
hyp_length += 1
precisions.append(overlap/hyp_length if hyp_length > 0 else 0.0)
return precisions
def brevity_penalty(ref_length, hyp_length):
return min(1.0, exp(1-(ref_length/hyp_length)))
# preprocess hypothesis
hypothesis_length = len(hypothesis_tokens)
hypothesis_ngrams = self._get_ngrams(hypothesis_tokens, self.n)
# calculate n-gram precision for all orders
np = ngram_precisions(self._reference_ngrams, hypothesis_ngrams)
# calculate brevity penalty
bp = brevity_penalty(self._reference_length, hypothesis_length)
# compose final BLEU score
return product(np)**(1/self.n) * bp
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
from math import exp
from operator import mul
from collections import defaultdict
from scorer import Scorer
from reference import Reference
class SentenceBleuScorer(Scorer):
"""
Scores SmoothedBleuReference objects.
"""
def __init__(self, argument_string):
"""
Initialises metric-specific parameters.
"""
Scorer.__init__(self, argument_string)
# use n-gram order of 4 by default
if not 'n' in self._arguments.keys():
self._arguments['n'] = 4
def set_reference(self, reference_tokens):
"""
Sets the reference against hypotheses are scored.
"""
self._reference = SentenceBleuReference(
reference_tokens,
self._arguments['n']
)
class SentenceBleuReference(Reference):
"""
Smoothed sentence-level BLEU as as proposed by Lin and Och (2004).
Implemented as described in (Chen and Cherry, 2014).
"""
def __init__(self, reference_tokens, n=4):
"""
@param reference the reference translation that hypotheses shall be
scored against. Must be an iterable of tokens (any
type).
@param n maximum n-gram order to consider.
"""
Reference.__init__(self, reference_tokens)
self.n = n
# preprocess reference
self._reference_length = len(self._reference_tokens)
self._reference_ngrams = self._get_ngrams(self._reference_tokens, self.n)
def _get_ngrams(self, tokens, max_n):
"""
Extracts all n-grams of order 1 up to (and including) @param max_n from
a list of @param tokens.
"""
n_grams = []
for n in range(1, max_n+1):
n_grams.append(defaultdict(int))
for n_gram in zip(*[tokens[i:] for i in range(n)]):
n_grams[n-1][n_gram] += 1
return n_grams
def score(self, hypothesis_tokens):
"""
Scores @param hypothesis against this reference.
@return the smoothed sentence-level BLEU score: 1.0 is best, 0.0 worst.
"""
def product(iterable):
return reduce(mul, iterable, 1)
def ngram_precisions(ref_ngrams, hyp_ngrams):
precisions = []
for n in range(1, self.n+1):
overlap = 0
for ref_ngram, ref_ngram_count in ref_ngrams[n-1].iteritems():
if ref_ngram in hyp_ngrams[n-1]:
overlap += min(ref_ngram_count, hyp_ngrams[n-1][ref_ngram])
hyp_length = len(hypothesis_tokens)-n+1
if n >= 2:
# smoothing as proposed by Lin and Och (2004),
# implemented as described in (Chen and Cherry, 2014)
overlap += 1
hyp_length += 1
precisions.append(overlap/hyp_length)
return precisions
def brevity_penalty(ref_length, hyp_length):
return min(1.0, exp(1-(ref_length/hyp_length)))
# preprocess hypothesis
hypothesis_length = len(hypothesis_tokens)
hypothesis_ngrams = self._get_ngrams(hypothesis_tokens, self.n)
# calculate n-gram precision for all orders
np = ngram_precisions(self._reference_ngrams, hypothesis_ngrams)
# calculate brevity penalty
bp = brevity_penalty(self._reference_length, hypothesis_length)
# compose final BLEU score
return product(np)**(1/self.n) * bp
| Python | 0.00033 |
86d4b4a241887bfcd990180a6486cb8054bf514c | Add 'TODO' for YAML editor. | core/io/pyslvs_yaml.py | core/io/pyslvs_yaml.py | # -*- coding: utf-8 -*-
"""YAML format processing function."""
__author__ = "Yuan Chang"
__copyright__ = "Copyright (C) 2016-2018"
__license__ = "AGPL"
__email__ = "pyslvs@gmail.com"
from typing import Dict, Any
import yaml
from core.QtModules import QObject
from core import main_window as mn
class YamlEditor(QObject):
"""YAML reader and writer."""
def __init__(self, parent: 'mn.MainWindow'):
super(YamlEditor, self).__init__(parent)
# Check file changed function.
self.__check_file_changed = parent.checkFileChanged
# Check workbook saved function.
self.__workbook_saved = parent.workbookSaved
# Call to get point expressions.
self.__point_expr_func = parent.EntitiesPoint.expression
# Call to get link data.
self.__link_expr_func = parent.EntitiesLink.colors
# Call to get storage data.
self.__storage_data_func = parent.getStorage
# Call to get collections data.
self.__collect_data_func = parent.CollectionTabPage.collect_data
# Call to get triangle data.
self.__triangle_data_func = parent.CollectionTabPage.triangle_data
# Call to get inputs variables data.
self.__inputs_data_func = parent.InputsWidget.inputPairs
# Call to get algorithm data.
self.__algorithm_data_func = parent.DimensionalSynthesis.mechanism_data
# Call to get path data.
self.__path_data_func = parent.InputsWidget.pathData
# Call to load collections data.
self.__load_collect_func = parent.CollectionTabPage.StructureWidget.addCollections
# Call to load triangle data.
self.__load_triangle_func = parent.CollectionTabPage.TriangularIterationWidget.addCollections
# Call to load inputs variables data.
self.__load_inputs_func = parent.InputsWidget.addInputsVariables
# Call after loaded algorithm results.
self.__load_algorithm_func = parent.DimensionalSynthesis.loadResults
# Call after loaded paths.
self.__load_path_func = parent.InputsWidget.loadPaths
# Add empty links function.
self.__add_links_func = parent.addEmptyLinks
# Parse function.
self.__parse_func = parent.parseExpression
# Clear function for main window.
self.__clear_func = parent.clear
# Add storage function.
self.__add_storage_func = parent.addMultipleStorage
self.file_name = ""
def reset(self):
"""Reset some settings."""
self.file_name = ""
def save(self):
"""Save YAML file."""
data = {}
# TODO: Data structure.
yaml_script = yaml.dump(data, default_flow_style=True)
with open(self.file_name, 'w') as f:
f.write(yaml_script)
def save_as(self, file_name: str):
"""Save to a new YAML file."""
self.file_name = file_name
self.save()
def load(self, file_name: str):
"""Load YAML file."""
self.file_name = file_name
with open(self.file_name) as f:
yaml_script = f.read()
data: Dict[str, Any] = yaml.load(yaml_script)
# TODO: Load function.
| # -*- coding: utf-8 -*-
"""YAML format processing function."""
__author__ = "Yuan Chang"
__copyright__ = "Copyright (C) 2016-2018"
__license__ = "AGPL"
__email__ = "pyslvs@gmail.com"
import yaml
from core.QtModules import QObject
from core import main_window as mn
class YamlEditor(QObject):
"""YAML reader and writer."""
def __init__(self, parent: 'mn.MainWindow'):
super(YamlEditor, self).__init__(parent)
# Check file changed function.
self.__check_file_changed = parent.checkFileChanged
# Check workbook saved function.
self.__workbook_saved = parent.workbookSaved
# Call to get point expressions.
self.__point_expr_func = parent.EntitiesPoint.expression
# Call to get link data.
self.__link_expr_func = parent.EntitiesLink.colors
# Call to get storage data.
self.__storage_data_func = parent.getStorage
# Call to get collections data.
self.__collect_data_func = parent.CollectionTabPage.collect_data
# Call to get triangle data.
self.__triangle_data_func = parent.CollectionTabPage.triangle_data
# Call to get inputs variables data.
self.__inputs_data_func = parent.InputsWidget.inputPairs
# Call to get algorithm data.
self.__algorithm_data_func = parent.DimensionalSynthesis.mechanism_data
# Call to get path data.
self.__path_data_func = parent.InputsWidget.pathData
# Call to load collections data.
self.__load_collect_func = parent.CollectionTabPage.StructureWidget.addCollections
# Call to load triangle data.
self.__load_triangle_func = parent.CollectionTabPage.TriangularIterationWidget.addCollections
# Call to load inputs variables data.
self.__load_inputs_func = parent.InputsWidget.addInputsVariables
# Call after loaded algorithm results.
self.__load_algorithm_func = parent.DimensionalSynthesis.loadResults
# Call after loaded paths.
self.__load_path_func = parent.InputsWidget.loadPaths
# Add empty links function.
self.__add_links_func = parent.addEmptyLinks
# Parse function.
self.__parse_func = parent.parseExpression
# Clear function for main window.
self.__clear_func = parent.clear
# Add storage function.
self.__add_storage_func = parent.addMultipleStorage
self.file_name = ""
def reset(self):
"""Reset some settings."""
self.file_name = ""
def save(self):
"""Save YAML file."""
def save_as(self, file_name: str):
"""Save to a new YAML file."""
def load(self, file_name: str):
"""Load YAML file."""
self.file_name = file_name
| Python | 0 |
3747f72e81a3c143145dcbbdcfbfc13b292f19e1 | add filter plot test | neurodsp/tests/test_plts_filt.py | neurodsp/tests/test_plts_filt.py | """
test_plts_filt.py
Test filtering plots
"""
import numpy as np
from neurodsp.filt import filter_signal
from neurodsp.plts.filt import plot_frequency_response
def test_plot_frequency_response():
"""
Confirm frequency response plotting function works
"""
# Test plotting through the filter function
sig = np.random.randn(2000)
fs = 1000
sig_filt, kernel = filter_signal(sig, fs, 'bandpass', (8, 12),
plot_freq_response=True, return_kernel=True, verbose=False)
# Test calling frequency response plot directly
plot_frequency_response(fs, kernel)
assert True
| """
test_burst.py
Test burst detection functions
"""
import os
import numpy as np
import neurodsp
from .util import _load_example_data
def test_detect_bursts_dual_threshold():
"""
Confirm consistency in burst detection results on a generated neural signal
"""
# Load data and ground-truth filtered signal
sig = _load_example_data(data_idx=1)
fs = 1000
f_range = (13, 30)
# Load past burst findings
bursting_true = np.load(os.path.dirname(neurodsp.__file__) +
'/tests/data/sample_data_1_burst_deviation.npy')
# Detect bursts with different algorithms
bursting = neurodsp.detect_bursts_dual_threshold(sig, fs, f_range, (0.9, 2))
assert np.isclose(np.sum(bursting - bursting_true), 0)
| Python | 0 |
d57c3ad63b737fda4632f5896c8049329bcd4fe2 | Make this test work under Windows as well. | Lib/test/test_fpformat.py | Lib/test/test_fpformat.py | '''
Tests for fpformat module
Nick Mathewson
'''
from test_support import run_unittest
import unittest
from fpformat import fix, sci, NotANumber
StringType = type('')
# Test the old and obsolescent fpformat module.
#
# (It's obsolescent because fix(n,d) == "%.*f"%(d,n) and
# sci(n,d) == "%.*e"%(d,n)
# for all reasonable numeric n and d, except that sci gives 3 exponent
# digits instead of 2.
#
# Differences only occur for unreasonable n and d. <.2 wink>)
class FpformatTest(unittest.TestCase):
def checkFix(self, n, digits):
result = fix(n, digits)
if isinstance(n, StringType):
n = repr(n)
expected = "%.*f" % (digits, float(n))
self.assertEquals(result, expected)
def checkSci(self, n, digits):
result = sci(n, digits)
if isinstance(n, StringType):
n = repr(n)
expected = "%.*e" % (digits, float(n))
# add the extra 0 if needed
num, exp = expected.split("e")
if len(exp) < 4:
exp = exp[0] + "0" + exp[1:]
expected = "%se%s" % (num, exp)
self.assertEquals(result, expected)
def test_basic_cases(self):
self.assertEquals(fix(100.0/3, 3), '33.333')
self.assertEquals(sci(100.0/3, 3), '3.333e+001')
def test_reasonable_values(self):
for d in range(7):
for val in (1000.0/3, 1000, 1000.0, .002, 1.0/3, 1e10):
for realVal in (val, 1.0/val, -val, -1.0/val):
self.checkFix(realVal, d)
self.checkSci(realVal, d)
def test_failing_values(self):
# Now for 'unreasonable n and d'
self.assertEquals(fix(1.0, 1000), '1.'+('0'*1000))
self.assertEquals(sci("1"+('0'*1000), 0), '1e+1000')
# This behavior is inconsistent. sci raises an exception; fix doesn't.
yacht = "Throatwobbler Mangrove"
self.assertEquals(fix(yacht, 10), yacht)
try:
sci(yacht, 10)
except NotANumber:
pass
else:
self.fail("No exception on non-numeric sci")
run_unittest(FpformatTest)
| '''
Tests for fpformat module
Nick Mathewson
'''
from test_support import run_unittest
import unittest
from fpformat import fix, sci, NotANumber
StringType = type('')
# Test the old and obsolescent fpformat module.
#
# (It's obsolescent because fix(n,d) == "%.*f"%(d,n) and
# sci(n,d) == "%.*e"%(d,n)
# for all reasonable numeric n and d, except that sci gives 3 exponent
# digits instead of 2.
#
# Differences only occur for unreasonable n and d. <.2 wink>)
class FpformatTest(unittest.TestCase):
def checkFix(self, n, digits):
result = fix(n, digits)
if isinstance(n, StringType):
n = repr(n)
expected = "%.*f" % (digits, float(n))
self.assertEquals(result, expected)
def checkSci(self, n, digits):
result = sci(n, digits)
if isinstance(n, StringType):
n = repr(n)
expected = "%.*e" % (digits, float(n))
# add the extra 0
expected = expected[:-2]+'0'+expected[-2:]
self.assertEquals(result, expected)
def test_basic_cases(self):
self.assertEquals(fix(100.0/3, 3), '33.333')
self.assertEquals(sci(100.0/3, 3), '3.333e+001')
def test_reasonable_values(self):
for d in range(7):
for val in (1000.0/3, 1000, 1000.0, .002, 1.0/3, 1e10):
for realVal in (val, 1.0/val, -val, -1.0/val):
self.checkFix(realVal, d)
self.checkSci(realVal, d)
def test_failing_values(self):
# Now for 'unreasonable n and d'
self.assertEquals(fix(1.0, 1000), '1.'+('0'*1000))
self.assertEquals(sci("1"+('0'*1000), 0), '1e+1000')
# This behavior is inconsistent. sci raises an exception; fix doesn't.
yacht = "Throatwobbler Mangrove"
self.assertEquals(fix(yacht, 10), yacht)
try:
sci(yacht, 10)
except NotANumber:
pass
else:
self.fail("No exception on non-numeric sci")
run_unittest(FpformatTest)
| Python | 0.000001 |
728012090d3f24411e460b99f68f9b5754d38480 | Handle character substitution in html formatter | npc/formatters/html.py | npc/formatters/html.py | """
Markdown formatter for creating a page of characters.
Has a single entry point `dump`.
"""
import codecs
import html
import markdown
import tempfile
from .. import util
from mako.template import Template
def dump(characters, outstream, *, include_metadata=None, metadata=None, prefs=None):
"""
Create a markdown character listing
Args:
characters (list): Character info dicts to show
outstream (stream): Output stream
include_metadata (string|None): Whether to include metadata, and what
format to use.What kind of metadata to include, if any. Accepts
values of 'mmd', 'yaml', or 'yfm'. Metadata will always include a
title and creation date.
metadata (dict): Additional metadata to insert. Ignored unless
include_metadata is set. The keys 'title', and 'created' will
overwrite the generated values for those keys.
prefs (Settings): Settings object. Used to get the location of template
files.
Returns:
A util.Result object. Openable will not be set.
"""
if not metadata:
metadata = {}
# encode as ascii unless our stream has an opinion
try:
encoding = outstream.encoding
except AttributeError:
encoding = 'ascii'
modstream = codecs.getwriter(encoding)(outstream, errors='xmlcharrefreplace')
if include_metadata:
# load and render template
header_file = prefs.get("templates.listing.header.{}".format(include_metadata))
if not header_file:
return util.Result(
False,
errmsg="Unrecognized metadata format option '{}'".format(include_metadata),
errcode=6)
header_template = Template(filename=header_file)
modstream.write(header_template.render(metadata=metadata))
else:
modstream.write("<!DOCTYPE html>\n<html>\n<head></head>\n<body>\n")
with tempfile.TemporaryDirectory() as tempdir:
for char in characters:
body_file = prefs.get("templates.listing.character.html.{}".format(char.get_type_key()))
if not body_file:
body_file = prefs.get("templates.listing.character.html.default")
body_template = Template(filename=body_file, module_directory=tempdir)
modstream.write(
markdown.markdown(
body_template.render(
character=char.copy_and_alter(html.escape)),
['markdown.extensions.extra']
))
modstream.write("</body>\n</html>\n")
return util.Result(True)
| """
Markdown formatter for creating a page of characters.
Has a single entry point `dump`.
"""
import html
import markdown
import tempfile
from .. import util
from mako.template import Template
def dump(characters, outstream, *, include_metadata=None, metadata=None, prefs=None):
"""
Create a markdown character listing
Args:
characters (list): Character info dicts to show
outstream (stream): Output stream
include_metadata (string|None): Whether to include metadata, and what
format to use.What kind of metadata to include, if any. Accepts
values of 'mmd', 'yaml', or 'yfm'. Metadata will always include a
title and creation date.
metadata (dict): Additional metadata to insert. Ignored unless
include_metadata is set. The keys 'title', and 'created' will
overwrite the generated values for those keys.
prefs (Settings): Settings object. Used to get the location of template
files.
Returns:
A util.Result object. Openable will not be set.
"""
if not metadata:
metadata = {}
if include_metadata:
# load and render template
header_file = prefs.get("templates.listing.header.{}".format(include_metadata))
if not header_file:
return util.Result(
False,
errmsg="Unrecognized metadata format option '{}'".format(include_metadata),
errcode=6)
header_template = Template(filename=header_file)
outstream.write(header_template.render(metadata=metadata))
else:
outstream.write("<!DOCTYPE html>\n<html>\n<head></head>\n<body>\n")
with tempfile.TemporaryDirectory() as tempdir:
for char in characters:
body_file = prefs.get("templates.listing.character.html.{}".format(char.get_type_key()))
if not body_file:
body_file = prefs.get("templates.listing.character.html.default")
body_template = Template(filename=body_file, module_directory=tempdir)
outstream.write(
markdown.markdown(
body_template.render(
character=char.copy_and_alter(html.escape)),
['markdown.extensions.extra']
))
outstream.write("</body>\n</html>\n")
return util.Result(True)
| Python | 0.000007 |
d3c7f5de6a4c1d15ab3ffe19da18faaecd466fb6 | replace mysteriously missing haystack settings from staging | tndata_backend/tndata_backend/settings/staging.py | tndata_backend/tndata_backend/settings/staging.py | from .base import *
DEBUG = False
#DEBUG = True
STAGING = True
# Site's FQDN and URL. For building links in email.
SITE_DOMAIN = "staging.tndata.org"
SITE_URL = "https://{0}".format(SITE_DOMAIN)
INSTALLED_APPS = INSTALLED_APPS + (
'debug_toolbar',
'querycount',
)
# Just like production, but without the cached template loader
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
TEMPLATES[0]['OPTIONS']['loaders'] = [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
]
# django-cors-headers: https://github.com/ottoyiu/django-cors-headers/
CORS_ORIGIN_ALLOW_ALL = True
# EMAIL via Mailgun. Production server details, below (staging.tndata.org)
EMAIL_SUBJECT_PREFIX = "[Staging TNData] "
EMAIL_HOST = 'smtp.mailgun.org'
EMAIL_HOST_USER = 'postmaster@sandbox4dc4d62d8cf24785914c55630ab480e6.mailgun.org'
EMAIL_HOST_PASSWORD = 'ac2a70a9988127ff7fa217f559c2d59a'
EMAIL_PORT = '587'
EMAIL_USE_TLS = True
EMAIL_USE_SSL = False
# Caching
# Redis notes: redis_max_clients: 10000, edis_max_memory: 512mb
REDIS_PASSWORD = 'VPoDYBZgeyktxArddu4EHrNMdFsUzf7TtFKTP'
REDIS_HOST = 'worker.tndata.org'
REDIS_CACHE_DB = 2
REDIS_CACHE_URL = 'redis://:{password}@{host}:{port}/{db}'.format(
password=REDIS_PASSWORD,
host=REDIS_HOST,
port=REDIS_PORT,
db=REDIS_CACHE_DB
)
CACHES['default']['LOCATION'] = REDIS_CACHE_URL
CACHES['default']['OPTIONS']['IGNORE_EXCEPTIONS'] = True
# django-haystack settings for staging
HAYSTACK_CONNECTIONS['default']['URL'] = 'http://worker.tndata.org:9200/'
HAYSTACK_CONNECTIONS['default']['INDEX_NAME'] = 'haystack_staging'
# django-cacheops
CACHEOPS_REDIS = {
'host': REDIS_HOST,
'port': REDIS_PORT,
'db': REDIS_CACHE_DB,
'socket_timeout': 5,
'password': REDIS_PASSWORD,
}
# Explicit setting for debug_toolbar
DEBUG_TOOLBAR_PATCH_SETTINGS = False
MIDDLEWARE_CLASSES = (
'querycount.middleware.QueryCountMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
) + MIDDLEWARE_CLASSES
INTERNAL_IPS = (
'159.203.68.206',
'127.0.0.1',
'::1',
)
| from .base import *
DEBUG = False
#DEBUG = True
STAGING = True
# Site's FQDN and URL. For building links in email.
SITE_DOMAIN = "staging.tndata.org"
SITE_URL = "https://{0}".format(SITE_DOMAIN)
INSTALLED_APPS = INSTALLED_APPS + (
'debug_toolbar',
'querycount',
)
# Just like production, but without the cached template loader
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
TEMPLATES[0]['OPTIONS']['loaders'] = [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
]
# django-cors-headers: https://github.com/ottoyiu/django-cors-headers/
CORS_ORIGIN_ALLOW_ALL = True
# EMAIL via Mailgun. Production server details, below (staging.tndata.org)
EMAIL_SUBJECT_PREFIX = "[Staging TNData] "
EMAIL_HOST = 'smtp.mailgun.org'
EMAIL_HOST_USER = 'postmaster@sandbox4dc4d62d8cf24785914c55630ab480e6.mailgun.org'
EMAIL_HOST_PASSWORD = 'ac2a70a9988127ff7fa217f559c2d59a'
EMAIL_PORT = '587'
EMAIL_USE_TLS = True
EMAIL_USE_SSL = False
# Caching
# Redis notes: redis_max_clients: 10000, edis_max_memory: 512mb
REDIS_PASSWORD = 'VPoDYBZgeyktxArddu4EHrNMdFsUzf7TtFKTP'
REDIS_HOST = 'worker.tndata.org'
REDIS_CACHE_DB = 2
REDIS_CACHE_URL = 'redis://:{password}@{host}:{port}/{db}'.format(
password=REDIS_PASSWORD,
host=REDIS_HOST,
port=REDIS_PORT,
db=REDIS_CACHE_DB
)
CACHES['default']['LOCATION'] = REDIS_CACHE_URL
CACHES['default']['OPTIONS']['IGNORE_EXCEPTIONS'] = True
# django-cacheops
CACHEOPS_REDIS = {
'host': REDIS_HOST,
'port': REDIS_PORT,
'db': REDIS_CACHE_DB,
'socket_timeout': 5,
'password': REDIS_PASSWORD,
}
# Explicit setting for debug_toolbar
DEBUG_TOOLBAR_PATCH_SETTINGS = False
MIDDLEWARE_CLASSES = (
'querycount.middleware.QueryCountMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
) + MIDDLEWARE_CLASSES
INTERNAL_IPS = (
'159.203.68.206',
'127.0.0.1',
'::1',
)
| Python | 0.000001 |
cba82ad3bc1a726402e4193aec8a49a85f9999f0 | Add an 'if 0''d block of code to numpy.distutils.log to ignore some log messages. Especially useful to turn on if you're developing by using eggs. | numpy/distutils/log.py | numpy/distutils/log.py | # Colored log, requires Python 2.3 or up.
import sys
from distutils.log import *
from distutils.log import Log as old_Log
from distutils.log import _global_log
from misc_util import red_text, yellow_text, cyan_text, green_text, is_sequence, is_string
def _fix_args(args,flag=1):
if is_string(args):
return args.replace('%','%%')
if flag and is_sequence(args):
return tuple([_fix_args(a,flag=0) for a in args])
return args
class Log(old_Log):
def _log(self, level, msg, args):
if level >= self.threshold:
if args:
msg = msg % _fix_args(args)
if 0:
if msg.startswith('copying ') and msg.find(' -> ') != -1:
return
if msg.startswith('byte-compiling '):
return
print _global_color_map[level](msg)
sys.stdout.flush()
def good(self, msg, *args):
"""If we'd log WARN messages, log this message as a 'nice' anti-warn
message.
"""
if WARN >= self.threshold:
if args:
print green_text(msg % _fix_args(args))
else:
print green_text(msg)
sys.stdout.flush()
_global_log.__class__ = Log
good = _global_log.good
def set_threshold(level, force=False):
prev_level = _global_log.threshold
if prev_level > DEBUG or force:
# If we're running at DEBUG, don't change the threshold, as there's
# likely a good reason why we're running at this level.
_global_log.threshold = level
if level <= DEBUG:
info('set_threshold: setting thershold to DEBUG level, it can be changed only with force argument')
else:
info('set_threshold: not changing thershold from DEBUG level %s to %s' % (prev_level,level))
return prev_level
def set_verbosity(v, force=False):
prev_level = _global_log.threshold
if v < 0:
set_threshold(ERROR, force)
elif v == 0:
set_threshold(WARN, force)
elif v == 1:
set_threshold(INFO, force)
elif v >= 2:
set_threshold(DEBUG, force)
return {FATAL:-2,ERROR:-1,WARN:0,INFO:1,DEBUG:2}.get(prev_level,1)
_global_color_map = {
DEBUG:cyan_text,
INFO:yellow_text,
WARN:red_text,
ERROR:red_text,
FATAL:red_text
}
# don't use INFO,.. flags in set_verbosity, these flags are for set_threshold.
set_verbosity(0, force=True)
| # Colored log, requires Python 2.3 or up.
import sys
from distutils.log import *
from distutils.log import Log as old_Log
from distutils.log import _global_log
from misc_util import red_text, yellow_text, cyan_text, green_text, is_sequence, is_string
def _fix_args(args,flag=1):
if is_string(args):
return args.replace('%','%%')
if flag and is_sequence(args):
return tuple([_fix_args(a,flag=0) for a in args])
return args
class Log(old_Log):
def _log(self, level, msg, args):
if level >= self.threshold:
if args:
print _global_color_map[level](msg % _fix_args(args))
else:
print _global_color_map[level](msg)
sys.stdout.flush()
def good(self, msg, *args):
"""If we'd log WARN messages, log this message as a 'nice' anti-warn
message.
"""
if WARN >= self.threshold:
if args:
print green_text(msg % _fix_args(args))
else:
print green_text(msg)
sys.stdout.flush()
_global_log.__class__ = Log
good = _global_log.good
def set_threshold(level, force=False):
prev_level = _global_log.threshold
if prev_level > DEBUG or force:
# If we're running at DEBUG, don't change the threshold, as there's
# likely a good reason why we're running at this level.
_global_log.threshold = level
if level <= DEBUG:
info('set_threshold: setting thershold to DEBUG level, it can be changed only with force argument')
else:
info('set_threshold: not changing thershold from DEBUG level %s to %s' % (prev_level,level))
return prev_level
def set_verbosity(v, force=False):
prev_level = _global_log.threshold
if v < 0:
set_threshold(ERROR, force)
elif v == 0:
set_threshold(WARN, force)
elif v == 1:
set_threshold(INFO, force)
elif v >= 2:
set_threshold(DEBUG, force)
return {FATAL:-2,ERROR:-1,WARN:0,INFO:1,DEBUG:2}.get(prev_level,1)
_global_color_map = {
DEBUG:cyan_text,
INFO:yellow_text,
WARN:red_text,
ERROR:red_text,
FATAL:red_text
}
# don't use INFO,.. flags in set_verbosity, these flags are for set_threshold.
set_verbosity(0, force=True)
| Python | 0 |
ec4c9a07dc5ca2fab6b341932f65d0cfbd6a332b | Bump version to 1.1 | molly/__init__.py | molly/__init__.py | """
Molly Project
http://mollyproject.org
A framework for creating Mobile Web applications for HE/FE institutions.
"""
__version__ = '1.1' | """
Molly Project
http://mollyproject.org
A framework for creating Mobile Web applications for HE/FE institutions.
"""
__version__ = '1.0' | Python | 0 |
75e61ecf5efebe78676512d714fc7551f3dfac4c | Fix test | src/program/lwaftr/tests/subcommands/generate_binding_table_test.py | src/program/lwaftr/tests/subcommands/generate_binding_table_test.py | """
Test uses "snabb lwaftr generate-configuration" subcommand. Does not
need NICs as it doesn't use any network functionality. The command is
just to produce a binding table config result.
"""
from test_env import ENC, SNABB_CMD, BaseTestCase
NUM_SOFTWIRES = 10
class TestGenerateBindingTable(BaseTestCase):
generation_args = (
str(SNABB_CMD), 'lwaftr', 'generate-configuration', '193.5.1.100',
str(NUM_SOFTWIRES), 'fc00::100', 'fc00:1:2:3:4:5:0:7e', '1')
def test_binding_table_generation(self):
"""
This runs the generate-configuration subcommand and verifies that
the output contains a valid binding-table.
Usage can be found in the README; however, it's:
<ipv4> <num_ipv4s> <br_address> <b4> <psid_len> <shift>
"""
# Get generate-configuration command output.
output = self.run_cmd(self.generation_args)
# Split it into lines.
config = str(output, ENC).split('\n')[:-1]
# Check out that output is softwire-config plus a binding-table.
self.assertIn('softwire-config {', config[0].strip())
self.assertIn('binding-table {', config[1].strip())
lineno = 2
while lineno < len(config):
line = config[lineno].strip()
if not line.startswith('softwire {'):
break
self.assertTrue(line.startswith('softwire {'))
self.assertTrue(line.endswith('}'))
lineno = lineno + 1
self.assertTrue(lineno < len(config))
self.assertTrue(config[lineno].strip() == '}')
| """
Test uses "snabb lwaftr generate-binding-table" subcommand. Does not
need NICs as it doesn't use any network functionality. The command is
just to produce a binding table config result.
"""
from test_env import ENC, SNABB_CMD, BaseTestCase
NUM_SOFTWIRES = 10
class TestGenerateBindingTable(BaseTestCase):
generation_args = (
str(SNABB_CMD), 'lwaftr', 'generate-binding-table', '193.5.1.100',
str(NUM_SOFTWIRES), 'fc00::100', 'fc00:1:2:3:4:5:0:7e', '1')
def test_binding_table_generation(self):
"""
This runs the generate-binding-table subcommand and verifies that
it gets back the number of softwires it expects.
Usage can be found in the README; however, it's:
<ipv4> <num_ipv4s> <br_address> <b4> <psid_len> <shift>
"""
# Get generate-binding-table command output.
output = self.run_cmd(self.generation_args)
# Split it into lines.
config = str(output, ENC).split('\n')[:-1]
# The output should be "binding-table {" followed by NUM_SOFTWIRES
# softwires, then "}".
self.assertIn('binding-table {', config[0],
'Start line: %s' % config[0])
for idx, softwire in enumerate(config[1:-1]):
line_msg = 'Line #%d: %s' % (idx + 2, softwire)
self.assertTrue(softwire.startswith(' softwire {'), line_msg)
self.assertTrue(softwire.endswith('}'), line_msg)
self.assertIn(config[-1], '}',
'End line: %s' % config[0])
# Check that the number of lines is the number of softwires
# plus the start and end lines.
self.assertEqual(len(config), NUM_SOFTWIRES + 2, len(config))
| Python | 0.000004 |
224522e88347d4eafd68202222bb83c2d596524b | Modify SCons tools | conda/python-dev/boost_python.py | conda/python-dev/boost_python.py | from types import MethodType
import itertools
def generate(env):
"""Add Builders and construction variables to the Environment."""
if not 'boost_python' in env['TOOLS'][:-1]:
env.Tool('system')
env.AppendUnique(LIBS = ['boost_python'])
env.AppendUnique(CPPDEFINES = ['BOOST_PYTHON_DYNAMIC_LIB',
'BOOST_ALL_NO_LIB'])
def BuildBoostPython(env, target, sources):
# Code to build "target" from "source"
target = env.File(target).srcnode()
targets = list(itertools.chain(*[env.SharedObject(None, source) for source in sources if source.suffix in ['.cpp', '.cxx', '.c++']]))
sources = [source for source in sources if source.suffix == '.h']
SYSTEM = env['SYSTEM']
if SYSTEM == 'linux' and len(sources) == 1:
cmd = env.Command(sources[0].target_from_source('', '.h.gch'), sources[0], '$CXX -o $TARGET -x c++-header -c -fPIC $SHCXXFLAGS $_CCCOMCOM $SOURCE')
env.Depends(targets, cmd)
env.Depends(target, targets)
source = env.File('response_file.rsp')
with open(source.abspath, 'w') as filehandler:
filehandler.write(' '.join(target.abspath.replace('\\','/') + ' ' for target in targets))
env.Append(LINKFLAGS = '@' + source.abspath)
kwargs = dict(SHLIBSUFFIX = '.so',
SHLIBPREFIX = '')
if SYSTEM == 'osx':
return env.LoadableModule(target, [], LDMODULESUFFIX='.so',
FRAMEWORKSFLAGS = '-flat_namespace -undefined suppress', **kwargs)
else:
return env.LoadableModule(target, [], **kwargs)
env.BuildBoostPython = MethodType(BuildBoostPython, env)
env.Tool('python')
def exists(env):
return 1 | from types import MethodType
import itertools
def generate(env):
"""Add Builders and construction variables to the Environment."""
if not 'boost_python' in env['TOOLS'][:-1]:
env.Tool('system')
env.AppendUnique(LIBS = ['boost_python'])
env.AppendUnique(CPPDEFINES = ['BOOST_PYTHON_DYNAMIC_LIB',
'BOOST_ALL_NO_LIB'])
def BuildBoostPython(env, target, sources):
# Code to build "target" from "source"
target = env.File(target).srcnode()
targets = list(itertools.chain(*[env.SharedObject(None, source) for source in sources if source.suffix in ['.cpp', '.cxx', '.c++']]))
print sources
sources = [source for source in sources if source.suffix == '.h']
print sources
SYSTEM = env['SYSTEM']
print SYSTEM
if SYSTEM == 'linux' and len(sources) == 1:
cmd = env.Command(sources[0].target_from_source('', '.h.gch'), sources[0], '$CXX -o $TARGET -x c++-header -c -fPIC $SHCXXFLAGS $_CCCOMCOM $SOURCE')
env.Depends(targets, cmd)
env.Depends(target, targets)
source = env.File('response_file.rsp')
with open(source.abspath, 'w') as filehandler:
filehandler.write(' '.join(target.abspath.replace('\\','/') + ' ' for target in targets))
env.Append(LINKFLAGS = '@' + source.abspath)
kwargs = dict(SHLIBSUFFIX = '.so',
SHLIBPREFIX = '')
if SYSTEM == 'osx':
return env.LoadableModule(target, [], LDMODULESUFFIX='.so',
FRAMEWORKSFLAGS = '-flat_namespace -undefined suppress', **kwargs)
else:
return env.LoadableModule(target, [], **kwargs)
env.BuildBoostPython = MethodType(BuildBoostPython, env)
env.Tool('python')
def exists(env):
return 1 | Python | 0 |
1ca5ba7884d35193f0a035b8e8f6ac4ac6032928 | stop cycling after applying the forumla | mpexpertadjust.py | mpexpertadjust.py | #!/usr/bin/env python
import os, sys, csv
import tkinter, tkinter.messagebox
STANDARD_FILE='defstd.txt'
STANDARD_NAME=0
STANDARD_ELEMENT=1
STANDARD_QTY=2
SAMPLE_NAME=0
SAMPLE_DATE=2
SAMPLE_ELEMENT=4
SAMPLE_QTY=8
OUTPUT_FILE='output.csv'
def is_standard(label, element, standards):
"""Check if a label is a standard"""
for s in standards:
if label.strip()==s[STANDARD_NAME].strip() and element.strip()==s[STANDARD_ELEMENT].strip():
return (True,s[STANDARD_QTY])
return (False,)
def written_lines_reversed(f):
"""Return all lines written in the output file as a list in reverse order"""
f.seek(0) # restart from the beginning of the file
res = []
while True:
line = f.readline()
if line=='': break
res.append(line)
f.seek(2) # set the pointer at the end of the file
return res.reverse()
### MAIN PROGRAM ###
window = tkinter.Tk()
window.wm_withdraw()
if len(sys.argv) == 1:
tkinter.messagebox.showinfo(title="MPExpertAdjust Error", message='No input file supplied')
sys.exit(1)
# READ STANDARD FILE
standards = []
try:
f = open(STANDARD_FILE,'r')
reader = csv.reader(f, delimiter=';')
for i in reader:
if len(i)==0: continue
standards.append([elem.strip() for elem in i])
f.close()
except:
tkinter.messagebox.showinfo(title="MPExpertAdjust Error", message='Malformed standard file '+STANDARD_FILE)
sys.exit(1)
# READ SAMPLE FILE
SAMPLE_FILE=sys.argv[1]
samples = []
try:
f = open(SAMPLE_FILE,'r')
while True:
line = f.readline()
if line == '': break
if line == '\n': continue
if line.startswith('Label,Type'): continue
if line[0] == '\x00': continue
if line[0] == '\ufeff': continue
line_ar = line.split(',')
if len(line_ar)>=2 and (line_ar[1]=='STD' or line_ar[1]=='BLK'): continue
samples.append([elem.strip() for elem in line_ar])
f.close()
except:
tkinter.messagebox.showinfo(title="MPExpertAdjust Error", message='Malformed input file '+SAMPLE_FILE)
sys.exit(1)
#print(standards)
#Eprint(samples)
with open(OUTPUT_FILE,'w+') as f:
standards_present=[]
for row in samples:
std = is_standard(row[SAMPLE_NAME],row[SAMPLE_ELEMENT],standards)
if std[0]==True:
f.write(row[SAMPLE_NAME]+','+row[SAMPLE_ELEMENT]+','+row[SAMPLE_QTY]+','+std[1]+','+row[SAMPLE_DATE]+'\n')
continue
out_rev = written_lines_reversed(f)
# now we reverse the already written output file and we parse it
# searching for the first standard with the current element
for line in out_rev:
if row[SAMPLE_ELEMENT]==line[1] and is_standard(line[0],row[1],standards)[0]==True:
# standard found! Applying the forumula
res = (line[3]*row[SAMPLE_QTY])/line[2]
dilution = row[0].split(' ')[-1]
res = res*dilution
f.write(row[SAMPLE_NAME]+','+row[SAMPLE_ELEMENT]+','+row[SAMPLE_QTY]+','+res+','+row[SAMPLE_DATE]+'\n')
break
| #!/usr/bin/env python
import os, sys, csv
import tkinter, tkinter.messagebox
STANDARD_FILE='defstd.txt'
STANDARD_NAME=0
STANDARD_ELEMENT=1
STANDARD_QTY=2
SAMPLE_NAME=0
SAMPLE_DATE=2
SAMPLE_ELEMENT=4
SAMPLE_QTY=8
OUTPUT_FILE='output.csv'
def is_standard(label, element, standards):
"""Check if a label is a standard"""
for s in standards:
if label.strip()==s[STANDARD_NAME].strip() and element.strip()==s[STANDARD_ELEMENT].strip():
return (True,s[STANDARD_QTY])
return (False,)
def written_lines_reversed(f):
"""Return all lines written in the output file as a list in reverse order"""
f.seek(0) # restart from the beginning of the file
res = []
while True:
line = f.readline()
if line=='': break
res.append(line)
f.seek(2) # set the pointer at the end of the file
return res.reverse()
### MAIN PROGRAM ###
window = tkinter.Tk()
window.wm_withdraw()
if len(sys.argv) == 1:
tkinter.messagebox.showinfo(title="MPExpertAdjust Error", message='No input file supplied')
sys.exit(1)
# READ STANDARD FILE
standards = []
try:
f = open(STANDARD_FILE,'r')
reader = csv.reader(f, delimiter=';')
for i in reader:
if len(i)==0: continue
standards.append([elem.strip() for elem in i])
f.close()
except:
tkinter.messagebox.showinfo(title="MPExpertAdjust Error", message='Malformed standard file '+STANDARD_FILE)
sys.exit(1)
# READ SAMPLE FILE
SAMPLE_FILE=sys.argv[1]
samples = []
try:
f = open(SAMPLE_FILE,'r')
while True:
line = f.readline()
if line == '': break
if line == '\n': continue
if line.startswith('Label,Type'): continue
if line[0] == '\x00': continue
if line[0] == '\ufeff': continue
line_ar = line.split(',')
if len(line_ar)>=2 and (line_ar[1]=='STD' or line_ar[1]=='BLK'): continue
samples.append([elem.strip() for elem in line_ar])
f.close()
except:
tkinter.messagebox.showinfo(title="MPExpertAdjust Error", message='Malformed input file '+SAMPLE_FILE)
sys.exit(1)
#print(standards)
#Eprint(samples)
with open(OUTPUT_FILE,'w+') as f:
standards_present=[]
for row in samples:
std = is_standard(row[SAMPLE_NAME],row[SAMPLE_ELEMENT],standards)
if std[0]==True:
f.write(row[SAMPLE_NAME]+','+row[SAMPLE_ELEMENT]+','+row[SAMPLE_QTY]+','+std[1]+','+row[SAMPLE_DATE]+'\n')
continue
out_rev = written_lines_reversed(f)
# now we reverse the already written output file and we parse it
# searching for the first standard with the current element
for line in out_rev:
if row[SAMPLE_ELEMENT]==line[1] and is_standard(line[0],row[1],standards)[0]==True:
# standard found! Applying the forumula
res = (line[3]*row[SAMPLE_QTY])/line[2]
dilution = row[0].split(' ')[-1]
res = res*dilution
f.write(row[SAMPLE_NAME]+','+row[SAMPLE_ELEMENT]+','+row[SAMPLE_QTY]+','+res+','+row[SAMPLE_DATE]+'\n')
| Python | 0 |
fc22465decac6a33543e5232097af7ea847c4029 | Bump version to 1.0.1-machtfit-41 | src/oscar/__init__.py | src/oscar/__init__.py | import os
# Use 'dev', 'beta', or 'final' as the 4th element to indicate release type.
VERSION = (1, 0, 1, 'machtfit', 41)
def get_short_version():
return '%s.%s' % (VERSION[0], VERSION[1])
def get_version():
return '{}.{}.{}-{}-{}'.format(*VERSION)
# Cheeky setting that allows each template to be accessible by two paths.
# Eg: the template 'oscar/templates/oscar/base.html' can be accessed via both
# 'base.html' and 'oscar/base.html'. This allows Oscar's templates to be
# extended by templates with the same filename
OSCAR_MAIN_TEMPLATE_DIR = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'templates/oscar')
OSCAR_CORE_APPS = [
'oscar',
'oscar.apps.analytics',
'oscar.apps.checkout',
'oscar.apps.address',
'oscar.apps.shipping',
'oscar.apps.catalogue',
'oscar.apps.partner',
'oscar.apps.basket',
'oscar.apps.payment',
'oscar.apps.offer',
'oscar.apps.order',
'oscar.apps.customer',
'oscar.apps.voucher',
'oscar.apps.dashboard',
'oscar.apps.dashboard.users',
'oscar.apps.dashboard.orders',
'oscar.apps.dashboard.catalogue',
'oscar.apps.dashboard.offers',
'oscar.apps.dashboard.partners',
'oscar.apps.dashboard.ranges',
# 3rd-party apps that oscar depends on
'treebeard',
'sorl.thumbnail',
'django_tables2',
]
def get_core_apps(overrides=None):
"""
Return a list of oscar's apps amended with any passed overrides
"""
if not overrides:
return OSCAR_CORE_APPS
def get_app_label(app_label, overrides):
pattern = app_label.replace('oscar.apps.', '')
for override in overrides:
if override.endswith(pattern):
if 'dashboard' in override and 'dashboard' not in pattern:
continue
return override
return app_label
apps = []
for app_label in OSCAR_CORE_APPS:
apps.append(get_app_label(app_label, overrides))
return apps
| import os
# Use 'dev', 'beta', or 'final' as the 4th element to indicate release type.
VERSION = (1, 0, 1, 'machtfit', 40)
def get_short_version():
return '%s.%s' % (VERSION[0], VERSION[1])
def get_version():
return '{}.{}.{}-{}-{}'.format(*VERSION)
# Cheeky setting that allows each template to be accessible by two paths.
# Eg: the template 'oscar/templates/oscar/base.html' can be accessed via both
# 'base.html' and 'oscar/base.html'. This allows Oscar's templates to be
# extended by templates with the same filename
OSCAR_MAIN_TEMPLATE_DIR = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'templates/oscar')
OSCAR_CORE_APPS = [
'oscar',
'oscar.apps.analytics',
'oscar.apps.checkout',
'oscar.apps.address',
'oscar.apps.shipping',
'oscar.apps.catalogue',
'oscar.apps.partner',
'oscar.apps.basket',
'oscar.apps.payment',
'oscar.apps.offer',
'oscar.apps.order',
'oscar.apps.customer',
'oscar.apps.voucher',
'oscar.apps.dashboard',
'oscar.apps.dashboard.users',
'oscar.apps.dashboard.orders',
'oscar.apps.dashboard.catalogue',
'oscar.apps.dashboard.offers',
'oscar.apps.dashboard.partners',
'oscar.apps.dashboard.ranges',
# 3rd-party apps that oscar depends on
'treebeard',
'sorl.thumbnail',
'django_tables2',
]
def get_core_apps(overrides=None):
"""
Return a list of oscar's apps amended with any passed overrides
"""
if not overrides:
return OSCAR_CORE_APPS
def get_app_label(app_label, overrides):
pattern = app_label.replace('oscar.apps.', '')
for override in overrides:
if override.endswith(pattern):
if 'dashboard' in override and 'dashboard' not in pattern:
continue
return override
return app_label
apps = []
for app_label in OSCAR_CORE_APPS:
apps.append(get_app_label(app_label, overrides))
return apps
| Python | 0 |
14ee6e2e9986c58fdeb8e482f3426b756ab1d2cb | Bump dev version | mtools/version.py | mtools/version.py | #!/usr/bin/env python3
"""Mtools version."""
__version__ = '1.7.0-dev'
| #!/usr/bin/env python3
"""Mtools version."""
__version__ = '1.6.4'
| Python | 0 |
f83ce11dccd7209e4c124e9dadbcbbd86568e320 | Comment reason why the example is commented out | numba/tests/compile_with_pycc.py | numba/tests/compile_with_pycc.py | import cmath
import numpy as np
from numba import exportmany, export
from numba.pycc import CC
#
# New API
#
cc = CC('pycc_test_simple')
@cc.export('multf', 'f4(f4, f4)')
@cc.export('multi', 'i4(i4, i4)')
def mult(a, b):
return a * b
_two = 2
# This one can't be compiled by the legacy API as it doesn't execute
# the script in a proper module.
@cc.export('square', 'i8(i8)')
def square(u):
return u ** _two
# These ones need helperlib
cc_helperlib = CC('pycc_test_helperlib')
@cc_helperlib.export('power', 'i8(i8, i8)')
def power(u, v):
return u ** v
@cc_helperlib.export('sqrt', 'c16(c16)')
def sqrt(u):
return cmath.sqrt(u)
@cc_helperlib.export('size', 'i8(f8[:])')
def sqrt(arr):
return arr.size
# This one clashes with libc random() unless pycc takes measures
# to disambiguate implementation names.
@cc_helperlib.export('random', 'f8(i4)')
def random_impl(seed):
np.random.seed(seed)
return np.random.random()
# These ones need NRT
cc_nrt = CC('pycc_test_nrt')
cc_nrt.use_nrt = True
@cc_nrt.export('zero_scalar', 'f8(i4)')
def zero_scalar(n):
arr = np.zeros(n)
return arr[-1]
# Fails because it needs an environment
#@cc_nrt.export('zeros', 'f8[:](i4)')
#def zeros(n):
#return np.zeros(n)
#
# Legacy API
#
exportmany(['multf f4(f4,f4)', 'multi i4(i4,i4)'])(mult)
# Needs to link to helperlib to due with complex arguments
# export('multc c16(c16,c16)')(mult)
export('mult f8(f8, f8)')(mult)
| import cmath
import numpy as np
from numba import exportmany, export
from numba.pycc import CC
#
# New API
#
cc = CC('pycc_test_simple')
@cc.export('multf', 'f4(f4, f4)')
@cc.export('multi', 'i4(i4, i4)')
def mult(a, b):
return a * b
_two = 2
# This one can't be compiled by the legacy API as it doesn't execute
# the script in a proper module.
@cc.export('square', 'i8(i8)')
def square(u):
return u ** _two
# These ones need helperlib
cc_helperlib = CC('pycc_test_helperlib')
@cc_helperlib.export('power', 'i8(i8, i8)')
def power(u, v):
return u ** v
@cc_helperlib.export('sqrt', 'c16(c16)')
def sqrt(u):
return cmath.sqrt(u)
@cc_helperlib.export('size', 'i8(f8[:])')
def sqrt(arr):
return arr.size
# This one clashes with libc random() unless pycc takes measures
# to disambiguate implementation names.
@cc_helperlib.export('random', 'f8(i4)')
def random_impl(seed):
np.random.seed(seed)
return np.random.random()
# These ones need NRT
cc_nrt = CC('pycc_test_nrt')
cc_nrt.use_nrt = True
@cc_nrt.export('zero_scalar', 'f8(i4)')
def zero_scalar(n):
arr = np.zeros(n)
return arr[-1]
#@cc_nrt.export('zeros', 'f8(i4)')
#def empty_scalar(n):
#arr = np.empty(n)
#return arr[-1]
#
# Legacy API
#
exportmany(['multf f4(f4,f4)', 'multi i4(i4,i4)'])(mult)
# Needs to link to helperlib to due with complex arguments
# export('multc c16(c16,c16)')(mult)
export('mult f8(f8, f8)')(mult)
| Python | 0.000008 |
2f55f00c17b51f24b5407182516c22baead08879 | remove BeautifulSoup for now | plugins/slideshare/slideshare.py | plugins/slideshare/slideshare.py | #!/usr/bin/env python
import urllib2
import re
import urllib
import time
import sha
#import BeautifulSoup
#from BeautifulSoup import BeautifulStoneSoup
from optparse import OptionParser
TOTALIMPACT_SLIDESHARE_KEY = "nyHCUoNM"
TOTALIMPACT_SLIDESHARE_SECRET = "z7sRiGCG"
SLIDESHARE_DOI_URL = "http://www.slideshare.net/api/2/get_slideshow?api_key=nyHCUoNM&detailed=1&ts=%s&hash=%s&slideshow_url=%s"
SLIDESHARE_DOWNLOADS_PATTERN = re.compile("<NumDownloads>(?P<stats>\d+)</NumDownloads>", re.DOTALL)
SLIDESHARE_VIEWS_PATTERN = re.compile("<NumViews>(?P<stats>\d+)</NumViews>", re.DOTALL)
SLIDESHARE_COMMENTS_PATTERN = re.compile("<NumComments>(?P<stats>\d+)</NumComments>", re.DOTALL)
SLIDESHARE_FAVORITES_PATTERN = re.compile("<NumFavorites>(?P<stats>\d+)</NumFavorites>", re.DOTALL)
def get_page(id):
if not id:
return(None)
ts = time.time()
hash_combo = sha.new(TOTALIMPACT_SLIDESHARE_SECRET + str(ts)).hexdigest()
url = SLIDESHARE_DOI_URL %(ts, hash_combo, id)
#print url
try:
page = urllib2.urlopen(url).read()
except urllib2.HTTPError, err:
if err.code == 404:
page = None
else:
raise
return(page)
def get_stats(page):
if not page:
return(None)
if (False):
soup = BeautifulStoneSoup(page)
downloads = soup.numdownloads.text
views = soup.numviews.text
comments = soup.numcomments.text
favorites = soup.numfavorites.text
matches = SLIDESHARE_DOWNLOADS_PATTERN.search(page)
if matches:
downloads = matches.group("stats")
matches = SLIDESHARE_VIEWS_PATTERN.search(page)
if matches:
views = matches.group("stats")
matches = SLIDESHARE_COMMENTS_PATTERN.search(page)
if matches:
comments = matches.group("stats")
matches = SLIDESHARE_FAVORITES_PATTERN.search(page)
if matches:
favorites = matches.group("stats")
response = {"downloads":downloads, "views":views, "comments":comments, "favorites":favorites}
return(response)
from optparse import OptionParser
def main():
parser = OptionParser(usage="usage: %prog [options] filename",
version="%prog 1.0")
#parser.add_option("-x", "--xhtml",
# action="store_true",
# dest="xhtml_flag",
# default=False,
# help="create a XHTML template instead of HTML")
(options, args) = parser.parse_args()
if len(args) != 1:
parser.error("wrong number of arguments")
#print options
#print args
id = args[0]
page = get_page(id)
response = get_stats(page)
print response
return(response)
if __name__ == '__main__':
main()
#example = "http://www.slideshare.net/hpiwowar/7-data-citation-challenges-illustrated-with-data-includes-elephants"
| #!/usr/bin/env python
import urllib2
import re
import urllib
import time
import sha
import BeautifulSoup
from BeautifulSoup import BeautifulStoneSoup
from optparse import OptionParser
TOTALIMPACT_SLIDESHARE_KEY = "nyHCUoNM"
TOTALIMPACT_SLIDESHARE_SECRET = "z7sRiGCG"
SLIDESHARE_DOI_URL = "http://www.slideshare.net/api/2/get_slideshow?api_key=nyHCUoNM&detailed=1&ts=%s&hash=%s&slideshow_url=%s"
SLIDESHARE_DOWNLOADS_PATTERN = re.compile("<NumDownloads>(?P<stats>\d+)</NumDownloads>", re.DOTALL)
SLIDESHARE_VIEWS_PATTERN = re.compile("<NumViews>(?P<stats>\d+)</NumViews>", re.DOTALL)
SLIDESHARE_COMMENTS_PATTERN = re.compile("<NumComments>(?P<stats>\d+)</NumComments>", re.DOTALL)
SLIDESHARE_FAVORITES_PATTERN = re.compile("<NumFavorites>(?P<stats>\d+)</NumFavorites>", re.DOTALL)
def get_page(id):
if not id:
return(None)
ts = time.time()
hash_combo = sha.new(TOTALIMPACT_SLIDESHARE_SECRET + str(ts)).hexdigest()
url = SLIDESHARE_DOI_URL %(ts, hash_combo, id)
#print url
try:
page = urllib2.urlopen(url).read()
except urllib2.HTTPError, err:
if err.code == 404:
page = None
else:
raise
return(page)
def get_stats(page):
if not page:
return(None)
if (False):
soup = BeautifulStoneSoup(page)
downloads = soup.numdownloads.text
views = soup.numviews.text
comments = soup.numcomments.text
favorites = soup.numfavorites.text
matches = SLIDESHARE_DOWNLOADS_PATTERN.search(page)
if matches:
downloads = matches.group("stats")
matches = SLIDESHARE_VIEWS_PATTERN.search(page)
if matches:
views = matches.group("stats")
matches = SLIDESHARE_COMMENTS_PATTERN.search(page)
if matches:
comments = matches.group("stats")
matches = SLIDESHARE_FAVORITES_PATTERN.search(page)
if matches:
favorites = matches.group("stats")
response = {"downloads":downloads, "views":views, "comments":comments, "favorites":favorites}
return(response)
from optparse import OptionParser
def main():
parser = OptionParser(usage="usage: %prog [options] filename",
version="%prog 1.0")
#parser.add_option("-x", "--xhtml",
# action="store_true",
# dest="xhtml_flag",
# default=False,
# help="create a XHTML template instead of HTML")
(options, args) = parser.parse_args()
if len(args) != 1:
parser.error("wrong number of arguments")
#print options
#print args
id = args[0]
page = get_page(id)
response = get_stats(page)
print response
return(response)
if __name__ == '__main__':
main()
#example = "http://www.slideshare.net/hpiwowar/7-data-citation-challenges-illustrated-with-data-includes-elephants"
| Python | 0 |
63a3e6e0c65fa17e6abe58da06b4bdfa20c62bfe | Add onchange for set vector in orders | mx_agent/agent.py | mx_agent/agent.py | # -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import os
import sys
import logging
import openerp
import openerp.netsvc as netsvc
import openerp.addons.decimal_precision as dp
from openerp.osv import fields, osv, expression, orm
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from openerp import SUPERUSER_ID, api
from openerp import tools
from openerp.tools.translate import _
from openerp.tools.float_utils import float_round as round
from openerp.tools import (DEFAULT_SERVER_DATE_FORMAT,
DEFAULT_SERVER_DATETIME_FORMAT,
DATETIME_FORMATS_MAP,
float_compare)
_logger = logging.getLogger(__name__)
class SaleOrder(orm.Model):
""" Model name: Sale Order
"""
_inherit = 'sale.order'
# TODO onchange for setup from partner
def on_change_partner_id(self, cr, uid, ids, partner_id, context=None):
res = super(SaleOrder, self).on_change_partner_id(
cr, uid, ids, partner_id, context=context)
# Update agent field for partner form
# TODO propagate!!!
if 'value' not in res:
res['value'] = {}
if partner_id:
partner_proxy = self.pool.get('res.partner').browse(
cr, uid, partner_id, context=context)
res['value'][
'mx_agent_id'] = partner_proxy.agent_id.id
else:
res['value']['mx_agent_id'] = False
return res
_columns = {
'mx_agent_id': fields.many2one('res.partner', 'Agent',
domain=[('is_agent', '=', True)]),
}
class AccountInvoice(orm.Model):
""" Model name: Account Invoice
"""
_inherit = 'account.invoice'
# TODO onchange for setup from partner
_columns = {
'mx_agent_id': fields.many2one('res.partner', 'Agent',
domain=[('is_agent', '=', True)]),
}
class StockPicking(orm.Model):
""" Model name: Stock Picking
"""
_inherit = 'stock.picking'
# TODO onchange for setup from partner
_columns = {
'mx_agent_id': fields.many2one('res.partner', 'Agent',
domain=[('is_agent', '=', True)]),
}
class StockDdt(orm.Model):
""" Model name: Stock DDT
"""
_inherit = 'stock.ddt'
# TODO onchange for setup from partner
_columns = {
'mx_agent_id': fields.many2one('res.partner', 'Agent',
domain=[('is_agent', '=', True)]),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| # -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import os
import sys
import logging
import openerp
import openerp.netsvc as netsvc
import openerp.addons.decimal_precision as dp
from openerp.osv import fields, osv, expression, orm
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from openerp import SUPERUSER_ID, api
from openerp import tools
from openerp.tools.translate import _
from openerp.tools.float_utils import float_round as round
from openerp.tools import (DEFAULT_SERVER_DATE_FORMAT,
DEFAULT_SERVER_DATETIME_FORMAT,
DATETIME_FORMATS_MAP,
float_compare)
_logger = logging.getLogger(__name__)
class SaleOrder(orm.Model):
""" Model name: Sale Order
"""
_inherit = 'sale.order'
# TODO onchange for setup from partner
_columns = {
'mx_agent_id': fields.many2one('res.partner', 'Agent',
domain=[('is_agent', '=', True)]),
}
class AccountInvoice(orm.Model):
""" Model name: Account Invoice
"""
_inherit = 'account.invoice'
# TODO onchange for setup from partner
_columns = {
'mx_agent_id': fields.many2one('res.partner', 'Agent',
domain=[('is_agent', '=', True)]),
}
class StockPicking(orm.Model):
""" Model name: Stock Picking
"""
_inherit = 'stock.picking'
# TODO onchange for setup from partner
_columns = {
'mx_agent_id': fields.many2one('res.partner', 'Agent',
domain=[('is_agent', '=', True)]),
}
class StockDdt(orm.Model):
""" Model name: Stock DDT
"""
_inherit = 'stock.ddt'
# TODO onchange for setup from partner
_columns = {
'mx_agent_id': fields.many2one('res.partner', 'Agent',
domain=[('is_agent', '=', True)]),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| Python | 0 |
6eedd6e5b96d9ee051e7708c4c127fdfb6c2a92b | modify file : add class Report and Score | NippoKun/report/models.py | NippoKun/report/models.py | from django.contrib.auth.models import User
from django.db import models
# Create your models here.
class Report(models.Model):
report_author = models.ForeignKey(User, related_name='report_author')
report_title = models.CharField(max_length=50)
report_content = models.TextField(max_length=999)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Score(models.Model):
report = models.ForeignKey(Report, related_name='score')
score_author = models.ForeignKey(User, related_name='score_author')
score = models.IntegerField()
evaluate_point = models.TextField(max_length=30)
comment = models.TextField(max_length=999, blank=True)
average_score = models.FloatField()
scored_at = models.DateTimeField(auto_now=True)
| from django.db import models
# Create your models here.
| Python | 0 |
88d2918606870ef7bdaafda87b37537d21c02036 | Extend failed and end with traceback | polyaxon_client/tracking/base.py | polyaxon_client/tracking/base.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import atexit
import sys
import time
from polystores.stores.manager import StoreManager
from polyaxon_client import PolyaxonClient, settings
from polyaxon_client.exceptions import PolyaxonClientException
from polyaxon_client.tracking.paths import get_outputs_path
from polyaxon_client.tracking.utils.project import get_project_info
class BaseTracker(object):
def __init__(self,
project=None,
client=None,
track_logs=True,
track_code=True,
track_env=True,
outputs_store=None):
if settings.NO_OP:
return
if not settings.IN_CLUSTER and project is None:
raise PolyaxonClientException('Please provide a valid project.')
self.last_status = None
self.client = client or PolyaxonClient()
if settings.IN_CLUSTER:
self.user = None
else:
self.user = (self.client.auth.get_user().username
if self.client.api_config.schema_response
else self.client.auth.get_user().get('username'))
username, project_name = get_project_info(current_user=self.user, project=project)
self.track_logs = track_logs
self.track_code = track_code
self.track_env = track_env
self.project = project
self.username = username
self.project_name = project_name
self.outputs_store = outputs_store
# Setup the outputs store
if outputs_store is None and settings.IN_CLUSTER:
self.set_outputs_store(outputs_path=get_outputs_path(), set_env_vars=True)
def _set_health_url(self):
raise NotImplementedError
def log_status(self, status, message=None, traceback=None):
raise NotImplementedError
def _start(self):
if settings.NO_OP:
return
atexit.register(self._end)
self.start()
def excepthook(exception, value, tb):
self.failed(message='Type: {}, Value: {}'.format(exception, value))
# Resume normal work
sys.__excepthook__(exception, value, tb)
sys.excepthook = excepthook
def _end(self):
if settings.NO_OP:
return
self.succeeded()
def start(self):
if settings.NO_OP:
return
self.log_status('running')
self.last_status = 'running'
def end(self, status, message=None, traceback=None):
if settings.NO_OP:
return
if self.last_status in ['succeeded', 'failed', 'stopped']:
return
self.log_status(status=status, message=message, traceback=traceback)
self.last_status = status
time.sleep(0.1) # Just to give the opportunity to the worker to pick the message
def succeeded(self):
if settings.NO_OP:
return
self.end('succeeded')
def stop(self):
if settings.NO_OP:
return
self.end('stopped')
def failed(self, message=None, traceback=None):
if settings.NO_OP:
return
self.end(status='failed', message=message, traceback=traceback)
def set_outputs_store(self, outputs_store=None, outputs_path=None, set_env_vars=False):
if settings.NO_OP:
return
if not any([outputs_store, outputs_path]):
raise PolyaxonClientException(
'An Store instance or and outputs path is required.')
self.outputs_store = outputs_store or StoreManager(path=outputs_path)
if self.outputs_store and set_env_vars:
self.outputs_store.set_env_vars()
def log_output(self, filename, **kwargs):
if settings.NO_OP:
return
self.outputs_store.upload_file(filename=filename)
def log_outputs(self, dirname, **kwargs):
if settings.NO_OP:
return
self.outputs_store.upload_dir(dirname=dirname)
| # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import atexit
import sys
import time
from polystores.stores.manager import StoreManager
from polyaxon_client import PolyaxonClient, settings
from polyaxon_client.exceptions import PolyaxonClientException
from polyaxon_client.tracking.paths import get_outputs_path
from polyaxon_client.tracking.utils.project import get_project_info
class BaseTracker(object):
def __init__(self,
project=None,
client=None,
track_logs=True,
track_code=True,
track_env=True,
outputs_store=None):
if settings.NO_OP:
return
if not settings.IN_CLUSTER and project is None:
raise PolyaxonClientException('Please provide a valid project.')
self.last_status = None
self.client = client or PolyaxonClient()
if settings.IN_CLUSTER:
self.user = None
else:
self.user = (self.client.auth.get_user().username
if self.client.api_config.schema_response
else self.client.auth.get_user().get('username'))
username, project_name = get_project_info(current_user=self.user, project=project)
self.track_logs = track_logs
self.track_code = track_code
self.track_env = track_env
self.project = project
self.username = username
self.project_name = project_name
self.outputs_store = outputs_store
# Setup the outputs store
if outputs_store is None and settings.IN_CLUSTER:
self.set_outputs_store(outputs_path=get_outputs_path(), set_env_vars=True)
def _set_health_url(self):
raise NotImplementedError
def log_status(self, status, message=None, traceback=None):
raise NotImplementedError
def _start(self):
if settings.NO_OP:
return
atexit.register(self._end)
self.start()
def excepthook(exception, value, tb):
self.failed(message='Type: {}, Value: {}'.format(exception, value))
# Resume normal work
sys.__excepthook__(exception, value, tb)
sys.excepthook = excepthook
def _end(self):
if settings.NO_OP:
return
self.succeeded()
def start(self):
if settings.NO_OP:
return
self.log_status('running')
self.last_status = 'running'
def end(self, status, message=None):
if settings.NO_OP:
return
if self.last_status in ['succeeded', 'failed', 'stopped']:
return
self.log_status(status, message)
self.last_status = status
time.sleep(0.1) # Just to give the opportunity to the worker to pick the message
def succeeded(self):
if settings.NO_OP:
return
self.end('succeeded')
def stop(self):
if settings.NO_OP:
return
self.end('stopped')
def failed(self, message=None):
if settings.NO_OP:
return
self.end(status='failed', message=message)
def set_outputs_store(self, outputs_store=None, outputs_path=None, set_env_vars=False):
if settings.NO_OP:
return
if not any([outputs_store, outputs_path]):
raise PolyaxonClientException(
'An Store instance or and outputs path is required.')
self.outputs_store = outputs_store or StoreManager(path=outputs_path)
if self.outputs_store and set_env_vars:
self.outputs_store.set_env_vars()
def log_output(self, filename, **kwargs):
if settings.NO_OP:
return
self.outputs_store.upload_file(filename=filename)
def log_outputs(self, dirname, **kwargs):
if settings.NO_OP:
return
self.outputs_store.upload_dir(dirname=dirname)
| Python | 0 |
a2eae87fc76ba1e9fbfa8102c3e19c239445a62a | Fix form retrieval in ModelForm | nazs/web/forms.py | nazs/web/forms.py | from achilles.forms import * # noqa
from nazs.models import SingletonModel
# Override forms template
Form.template_name = 'web/form.html'
class ModelForm(ModelForm):
def get_form(self, form_data=None, *args, **kwargs):
# manage SingletonModels
if issubclass(self.form_class.Meta.model, SingletonModel):
instance = self.form_class.Meta.model.get()
return self.form_class(form_data, instance=instance)
else:
return super(ModelForm, self).get_form(form_data, *args, **kwargs)
| from achilles.forms import * # noqa
from nazs.models import SingletonModel
# Override forms template
Form.template_name = 'web/form.html'
class ModelForm(ModelForm):
def get_form(self, form_data=None, *args, **kwargs):
# manage SingletonModels
if issubclass(self.form_class.Meta.model, SingletonModel):
instance = self.form_class.Meta.model.get()
return self.form_class(form_data, instance=instance)
else:
return super(ModelForm, self).get_form(*args, **kwargs)
| Python | 0.000002 |
a4ee20e078175c5d75380afca7b02305440ab32f | Add a couple numeric columns to better portray overall performance. | postgresql/test/perf_query_io.py | postgresql/test/perf_query_io.py | #!/usr/bin/env python
##
# copyright 2009, James William Pye
# http://python.projects.postgresql.org
##
# Statement I/O: Mass insert and select performance
##
import os
import time
import sys
import decimal
def insertSamples(count, insert_records):
recs = [
(-3, 123, 0xfffffea023, decimal.Decimal("90900023123.40031"), decimal.Decimal("432.40031"), 'some_óäæ_thing', 'varying', 'æ')
for x in range(count)
]
gen = time.time()
insert_records.load(recs)
fin = time.time()
xacttime = fin - gen
ats = count / xacttime
sys.stderr.write(
"INSERT Summary,\n " \
"inserted tuples: %d\n " \
"total time: %f\n " \
"average tuples per second: %f\n\n" %(
count, xacttime, ats,
)
)
def timeTupleRead(portal):
loops = 0
tuples = 0
genesis = time.time()
for x in portal.chunks:
loops += 1
tuples += len(x)
finalis = time.time()
looptime = finalis - genesis
ats = tuples / looptime
sys.stderr.write(
"SELECT Summary,\n " \
"looped: {looped}\n " \
"looptime: {looptime}\n " \
"tuples: {ntuples}\n " \
"average tuples per second: {tps}\n ".format(
looped = loops,
looptime = looptime,
ntuples = tuples,
tps = ats
)
)
def main(count):
execute('CREATE TEMP TABLE samples '
'(i2 int2, i4 int4, i8 int8, n numeric, n2 numeric, t text, v varchar, c char)')
insert_records = prepare(
"INSERT INTO samples VALUES ($1, $2, $3, $4, $5, $6, $7, $8)"
)
select_records = prepare("SELECT * FROM samples")
try:
insertSamples(count, insert_records)
timeTupleRead(select_records())
finally:
execute("DROP TABLE samples")
def command(args):
main(int((args + [25000])[1]))
if __name__ == '__main__':
command(sys.argv)
| #!/usr/bin/env python
##
# copyright 2009, James William Pye
# http://python.projects.postgresql.org
##
# Statement I/O: Mass insert and select performance
##
import os
import time
import sys
def insertSamples(count, insert_records):
recs = [
(-3, 123, 0xfffffea023, 'some_óäæ_thing', 'varying', 'æ')
for x in range(count)
]
gen = time.time()
insert_records.load(recs)
fin = time.time()
xacttime = fin - gen
ats = count / xacttime
sys.stderr.write(
"INSERT Summary,\n " \
"inserted tuples: %d\n " \
"total time: %f\n " \
"average tuples per second: %f\n\n" %(
count, xacttime, ats,
)
)
def timeTupleRead(portal):
loops = 0
tuples = 0
genesis = time.time()
for x in portal.chunks:
loops += 1
tuples += len(x)
finalis = time.time()
looptime = finalis - genesis
ats = tuples / looptime
sys.stderr.write(
"SELECT Summary,\n " \
"looped: {looped}\n " \
"looptime: {looptime}\n " \
"tuples: {ntuples}\n " \
"average tuples per second: {tps}\n ".format(
looped = loops,
looptime = looptime,
ntuples = tuples,
tps = ats
)
)
def main(count):
execute('CREATE TEMP TABLE samples '
'(i2 int2, i4 int4, i8 int8, t text, v varchar, c char)')
insert_records = prepare(
"INSERT INTO samples VALUES ($1, $2, $3, $4, $5, $6)"
)
select_records = prepare("SELECT * FROM samples")
try:
insertSamples(count, insert_records)
timeTupleRead(select_records())
finally:
execute("DROP TABLE samples")
def command(args):
main(int((args + [25000])[1]))
if __name__ == '__main__':
command(sys.argv)
| Python | 0 |
b6dff8fcd7dec56703006f2a7bcf1c8c72d0c21b | FIX price sec. related field as readonly | price_security/models/invoice.py | price_security/models/invoice.py | # -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import fields, models, api
class account_invoice_line(models.Model):
_inherit = 'account.invoice.line'
# we add this fields instead of making original readonly because we need
# on change to change values, we make readonly in view because sometimes
# we want them to be writeable
invoice_line_tax_id_readonly = fields.Many2many(
related='invoice_line_tax_id',
)
price_unit_readonly = fields.Float(
related='price_unit',
)
product_can_modify_prices = fields.Boolean(
related='product_id.can_modify_prices',
readonly=True,
string='Product Can modify prices')
@api.one
@api.constrains(
'discount', 'product_can_modify_prices')
def check_discount(self):
if (
self.user_has_groups(
'price_security.group_restrict_prices') and
not self.product_can_modify_prices and self.invoice_id
):
self.env.user.check_discount(
self.discount,
self.invoice_id.partner_id.property_product_pricelist.id)
| # -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import fields, models, api
class account_invoice_line(models.Model):
_inherit = 'account.invoice.line'
# we add this fields instead of making original readonly because we need
# on change to change values, we make readonly in view because sometimes
# we want them to be writeable
invoice_line_tax_id_readonly = fields.Many2many(
related='invoice_line_tax_id',
)
price_unit_readonly = fields.Float(
related='price_unit',
)
product_can_modify_prices = fields.Boolean(
related='product_id.can_modify_prices',
string='Product Can modify prices')
@api.one
@api.constrains(
'discount', 'product_can_modify_prices')
def check_discount(self):
if (
self.user_has_groups(
'price_security.group_restrict_prices') and
not self.product_can_modify_prices and self.invoice_id
):
self.env.user.check_discount(
self.discount,
self.invoice_id.partner_id.property_product_pricelist.id)
| Python | 0.000003 |
fb142d3324ca974c9308cb8ab18dd9db2c2aae0b | Use monospace font | editor.py | editor.py | #!/usr/bin/env python
import sys
import sip
sip.setapi('QString', 2)
from PyQt4.QtGui import QApplication, QFont, QPlainTextEdit, QSyntaxHighlighter, \
QTextCharFormat, QTextBlockUserData
from qutepart.SyntaxHighlighter import SyntaxHighlighter
from qutepart.syntax_manager import SyntaxManager
def main():
if len(sys.argv) != 2:
print 'Usage:\n\t%s FILE' % sys.argv[0]
filePath = sys.argv[1]
try:
syntax = SyntaxManager().getSyntaxBySourceFileName(filePath)
except KeyError:
print 'No syntax for', filePath
return
print 'Using syntax', syntax.name
with open(filePath) as file:
text = file.read()
app = QApplication(sys.argv)
pte = QPlainTextEdit()
pte.setPlainText(text)
pte.setWindowTitle(filePath)
pte.setFont(QFont("Monospace"))
hl = SyntaxHighlighter(syntax, pte.document())
pte.show()
return app.exec_()
if __name__ == '__main__':
main()
| #!/usr/bin/env python
import sys
import sip
sip.setapi('QString', 2)
from PyQt4.QtGui import QApplication, QPlainTextEdit, QSyntaxHighlighter, \
QTextCharFormat, QTextBlockUserData
from qutepart.SyntaxHighlighter import SyntaxHighlighter
from qutepart.syntax_manager import SyntaxManager
def main():
if len(sys.argv) != 2:
print 'Usage:\n\t%s FILE' % sys.argv[0]
filePath = sys.argv[1]
try:
syntax = SyntaxManager().getSyntaxBySourceFileName(filePath)
except KeyError:
print 'No syntax for', filePath
return
print 'Using syntax', syntax.name
with open(filePath) as file:
text = file.read()
app = QApplication(sys.argv)
pte = QPlainTextEdit()
pte.setPlainText(text)
pte.setWindowTitle(filePath)
hl = SyntaxHighlighter(syntax, pte.document())
pte.show()
return app.exec_()
if __name__ == '__main__':
main()
| Python | 0.000001 |
a098efa1b69d2de3b1e2437a056b0c6937cbf998 | add documentation | src/bat/images.py | src/bat/images.py | #!/usr/bin/python
## Binary Analysis Tool
## Copyright 2012 Armijn Hemel for Tjaldur Software Governance Solutions
## Licensed under Apache 2.0, see LICENSE file for details
'''
This is a plugin for the Binary Analysis Tool. It generates images of files, both
full files and thumbnails. The files can be used for informational purposes, such
as detecting roughly where offsets can be found, if data is compressed or encrypted,
etc.
It also generates histograms, which show how different byte values are distributed.
This can provide another visual clue about how files are constructed. Binaries from
the same type (like ELF binaries) are actually quite similar, so binaries that
significantly deviate from this could mean something interesting.
This should be run as a postrun scan
'''
import os, os.path, sys, subprocess, array
from PIL import Image
def generateImages(filename, unpackreport, leafscans, envvars={}):
if not unpackreport.has_key('sha256'):
return
scanenv = os.environ
if envvars != None:
for en in envvars.split(':'):
try:
(envname, envvalue) = en.split('=')
scanenv[envname] = envvalue
except Exception, e:
pass
## TODO: check if BAT_IMAGEDIR exists
imagedir = scanenv.get('BAT_IMAGEDIR', '.')
fwfile = open(filename)
## this is very inefficient for large files, but we *really* need all the data :-(
fwdata = fwfile.read()
fwfile.close()
fwlen = len(fwdata)
if fwlen > 1024:
height = 1024
else:
height = fwlen
width = fwlen/height
## we might need to add some bytes so we can create a valid picture
if fwlen%height > 0:
width = width + 1
for i in range(0, height - (fwlen%height)):
fwdata = fwdata + chr(0)
imgbuffer = buffer(bytearray(fwdata))
im = Image.frombuffer("L", (height, width), imgbuffer, "raw", "L", 0, 1)
im.save("%s/%s.png" % (imagedir, unpackreport['sha256']))
if width > 100:
imthumb = im.thumbnail((height/4, width/4))
im.save("%s/%s-thumbnail.png" % (imagedir, unpackreport['sha256']))
'''
## generate histogram
p = subprocess.Popen(['python', '/home/armijn/gpltool/trunk/bat-extratools/bat-visualisation/bat-generate-histogram.py', '-i', filename, '-o', '%s/%s-histogram.png' % (imagedir, unpackreport['sha256'])], stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
(stanout, stanerr) = p.communicate()
if p.returncode != 0:
print >>sys.stderr, stanerr
'''
| #!/usr/bin/python
## Binary Analysis Tool
## Copyright 2012 Armijn Hemel for Tjaldur Software Governance Solutions
## Licensed under Apache 2.0, see LICENSE file for details
'''
This is a plugin for the Binary Analysis Tool. It generates images of files, both
full files and thumbnails. The files can be used for informational purposes, such
as detecting roughly where offsets can be found, if data is compressed or encrypted,
etc.
This should be run as a postrun scan
'''
import os, os.path, sys, subprocess, array
from PIL import Image
def generateImages(filename, unpackreport, leafscans, envvars={}):
if not unpackreport.has_key('sha256'):
return
scanenv = os.environ
if envvars != None:
for en in envvars.split(':'):
try:
(envname, envvalue) = en.split('=')
scanenv[envname] = envvalue
except Exception, e:
pass
## TODO: check if BAT_IMAGEDIR exists
imagedir = scanenv.get('BAT_IMAGEDIR', '.')
fwfile = open(filename)
## this is very inefficient for large files, but we *really* need all the data :-(
fwdata = fwfile.read()
fwfile.close()
fwlen = len(fwdata)
if fwlen > 1024:
height = 1024
else:
height = fwlen
width = fwlen/height
## we might need to add some bytes so we can create a valid picture
if fwlen%height > 0:
width = width + 1
for i in range(0, height - (fwlen%height)):
fwdata = fwdata + chr(0)
imgbuffer = buffer(bytearray(fwdata))
im = Image.frombuffer("L", (height, width), imgbuffer, "raw", "L", 0, 1)
im.save("%s/%s.png" % (imagedir, unpackreport['sha256']))
if width > 100:
imthumb = im.thumbnail((height/4, width/4))
im.save("%s/%s-thumbnail.png" % (imagedir, unpackreport['sha256']))
'''
p = subprocess.Popen(['python', '/home/armijn/gpltool/trunk/bat-extratools/bat-visualisation/bat-generate-histogram.py', '-i', filename, '-o', '%s/%s-histogram.png' % (imagedir, unpackreport['sha256'])], stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
(stanout, stanerr) = p.communicate()
if p.returncode != 0:
print >>sys.stderr, stanerr
'''
| Python | 0 |
7a60bd74b3af40223553c64dafed07c46c5db639 | add a --jit commandline option | prolog/targetprologstandalone.py | prolog/targetprologstandalone.py | """
A simple standalone target for the prolog interpreter.
"""
import sys
from prolog.interpreter.translatedmain import repl, execute
# __________ Entry point __________
from prolog.interpreter.continuation import Engine, jitdriver
from prolog.interpreter import term
from prolog.interpreter import arithmetic # for side effects
from prolog import builtin # for side effects
e = Engine(load_system=True)
term.DEBUG = False
def entry_point(argv):
e.clocks.startup()
# XXX crappy argument handling
for i in range(len(argv)):
if argv[i] == "--jit":
if len(argv) == i + 1:
print "missing argument after --jit"
return 2
jitarg = argv[i + 1]
del argv[i:i+2]
jitdriver.set_user_param(jitarg)
break
if len(argv) == 2:
execute(e, argv[1])
if len(argv) > 2:
print "too many arguments"
return 2
try:
repl(e)
except SystemExit:
return 1
return 0
# _____ Define and setup target ___
def target(driver, args):
driver.exe_name = 'pyrolog-%(backend)s'
return entry_point, None
def portal(driver):
from prolog.interpreter.portal import get_portal
return get_portal(driver)
def jitpolicy(self):
from pypy.jit.codewriter.policy import JitPolicy
return JitPolicy()
if __name__ == '__main__':
entry_point(sys.argv)
| """
A simple standalone target for the prolog interpreter.
"""
import sys
from prolog.interpreter.translatedmain import repl, execute
# __________ Entry point __________
from prolog.interpreter.continuation import Engine
from prolog.interpreter import term
from prolog.interpreter import arithmetic # for side effects
from prolog import builtin # for side effects
e = Engine(load_system=True)
term.DEBUG = False
def entry_point(argv):
e.clocks.startup()
if len(argv) == 2:
execute(e, argv[1])
try:
repl(e)
except SystemExit:
return 1
return 0
# _____ Define and setup target ___
def target(driver, args):
driver.exe_name = 'pyrolog-%(backend)s'
return entry_point, None
def portal(driver):
from prolog.interpreter.portal import get_portal
return get_portal(driver)
def jitpolicy(self):
from pypy.jit.codewriter.policy import JitPolicy
return JitPolicy()
if __name__ == '__main__':
entry_point(sys.argv)
| Python | 0.000002 |
3f1f86c358efc6d38012191c4b613aa775861805 | Fix 'graph3d.py' to read from VTKData directory | Examples/Infovis/Python/graph3d.py | Examples/Infovis/Python/graph3d.py | from vtk import *
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
reader = vtkXGMLReader()
reader.SetFileName(VTK_DATA_ROOT + "/Data/Infovis/fsm.gml")
reader.Update()
strategy = vtkSpanTreeLayoutStrategy()
strategy.DepthFirstSpanningTreeOn()
view = vtkGraphLayoutView()
view.AddRepresentationFromInputConnection(reader.GetOutputPort())
view.SetVertexLabelArrayName("vertex id")
view.SetVertexLabelVisibility(True)
view.SetVertexColorArrayName("vertex id")
view.SetColorVertices(True)
view.SetLayoutStrategy( strategy )
view.SetInteractionModeTo3D() # Left mouse button causes 3D rotate instead of zoom
theme = vtkViewTheme.CreateMellowTheme()
theme.SetCellColor(.2,.2,.6)
theme.SetLineWidth(2)
theme.SetPointSize(10)
view.ApplyViewTheme(theme)
theme.FastDelete()
view.GetRenderWindow().SetSize(600, 600)
view.ResetCamera()
view.Render()
#Here's the window with David's original layout methodology
# Aside from the theme elements in the view above, the notable
# difference between the two views is the angling on the edges.
layout = vtkGraphLayout()
layout.SetLayoutStrategy(strategy)
layout.SetInputConnection(reader.GetOutputPort())
edge_geom = vtkGraphToPolyData()
edge_geom.SetInputConnection(layout.GetOutputPort())
vertex_geom = vtkGraphToPoints()
vertex_geom.SetInputConnection(layout.GetOutputPort())
# Vertex pipeline - mark each vertex with a cube glyph
cube = vtkCubeSource()
cube.SetXLength(0.3)
cube.SetYLength(0.3)
cube.SetZLength(0.3)
glyph = vtkGlyph3D()
glyph.SetInputConnection(vertex_geom.GetOutputPort())
glyph.SetSourceConnection(0, cube.GetOutputPort())
gmap = vtkPolyDataMapper()
gmap.SetInputConnection(glyph.GetOutputPort())
gact = vtkActor()
gact.SetMapper(gmap)
gact.GetProperty().SetColor(0,0,1)
# Edge pipeline - map edges to lines
mapper = vtkPolyDataMapper()
mapper.SetInputConnection(edge_geom.GetOutputPort())
actor = vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetColor(0.4,0.4,0.6)
# Renderer, window, and interaction
ren = vtkRenderer()
ren.AddActor(actor)
ren.AddActor(gact)
ren.ResetCamera()
renWin = vtkRenderWindow()
renWin.AddRenderer(ren)
renWin.SetSize(800,550)
iren = vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
iren.Initialize()
#iren.Start()
view.GetInteractor().Start()
| from vtk import *
reader = vtkXGMLReader()
reader.SetFileName("fsm.gml")
reader.Update()
strategy = vtkSpanTreeLayoutStrategy()
strategy.DepthFirstSpanningTreeOn()
view = vtkGraphLayoutView()
view.AddRepresentationFromInputConnection(reader.GetOutputPort())
view.SetVertexLabelArrayName("vertex id")
view.SetVertexLabelVisibility(True)
view.SetVertexColorArrayName("vertex id")
view.SetColorVertices(True)
view.SetLayoutStrategy( strategy )
view.SetInteractionModeTo3D() # Left mouse button causes 3D rotate instead of zoom
theme = vtkViewTheme.CreateMellowTheme()
theme.SetCellColor(.2,.2,.6)
theme.SetLineWidth(2)
theme.SetPointSize(10)
view.ApplyViewTheme(theme)
theme.FastDelete()
view.GetRenderWindow().SetSize(600, 600)
view.ResetCamera()
view.Render()
#Here's the window with David's original layout methodology
# Aside from the theme elements in the view above, the notable
# difference between the two views is the angling on the edges.
layout = vtkGraphLayout()
layout.SetLayoutStrategy(strategy)
layout.SetInputConnection(reader.GetOutputPort())
edge_geom = vtkGraphToPolyData()
edge_geom.SetInputConnection(layout.GetOutputPort())
vertex_geom = vtkGraphToPoints()
vertex_geom.SetInputConnection(layout.GetOutputPort())
# Vertex pipeline - mark each vertex with a cube glyph
cube = vtkCubeSource()
cube.SetXLength(0.3)
cube.SetYLength(0.3)
cube.SetZLength(0.3)
glyph = vtkGlyph3D()
glyph.SetInputConnection(vertex_geom.GetOutputPort())
glyph.SetSourceConnection(0, cube.GetOutputPort())
gmap = vtkPolyDataMapper()
gmap.SetInputConnection(glyph.GetOutputPort())
gact = vtkActor()
gact.SetMapper(gmap)
gact.GetProperty().SetColor(0,0,1)
# Edge pipeline - map edges to lines
mapper = vtkPolyDataMapper()
mapper.SetInputConnection(edge_geom.GetOutputPort())
actor = vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetColor(0.4,0.4,0.6)
# Renderer, window, and interaction
ren = vtkRenderer()
ren.AddActor(actor)
ren.AddActor(gact)
ren.ResetCamera()
renWin = vtkRenderWindow()
renWin.AddRenderer(ren)
renWin.SetSize(800,550)
iren = vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
iren.Initialize()
#iren.Start()
view.GetInteractor().Start()
| Python | 0 |
0cd2af0f20b6b544f0d36140a098ca8e3058d8fa | Update constants | node/constants.py | node/constants.py | ######### KADEMLIA CONSTANTS ###########
#: Small number Representing the degree of parallelism in network calls
alpha = 3
#: Maximum number of contacts stored in a bucket; this should be an even number
k = 8
#: Timeout for network operations (in seconds)
rpcTimeout = 5
# Delay between iterations of iterative node lookups (for loose parallelism) (in seconds)
iterativeLookupDelay = rpcTimeout / 2
#: If a k-bucket has not been used for this amount of time, refresh it (in seconds)
refreshTimeout = 3600 # 1 hour
#: The interval at which nodes replicate (republish/refresh) data they are holding
replicateInterval = refreshTimeout
# The time it takes for data to expire in the network; the original publisher of the data
# will also republish the data at this time if it is still valid
dataExpireTimeout = 86400 # 24 hours
######## IMPLEMENTATION-SPECIFIC CONSTANTS ###########
#: The interval in which the node should check its whether any buckets need refreshing,
#: or whether any data needs to be republished (in seconds)
checkRefreshInterval = refreshTimeout/5
#: Max size of a single UDP datagram, in bytes. If a message is larger than this, it will
#: be spread accross several UDP packets.
udpDatagramMaxSize = 8192 # 8 KB
| ######### KADEMLIA CONSTANTS ###########
#: Small number Representing the degree of parallelism in network calls
alpha = 3
#: Maximum number of contacts stored in a bucket; this should be an even number
k = 8
# Delay between iterations of iterative node lookups (for loose parallelism) (in seconds)
iterativeLookupDelay = rpcTimeout / 2
#: If a k-bucket has not been used for this amount of time, refresh it (in seconds)
refreshTimeout = 3600 # 1 hour
#: The interval at which nodes replicate (republish/refresh) data they are holding
replicateInterval = refreshTimeout
# The time it takes for data to expire in the network; the original publisher of the data
# will also republish the data at this time if it is still valid
dataExpireTimeout = 86400 # 24 hours
######## IMPLEMENTATION-SPECIFIC CONSTANTS ###########
#: The interval in which the node should check its whether any buckets need refreshing,
#: or whether any data needs to be republished (in seconds)
checkRefreshInterval = refreshTimeout/5
| Python | 0.000001 |
8765ac953047ba1c63eb2eb2eb087ba92e9213bc | fix switch template | Firefly/core/templates/__init__.py | Firefly/core/templates/__init__.py | # -*- coding: utf-8 -*-
# @Author: Zachary Priddy
# @Date: 2016-04-12 13:33:30
# @Last Modified by: Zachary Priddy
# @Last Modified time: 2016-04-12 13:33:30
class Templates(object):
def __init__(self):
self._filepath = 'core/templates/'
self._switch_template = self.get_template('switch')
def get_template(self, template):
with open('%s%s.html' % (self._filepath, template)) as template_file:
return template_file.read().replace('\n', '')
@property
def switch(self):
"""
Builds a switch template from switch.html.
Returns:
template (str): string of switch template
"""
return self._switch_template
ffTemplates = Templates()
| # -*- coding: utf-8 -*-
# @Author: Zachary Priddy
# @Date: 2016-04-12 13:33:30
# @Last Modified by: Zachary Priddy
# @Last Modified time: 2016-04-12 13:33:30
class Templates(object):
def __init__(self):
self._filepath = 'core/templates/'
self._switch_template = self.get_template('switch')
def get_template(self, template):
with open('%s%s.html' % (self._filepath, template)) as template_file:
return template_file.read().replace('\n', '')
@property
def switch(self):
"""
Builds a switch template from switch.html.
Returns:
template (str): string of switch template
"""
return self._switch
ffTemplates = Templates()
| Python | 0.000001 |
85fe9b8b48b565488406343de41fa77b41357e4a | define skip | ooiservices/tests/test_models.py | ooiservices/tests/test_models.py | #!/usr/bin/env python
'''
unit testing for the model classes.
'''
__author__ = 'M@Campbell'
import unittest
from flask import url_for
from ooiservices.app import create_app, db
from ooiservices.app.models import Array, InstrumentDeployment, PlatformDeployment, Stream, \
StreamParameter, User, OperatorEvent, OperatorEventType, Organization
from unittest import skipIf
'''
These tests are additional to the normal testing performed by coverage; each of
these tests are to validate model logic outside of db management.
'''
class ModelTestCase(unittest.TestCase):
def setUp(self):
self.app = create_app('TESTING_CONFIG')
self.app_context = self.app.app_context()
self.app_context.push()
db.create_all()
self.client = self.app.test_client(use_cookies=False)
Organization.insert_org()
def tearDown(self):
db.session.remove()
db.drop_all()
self.app_context.pop()
def test_array(self):
#Test the json in the object
array = Array()
self.assertTrue(array.to_json() == {'id': None, 'array_code': None, \
'array_name': None, 'description': None, 'display_name': None, \
'geo_location': None})
def test_platform_deployment(self):
#Test the json in the object
platform_deployment = PlatformDeployment()
self.assertTrue(platform_deployment.to_json() == {'id': None, \
'array_id': None, 'display_name': None, 'end_date': None, \
'geo_location': None, 'reference_designator': None, 'start_date': None})
def test_instrument_deployment(self):
#Test the json in the object
instrument_deployment = InstrumentDeployment()
should_be = {
'id' :None,
'depth': None,
'display_name' : None,
'end_date' : None,
'geo_location': None,
'platform_deployment_id' : None,
'reference_designator' : None,
'start_date' : None
}
self.assertEquals(instrument_deployment.to_json() , should_be)
@skipIf(os.getenv('TRAVIS'), 'Skip if testing from Travis CI.')
def test_stream(self):
#Test the json in the object
stream = Stream()
self.assertTrue(stream.to_json() == {'id': None, 'description': None, \
'instrument_id': None, 'stream_name': None})
def test_parameter(self):
#Test the json in the object
stream_param = StreamParameter()
self.assertTrue(stream_param.to_json() == {'id': None, 'data_type': None, \
'long_name': None, 'parameter_name': None, 'short_name': None, \
'standard_name': None, 'units': None})
def test_user(self):
#Test the json in the object
user = User()
self.assertEquals(user.to_json(), {
'email': None,
'id': None,
'user_id': None,
'active':None,
'first_name': None,
'last_name' : None,
'organization_id' : None,
'phone_alternate' : None,
'phone_primary' : None,
'scopes' : [],
'role' : None,
'user_name': None,
'email_opt_in': None})
def test_operator_event_type(self):
#Test the json in the object
operator_event_type = OperatorEventType()
self.assertTrue(operator_event_type.to_json() == {'id': None, 'type_name': None, 'type_description': None})
def test_geometry(self):
platform_deployment = PlatformDeployment()
platform_deployment.reference_designator = 'TEST0000'
platform_deployment.geo_location = 'POINT(-70 40)'
db.session.add(platform_deployment)
db.session.commit()
pd = PlatformDeployment.query.filter(PlatformDeployment.reference_designator=='TEST0000').first()
self.assertEquals(pd.geojson, {'coordinates': [-70, 40], 'type': 'Point'})
| #!/usr/bin/env python
'''
unit testing for the model classes.
'''
__author__ = 'M@Campbell'
import unittest
from flask import url_for
from ooiservices.app import create_app, db
from ooiservices.app.models import Array, InstrumentDeployment, PlatformDeployment, Stream, \
StreamParameter, User, OperatorEvent, OperatorEventType, Organization
'''
These tests are additional to the normal testing performed by coverage; each of
these tests are to validate model logic outside of db management.
'''
class ModelTestCase(unittest.TestCase):
def setUp(self):
self.app = create_app('TESTING_CONFIG')
self.app_context = self.app.app_context()
self.app_context.push()
db.create_all()
self.client = self.app.test_client(use_cookies=False)
Organization.insert_org()
def tearDown(self):
db.session.remove()
db.drop_all()
self.app_context.pop()
def test_array(self):
#Test the json in the object
array = Array()
self.assertTrue(array.to_json() == {'id': None, 'array_code': None, \
'array_name': None, 'description': None, 'display_name': None, \
'geo_location': None})
def test_platform_deployment(self):
#Test the json in the object
platform_deployment = PlatformDeployment()
self.assertTrue(platform_deployment.to_json() == {'id': None, \
'array_id': None, 'display_name': None, 'end_date': None, \
'geo_location': None, 'reference_designator': None, 'start_date': None})
def test_instrument_deployment(self):
#Test the json in the object
instrument_deployment = InstrumentDeployment()
should_be = {
'id' :None,
'depth': None,
'display_name' : None,
'end_date' : None,
'geo_location': None,
'platform_deployment_id' : None,
'reference_designator' : None,
'start_date' : None
}
self.assertEquals(instrument_deployment.to_json() , should_be)
@skipIf(os.getenv('TRAVIS'), 'Skip if testing from Travis CI.')
def test_stream(self):
#Test the json in the object
stream = Stream()
self.assertTrue(stream.to_json() == {'id': None, 'description': None, \
'instrument_id': None, 'stream_name': None})
def test_parameter(self):
#Test the json in the object
stream_param = StreamParameter()
self.assertTrue(stream_param.to_json() == {'id': None, 'data_type': None, \
'long_name': None, 'parameter_name': None, 'short_name': None, \
'standard_name': None, 'units': None})
def test_user(self):
#Test the json in the object
user = User()
self.assertEquals(user.to_json(), {
'email': None,
'id': None,
'user_id': None,
'active':None,
'first_name': None,
'last_name' : None,
'organization_id' : None,
'phone_alternate' : None,
'phone_primary' : None,
'scopes' : [],
'role' : None,
'user_name': None,
'email_opt_in': None})
def test_operator_event_type(self):
#Test the json in the object
operator_event_type = OperatorEventType()
self.assertTrue(operator_event_type.to_json() == {'id': None, 'type_name': None, 'type_description': None})
def test_geometry(self):
platform_deployment = PlatformDeployment()
platform_deployment.reference_designator = 'TEST0000'
platform_deployment.geo_location = 'POINT(-70 40)'
db.session.add(platform_deployment)
db.session.commit()
pd = PlatformDeployment.query.filter(PlatformDeployment.reference_designator=='TEST0000').first()
self.assertEquals(pd.geojson, {'coordinates': [-70, 40], 'type': 'Point'})
| Python | 0.000207 |
38de795103748ca757a03a62da8ef3d89b0bf682 | Fix bug that prevent commands with no values from being added | GoProController/models.py | GoProController/models.py | from django.db import models
class Camera(models.Model):
ssid = models.CharField(max_length=255)
password = models.CharField(max_length=255)
date_added = models.DateTimeField(auto_now_add=True)
last_attempt = models.DateTimeField(auto_now=True)
last_update = models.DateTimeField(null=True, blank=True)
image_last_update = models.DateTimeField(null=True, blank=True)
image = models.TextField(blank=True)
summary = models.TextField(blank=True)
status = models.TextField(blank=True)
connection_attempts = models.IntegerField(default=0)
connection_failures = models.IntegerField(default=0)
def __unicode__(self):
return self.ssid
class Command(models.Model):
camera = models.ForeignKey(Camera)
command = models.CharField(max_length=255)
value = models.CharField(blank=True, max_length=255)
date_added = models.DateTimeField(auto_now_add=True)
time_completed = models.DateTimeField(null=True, blank=True)
def __unicode__(self):
return self.camera.__unicode__() + ' > ' + self.command
| from django.db import models
class Camera(models.Model):
ssid = models.CharField(max_length=255)
password = models.CharField(max_length=255)
date_added = models.DateTimeField(auto_now_add=True)
last_attempt = models.DateTimeField(auto_now=True)
last_update = models.DateTimeField(null=True, blank=True)
image_last_update = models.DateTimeField(null=True, blank=True)
image = models.TextField(blank=True)
summary = models.TextField(blank=True)
status = models.TextField(blank=True)
connection_attempts = models.IntegerField(default=0)
connection_failures = models.IntegerField(default=0)
def __unicode__(self):
return self.ssid
class Command(models.Model):
camera = models.ForeignKey(Camera)
command = models.CharField(max_length=255)
value = models.CharField(max_length=255)
date_added = models.DateTimeField(auto_now_add=True)
time_completed = models.DateTimeField(null=True, blank=True)
def __unicode__(self):
return self.camera.__unicode__() + ' > ' + self.command
| Python | 0 |
e1ad05fb19577aa108b94ea500106e36b29915fc | update indentation | amount_raised_by_candidate.py | amount_raised_by_candidate.py | # Written by Jonathan Saewitz, released May 24th, 2016 for Statisti.ca
# Released under the MIT License (https://opensource.org/licenses/MIT)
import csv, plotly.plotly as plotly, plotly.graph_objs as go, requests
from bs4 import BeautifulSoup
candidates=[]
with open('presidential_candidates.csv', 'r') as f:
reader=csv.reader(f)
reader.next() #skip the headers row
for row in reader: #loop through the candidates
c_id=row[15] #row[15] is the candidate's FEC id
html=requests.get('https://beta.fec.gov/data/candidate/' + c_id).text #get the candidate's FEC page
b=BeautifulSoup(html, 'html.parser')
if len(b.find_all(class_='t-big-data'))==0: #if this class isn't found on the candidate's FEC page,
#the candidate raised $0
amt=0.0
else:
amt=float(b.find_all(class_="t-big-data")[0].text.strip().replace("$", "").replace(",", ""))
#class "t-big-data" contains the money data
#the 0th element contains the total receipts
#.text gets only the text (i.e. amount raised)
#.strip() removes all whitespace
#.replace("$", "") removes the dollar sign
#.replace(",", "") removes all commas
#we should be left with the total amount raised in the form 0.00
name=row[14] #row[14] is the candidate's name
candidates.append({'name': name, 'amount': amt})
candidates=sorted(candidates, key=lambda k: k['amount']) #sort the candidates by amount raised
trace=go.Bar(
x=[candidate['name'] for candidate in candidates],
y=[candidate['amount'] for candidate in candidates]
)
layout=go.Layout(
title="Presidential Candidates by Money Raised",
xaxis=dict(
title="Candidates",
),
yaxis=dict(
title="Amount raised ($)",
)
)
data=[trace]
fig=dict(data=data, layout=layout)
plotly.plot(fig)
| # Written by Jonathan Saewitz, released May 24th, 2016 for Statisti.ca
# Released under the MIT License (https://opensource.org/licenses/MIT)
import csv, plotly.plotly as plotly, plotly.graph_objs as go, requests
from bs4 import BeautifulSoup
candidates=[]
with open('presidential_candidates.csv', 'r') as f:
reader=csv.reader(f)
reader.next() #skip the headers row
for row in reader: #loop through the candidates
c_id=row[15] #row[15] is the candidate's FEC id
html=requests.get('https://beta.fec.gov/data/candidate/' + c_id).text #get the candidate's FEC page
b=BeautifulSoup(html, 'html.parser')
if len(b.find_all(class_='t-big-data'))==0: #if this class isn't found on the candidate's FEC page,
#the candidate raised $0
amt=0.0
else:
amt=float(b.find_all(class_="t-big-data")[0].text.strip().replace("$", "").replace(",", ""))
#class "t-big-data" contains the money data
#the 0th element contains the total receipts
#.text gets only the text (i.e. amount raised)
#.strip() removes all whitespace
#.replace("$", "") removes the dollar sign
#.replace(",", "") removes all commas
#we should be left with the total amount raised in the form 0.00
name=row[14] #row[14] is the candidate's name
candidates.append({'name': name, 'amount': amt})
candidates=sorted(candidates, key=lambda k: k['amount']) #sort the candidates by amount raised
trace=go.Bar(
x=[candidate['name'] for candidate in candidates],
y=[candidate['amount'] for candidate in candidates]
)
layout=go.Layout(
title="Presidential Candidates by Money Raised",
xaxis=dict(
title="Candidates",
),
yaxis=dict(
title="Amount raised ($)",
)
)
data=[trace]
fig=dict(data=data, layout=layout)
plotly.plot(fig)
| Python | 0.000001 |
e7163abf13e5cec78f3cd894bd3b8393f9cea6d2 | Fix counting total samples in cast view. | genome_designer/main/data_util.py | genome_designer/main/data_util.py | """
Common methods for getting data from the backend.
These methods are intended to be used by both views.py, which should define
only pages, and xhr_handlers.py, which are intended to respond to AJAX
requests.
This module interacts closely with the ModelViews in model_views.py.
"""
from collections import defaultdict
from django.db import connection
from main.model_views import CastVariantView
from main.model_views import MeltedVariantView
from main.models import ExperimentSample
from main.models import Variant
from main.models import VariantEvidence
from variants.common import dictfetchall
from variants.materialized_variant_filter import get_variants_that_pass_filter
class LookupVariantsResult(object):
"""Result of a call to lookup_variants.
Attributes:
result_list: List of cast or melted Variant objects.
num_total_variants: Total number of variants that match query.
For pagination.
"""
def __init__(self, result_list, num_total_variants):
self.result_list = result_list
self.num_total_variants = num_total_variants
def lookup_variants(reference_genome, combined_filter_string, is_melted,
pagination_start, pagination_len):
"""Manages the end-to-end flow of looking up Variants that match the
given filter.
This function delegates to the variant_filter module to get the list of
variants matching the filter. Then, this function takes those results
and handles casting them to appropriate view-type objects (e.g. Melted vs
Cast).
Returns:
LookupVariantsResult object.
"""
# First get the Variants that pass the filter.
filter_eval_result = get_variants_that_pass_filter(combined_filter_string,
reference_genome)
result_list = list(filter_eval_result.variant_set)
# If this is a melted view, return results as they are.
if is_melted:
# TODO: Handle pagination.
page_results = result_list[pagination_start :
pagination_start + pagination_len]
num_total_variants = 1000000
return LookupVariantsResult(page_results, num_total_variants)
# Otherwise, we need to Cast the results.
page_results = cast_joined_variant_objects(result_list)
page_results = page_results[pagination_start :
pagination_start + pagination_len]
num_total_variants = 1000000
return LookupVariantsResult(page_results, num_total_variants)
def cast_joined_variant_objects(melted_variant_list):
"""Converts the list of melted variants into a cast representation.
This means returning one row per variant, compressing columns
into an aggregate representation. For example, in this initial
implementation, the 'experiment_sample_uid' column becomes 'total_samples'.
"""
cast_obj_list = []
# First, we build a structure from variant id to list of result rows.
variant_id_to_result_row = defaultdict(list)
for result in melted_variant_list:
variant_id_to_result_row[result['id']].append(result)
for variant_id, result_row_list in variant_id_to_result_row.iteritems():
assert len(result_row_list), "Not expected. Debug."
position = result_row_list[0]['position']
ref = result_row_list[0]['ref']
uid = result_row_list[0]['uid']
# Count total samples.
total_samples = 0
for row in result_row_list:
if row['experiment_sample_uid']:
total_samples += 1
cast_obj_list.append({
'id': variant_id,
'uid': uid,
'position': position,
'ref': ref,
'alt': 'TODO',
'total_samples': total_samples
})
return cast_obj_list
| """
Common methods for getting data from the backend.
These methods are intended to be used by both views.py, which should define
only pages, and xhr_handlers.py, which are intended to respond to AJAX
requests.
This module interacts closely with the ModelViews in model_views.py.
"""
from collections import defaultdict
from django.db import connection
from main.model_views import CastVariantView
from main.model_views import MeltedVariantView
from main.models import ExperimentSample
from main.models import Variant
from main.models import VariantEvidence
from variants.common import dictfetchall
from variants.materialized_variant_filter import get_variants_that_pass_filter
class LookupVariantsResult(object):
"""Result of a call to lookup_variants.
Attributes:
result_list: List of cast or melted Variant objects.
num_total_variants: Total number of variants that match query.
For pagination.
"""
def __init__(self, result_list, num_total_variants):
self.result_list = result_list
self.num_total_variants = num_total_variants
def lookup_variants(reference_genome, combined_filter_string, is_melted,
pagination_start, pagination_len):
"""Manages the end-to-end flow of looking up Variants that match the
given filter.
This function delegates to the variant_filter module to get the list of
variants matching the filter. Then, this function takes those results
and handles casting them to appropriate view-type objects (e.g. Melted vs
Cast).
Returns:
LookupVariantsResult object.
"""
# First get the Variants that pass the filter.
filter_eval_result = get_variants_that_pass_filter(combined_filter_string,
reference_genome)
result_list = list(filter_eval_result.variant_set)
# If this is a melted view, return results as they are.
if is_melted:
# TODO: Handle pagination.
page_results = result_list[pagination_start :
pagination_start + pagination_len]
num_total_variants = 1000000
return LookupVariantsResult(page_results, num_total_variants)
# Otherwise, we need to Cast the results.
page_results = cast_joined_variant_objects(result_list)
page_results = page_results[pagination_start :
pagination_start + pagination_len]
num_total_variants = 1000000
return LookupVariantsResult(page_results, num_total_variants)
def cast_joined_variant_objects(melted_variant_list):
"""Converts the list of melted variants into a cast representation.
This means returning one row per variant, compressing other columns
into an aggregate representation. For example, the 'experiment_sample_uid'
column becomes the 'total_samples'.
"""
cast_obj_list = []
# First, we build a structure from variant id to list of result rows.
variant_id_to_result_row = defaultdict(list)
for result in melted_variant_list:
variant_id_to_result_row[result['id']].append(result)
for variant_id, result_row_list in variant_id_to_result_row.iteritems():
assert len(result_row_list), "Not expected. Debug."
position = result_row_list[0]['position']
ref = result_row_list[0]['ref']
uid = result_row_list[0]['uid']
total_samples = len(result_row_list)
cast_obj_list.append({
'id': variant_id,
'uid': uid,
'position': position,
'ref': ref,
'alt': 'TODO',
'total_samples': total_samples
})
return cast_obj_list
| Python | 0 |
a391da79f8213d26246234e489d0947b8b4b2a82 | Update to allo no CSRF when logining in on mobile | OctaHomeCore/authviews.py | OctaHomeCore/authviews.py | from django.contrib.auth import authenticate, login, logout
from OctaHomeCore.baseviews import *
from OctaHomeCore.models import *
from django.views.decorators.csrf import csrf_exempt
class handleLoginView(viewRequestHandler):
loginToken = ''
def handleRequest(self):
if self.Request.user.is_authenticated():
return super(handleLoginView, self).handleRequest()
if self.Post.has_key('username') and self.Post.has_key('password'):
user = authenticate(username=self.Post['username'], password=self.Post['password'])
if user is not None and user.authy_id != "":
self.loginToken = user.get_login_token()
elif user is not None:
login(self.Request, user)
elif self.Post.has_key('authytoken') and self.Post.has_key('logintoken'):
user = CustomUser().objects.authyCheck(self.Post['username'], self.Post['logintoken'], self.Post['authytoken'])
if user is not None:
login(self.Request, user)
return super(handleLoginView, self).handleRequest()
def getTemplate(self):
if self.Request.user != None and self.Request.user.is_authenticated():
if self.Post.has_key('next') and self.Post['next'] != '':
self.redirect(self.Post['next'])
else:
self.redirect(reverse('Home'))
return ''
if self.loginToken:
return 'OctaHomeCore/pages/Account/AuthyLogin'
else:
return 'OctaHomeCore/pages/Account/Login'
def getViewParameters(self):
parameters = {}
if self.Post.has_key('next'):
parameters.update({ 'next':self.Post['next'] })
if self.loginToken != '' and self.Post.has_key('username'):
parameters.update({ 'username':self.Post['username'], 'logintoken':self.loginToken })
return parameters
def getSideBar(self):
return []
def getSidebarUrlName(self):
return ''
def isPageSecured(self):
return False
class handleDeviceLoginView(viewRequestHandler):
@csrf_exempt
def post(self, request, *args, **kwargs):
return super(handleLoginView, self).post(self, request, *args, **kwargs)
def handleRequest(self):
if self.Request.user.is_authenticated():
return super(handleLoginView, self).handleRequest()
if self.Post.has_key('loginToken'):
loginItems = self.Post['loginToken'].split(",")
if len(loginItems) == 2:
device = DeviceUser.objects.get(pk=loginItems[0])
if device is not None and device.User is not None and device.checkToken(loginItems[1]):
login(self.Request, device.User)
return super(handleLoginView, self).handleRequest()
def getTemplate(self):
if self.Request.user != None and self.Request.user.is_authenticated():
if self.Post.has_key('next') and self.Post['next'] != '':
self.redirect(self.Post['next'])
else:
self.redirect(reverse('Home'))
return ''
return 'OctaHomeCore/pages/Account/Login'
def getViewParameters(self):
parameters = {}
if self.Post.has_key('next'):
parameters.update({ 'next':self.Post['next'] })
if self.loginToken != '' and self.Post.has_key('username'):
parameters.update({ 'username':self.Post['username'], 'logintoken':self.loginToken })
return parameters
def getSideBar(self):
return []
def getSidebarUrlName(self):
return ''
def isPageSecured(self):
return False
class handleLogOutView(viewRequestHandler):
def handleRequest(self):
logout(self.Request)
return redirect(reverse('Home')) | from django.contrib.auth import authenticate, login, logout
from OctaHomeCore.baseviews import *
from OctaHomeCore.models import *
class handleLoginView(viewRequestHandler):
loginToken = ''
def handleRequest(self):
if self.Request.user.is_authenticated():
return super(handleLoginView, self).handleRequest()
if self.Post.has_key('username') and self.Post.has_key('password'):
user = authenticate(username=self.Post['username'], password=self.Post['password'])
if user is not None and user.authy_id != "":
self.loginToken = user.get_login_token()
elif user is not None:
login(self.Request, user)
elif self.Post.has_key('authytoken') and self.Post.has_key('logintoken'):
user = CustomUser().objects.authyCheck(self.Post['username'], self.Post['logintoken'], self.Post['authytoken'])
if user is not None:
login(self.Request, user)
return super(handleLoginView, self).handleRequest()
def getTemplate(self):
if self.Request.user != None and self.Request.user.is_authenticated():
if self.Post.has_key('next') and self.Post['next'] != '':
self.redirect(self.Post['next'])
else:
self.redirect(reverse('Home'))
return ''
if self.loginToken:
return 'OctaHomeCore/pages/Account/AuthyLogin'
else:
return 'OctaHomeCore/pages/Account/Login'
def getViewParameters(self):
parameters = {}
if self.Post.has_key('next'):
parameters.update({ 'next':self.Post['next'] })
if self.loginToken != '' and self.Post.has_key('username'):
parameters.update({ 'username':self.Post['username'], 'logintoken':self.loginToken })
return parameters
def getSideBar(self):
return []
def getSidebarUrlName(self):
return ''
def isPageSecured(self):
return False
class handleDeviceLoginView(viewRequestHandler):
def handleRequest(self):
if self.Request.user.is_authenticated():
return super(handleLoginView, self).handleRequest()
if self.Post.has_key('loginToken'):
loginItems = self.Post['loginToken'].split(",")
if len(loginItems) == 2:
device = DeviceUser.objects.get(pk=loginItems[0])
if device is not None and device.User is not None and device.checkToken(loginItems[1]):
login(self.Request, device.User)
return super(handleLoginView, self).handleRequest()
def getTemplate(self):
if self.Request.user != None and self.Request.user.is_authenticated():
if self.Post.has_key('next') and self.Post['next'] != '':
self.redirect(self.Post['next'])
else:
self.redirect(reverse('Home'))
return ''
return 'OctaHomeCore/pages/Account/Login'
def getViewParameters(self):
parameters = {}
if self.Post.has_key('next'):
parameters.update({ 'next':self.Post['next'] })
if self.loginToken != '' and self.Post.has_key('username'):
parameters.update({ 'username':self.Post['username'], 'logintoken':self.loginToken })
return parameters
def getSideBar(self):
return []
def getSidebarUrlName(self):
return ''
def isPageSecured(self):
return False
class handleLogOutView(viewRequestHandler):
def handleRequest(self):
logout(self.Request)
return redirect(reverse('Home')) | Python | 0 |
caff96633ce29a2139bc61bb5ee333efd69d50ef | Remove default classifier path from default config | processmysteps/default_config.py | processmysteps/default_config.py | """
Base line settings
"""
CONFIG = {
'input_path': None,
'backup_path': None,
'dest_path': None,
'life_all': None,
'db': {
'host': None,
'port': None,
'name': None,
'user': None,
'pass': None
},
# 'preprocess': {
# 'max_acc': 30.0
# },
'smoothing': {
'use': True,
'algorithm': 'inverse',
'noise': 10
},
'segmentation': {
'use': True,
'epsilon': 1.0,
'min_time': 80
},
'simplification': {
'max_dist_error': 2.0,
'max_speed_error': 1.0,
'eps': 0.15
},
'location': {
'max_distance': 20,
'min_samples': 2,
'limit': 5,
'google_key': ''
},
'transportation': {
'remove_stops': False,
'min_time': 60,
'classifier_path': None#'classifier.data'# None
},
'trip_learning': {
'epsilon': 0.0,
'classifier_path': None,
},
'trip_name_format': '%Y-%m-%d'
}
| """
Base line settings
"""
CONFIG = {
'input_path': None,
'backup_path': None,
'dest_path': None,
'life_all': None,
'db': {
'host': None,
'port': None,
'name': None,
'user': None,
'pass': None
},
# 'preprocess': {
# 'max_acc': 30.0
# },
'smoothing': {
'use': True,
'algorithm': 'inverse',
'noise': 10
},
'segmentation': {
'use': True,
'epsilon': 1.0,
'min_time': 80
},
'simplification': {
'max_dist_error': 2.0,
'max_speed_error': 1.0,
'eps': 0.15
},
'location': {
'max_distance': 20,
'min_samples': 2,
'limit': 5,
'google_key': ''
},
'transportation': {
'remove_stops': False,
'min_time': 60,
'classifier_path': 'classifier.data'# None
},
'trip_learning': {
'epsilon': 0.0,
'classifier_path': None,
},
'trip_name_format': '%Y-%m-%d'
}
| Python | 0.000001 |
22f9b4bacbb0662d3c4de67218ff43cea9588f66 | Add keyword argument handling to unicode decorator | crypto_enigma/utils.py | crypto_enigma/utils.py | #!/usr/bin/env python
# encoding: utf8
# Copyright (C) 2015 by Roy Levien.
# This file is part of crypto-enigma, an Enigma Machine simulator.
# released under the BSD-3 License (see LICENSE.txt).
"""
Description
.. note::
Any additional note.
"""
from __future__ import (absolute_import, print_function, division, unicode_literals)
import time
import sys
# TBD - Generalize to other platforms; test?
def print_over(s, backup=True, delay=0.2):
if backup:
print('', end='\r')
print("\033[F" * (s.count('\n')+2))
print(s)
sys.stdout.flush()
time.sleep(delay)
def num_A0(c):
return ord(c) - ord('A')
def chr_A0(n):
return chr(n + ord('A'))
def ordering(items):
return [i[1] for i in sorted(zip(items, range(0, len(items))))]
# standard simple-substitution cypher encoding
def encode_char(mapping, ch):
if ch == ' ':
return ' '
else:
return mapping[num_A0(ch)]
def encode_string(mapping, string):
return ''.join([encode_char(mapping, ch) for ch in string])
# scan, because it's missing from Python; implemented to anticipate Python 3
def accumulate(l, f):
it = iter(l)
total = next(it)
yield total
for element in it:
total = f(total, element)
yield total
# also missing from Python
def chunk_of(it, n):
return [it[i:i+n] for i in range(0, len(it), n)]
# require unicode strings (see unicode_literal in enigma.py)
# http://stackoverflow.com/a/33743668/656912
# http://code.activestate.com/recipes/454322-type-checking-decorator/
def require_unicode(*given_arg_names):
def check_types(_func_, *args, **kwargs):
def modified(*args, **kwargs):
arg_names = list(_func_.func_code.co_varnames[:_func_.func_code.co_argcount])
if len(given_arg_names) == 0:
unicode_arg_names = arg_names
#unicode_arg_names = arg_names
else:
unicode_arg_names = given_arg_names
for unicode_arg_name in unicode_arg_names:
try:
arg_index = arg_names.index(unicode_arg_name)
if len(args) > arg_index:
arg = args[arg_index]
elif unicode_arg_name in kwargs:
arg = kwargs[unicode_arg_name]
else:
# Not given as argument, even though in list
continue
if not isinstance(arg, unicode):
raise TypeError("Parameter '{}' should be Unicode".format(unicode_arg_name))
except ValueError:
raise NameError(unicode_arg_name)
return _func_(*args, **kwargs)
return modified
return check_types
| #!/usr/bin/env python
# encoding: utf8
# Copyright (C) 2015 by Roy Levien.
# This file is part of crypto-enigma, an Enigma Machine simulator.
# released under the BSD-3 License (see LICENSE.txt).
"""
Description
.. note::
Any additional note.
"""
from __future__ import (absolute_import, print_function, division, unicode_literals)
import time
import sys
# TBD - Generalize to other platforms; test?
def print_over(s, backup=True, delay=0.2):
if backup:
print('', end='\r')
print("\033[F" * (s.count('\n')+2))
print(s)
sys.stdout.flush()
time.sleep(delay)
def num_A0(c):
return ord(c) - ord('A')
def chr_A0(n):
return chr(n + ord('A'))
def ordering(items):
return [i[1] for i in sorted(zip(items, range(0, len(items))))]
# standard simple-substitution cypher encoding
def encode_char(mapping, ch):
if ch == ' ':
return ' '
else:
return mapping[num_A0(ch)]
def encode_string(mapping, string):
return ''.join([encode_char(mapping, ch) for ch in string])
# scan, because it's missing from Python; implemented to anticipate Python 3
def accumulate(l, f):
it = iter(l)
total = next(it)
yield total
for element in it:
total = f(total, element)
yield total
# also missing from Python
def chunk_of(it, n):
return [it[i:i+n] for i in range(0, len(it), n)]
# require unicode strings (see unicode_literal in enigma.py) - http://stackoverflow.com/a/33743668/656912
def require_unicode(*given_arg_names):
def check_types(_func_, *args):
def modified(*args):
arg_names = list(_func_.func_code.co_varnames[:_func_.func_code.co_argcount])
if len(given_arg_names) == 0:
unicode_arg_names = arg_names
else:
unicode_arg_names = given_arg_names
for unicode_arg_name in unicode_arg_names:
try:
arg_index = arg_names.index(unicode_arg_name)
except ValueError:
raise NameError(unicode_arg_name)
arg = args[arg_index]
if not isinstance(arg, unicode):
raise TypeError("Parameter '{}' should be Unicode".format(unicode_arg_name))
return _func_(*args)
return modified
return check_types
| Python | 0.000001 |
d8fc3888f0b40a8b7a476fc3fec0ca3dfe7a2416 | make API able to work with single names | gender.py | gender.py | import requests, json
def getGenders(names):
url = ""
cnt = 0
if not isinstance(names,list):
names = [names,]
for name in names:
if url == "":
url = "name[0]=" + name
else:
cnt += 1
url = url + "&name[" + str(cnt) + "]=" + name
req = requests.get("http://api.genderize.io?" + url)
results = json.loads(req.text)
if len(names)==1 :
results = [ results, ]
retrn = []
for result in results:
if result["gender"] is not None:
retrn.append((result["gender"], result["probability"], result["count"]))
else:
retrn.append((u'None',u'0.0',0.0))
return retrn
if __name__ == '__main__':
print(getGenders(["Brian","Apple","Jessica","Zaeem","NotAName"]))
| import requests, json
def getGenders(names):
url = ""
cnt = 0
for name in names:
if url == "":
url = "name[0]=" + name
else:
cnt += 1
url = url + "&name[" + str(cnt) + "]=" + name
req = requests.get("http://api.genderize.io?" + url)
results = json.loads(req.text)
retrn = []
for result in results:
if result["gender"] is not None:
retrn.append((result["gender"], result["probability"], result["count"]))
else:
retrn.append((u'None',u'0.0',0.0))
return retrn
if __name__ == '__main__':
print getGenders(["Brian","Apple","Jessica","Zaeem","NotAName"])
| Python | 0 |
fc6c6f9ecbf694198c650cf86151423226304c51 | put import statement in try | alphatwirl/delphes/load_delphes.py | alphatwirl/delphes/load_delphes.py | # Tai Sakuma <tai.sakuma@cern.ch>
try:
import ROOT
except ImportError:
pass
_loaded = False
##__________________________________________________________________||
def load_delphes():
global _loaded
if _loaded:
return
# https://root.cern.ch/phpBB3/viewtopic.php?t=21603
ROOT.gInterpreter.Declare('#include "classes/DelphesClasses.h"')
# https://cp3.irmp.ucl.ac.be/projects/delphes/ticket/1039
ROOT.gInterpreter.Declare('#include "external/ExRootAnalysis/ExRootTreeReader.h"')
ROOT.gSystem.Load("libDelphes.so")
_loaded = True
##__________________________________________________________________||
| # Tai Sakuma <tai.sakuma@cern.ch>
import ROOT
_loaded = False
##__________________________________________________________________||
def load_delphes():
global _loaded
if _loaded:
return
# https://root.cern.ch/phpBB3/viewtopic.php?t=21603
ROOT.gInterpreter.Declare('#include "classes/DelphesClasses.h"')
# https://cp3.irmp.ucl.ac.be/projects/delphes/ticket/1039
ROOT.gInterpreter.Declare('#include "external/ExRootAnalysis/ExRootTreeReader.h"')
ROOT.gSystem.Load("libDelphes.so")
_loaded = True
##__________________________________________________________________||
| Python | 0.000001 |
1eb648b14c52c9a2e715774ec71b2c8e6228efc4 | add vtkNumpy.numpyToImageData() function | src/python/director/vtkNumpy.py | src/python/director/vtkNumpy.py | from director.shallowCopy import shallowCopy
import director.vtkAll as vtk
from vtk.util import numpy_support
import numpy as np
def numpyToPolyData(pts, pointData=None, createVertexCells=True):
pd = vtk.vtkPolyData()
pd.SetPoints(getVtkPointsFromNumpy(pts.copy()))
if pointData is not None:
for key, value in pointData.iteritems():
addNumpyToVtk(pd, value.copy(), key)
if createVertexCells:
f = vtk.vtkVertexGlyphFilter()
f.SetInputData(pd)
f.Update()
pd = shallowCopy(f.GetOutput())
return pd
def numpyToImageData(img, flip=True, vtktype=vtk.VTK_UNSIGNED_CHAR):
if flip:
img = np.flipud(img)
height, width, numChannels = img.shape
image = vtk.vtkImageData()
image.SetDimensions(width, height, 1)
image.AllocateScalars(vtktype, numChannels)
scalars = getNumpyFromVtk(image, 'ImageScalars')
scalars[:] = img.reshape(width*height, numChannels)[:]
return image
def getNumpyFromVtk(dataObj, arrayName='Points', arrayType='points'):
assert arrayType in ('points', 'cells')
if arrayName == 'Points':
vtkArray = dataObj.GetPoints().GetData()
elif arrayType == 'points':
vtkArray = dataObj.GetPointData().GetArray(arrayName)
else:
vtkArray = dataObj.GetCellData().GetArray(arrayName)
if not vtkArray:
raise KeyError('Array not found')
return numpy_support.vtk_to_numpy(vtkArray)
def getVtkPointsFromNumpy(numpyArray):
points = vtk.vtkPoints()
points.SetData(getVtkFromNumpy(numpyArray))
return points
def getVtkPolyDataFromNumpyPoints(points):
return numpyToPolyData(points)
def getVtkFromNumpy(numpyArray):
def MakeCallback(numpyArray):
def Closure(caller, event):
closureArray = numpyArray
return Closure
vtkArray = numpy_support.numpy_to_vtk(numpyArray)
vtkArray.AddObserver('DeleteEvent', MakeCallback(numpyArray))
return vtkArray
def addNumpyToVtk(dataObj, numpyArray, arrayName, arrayType='points'):
assert arrayType in ('points', 'cells')
vtkArray = getVtkFromNumpy(numpyArray)
vtkArray.SetName(arrayName)
if arrayType == 'points':
assert dataObj.GetNumberOfPoints() == numpyArray.shape[0]
dataObj.GetPointData().AddArray(vtkArray)
else:
assert dataObj.GetNumberOfCells() == numpyArray.shape[0]
dataObj.GetCellData().AddArray(vtkArray)
| from director.shallowCopy import shallowCopy
import director.vtkAll as vtk
from vtk.util import numpy_support
import numpy as np
def numpyToPolyData(pts, pointData=None, createVertexCells=True):
pd = vtk.vtkPolyData()
pd.SetPoints(getVtkPointsFromNumpy(pts.copy()))
if pointData is not None:
for key, value in pointData.iteritems():
addNumpyToVtk(pd, value.copy(), key)
if createVertexCells:
f = vtk.vtkVertexGlyphFilter()
f.SetInputData(pd)
f.Update()
pd = shallowCopy(f.GetOutput())
return pd
def getNumpyFromVtk(dataObj, arrayName='Points', arrayType='points'):
assert arrayType in ('points', 'cells')
if arrayName == 'Points':
vtkArray = dataObj.GetPoints().GetData()
elif arrayType == 'points':
vtkArray = dataObj.GetPointData().GetArray(arrayName)
else:
vtkArray = dataObj.GetCellData().GetArray(arrayName)
if not vtkArray:
raise KeyError('Array not found')
return numpy_support.vtk_to_numpy(vtkArray)
def getVtkPointsFromNumpy(numpyArray):
points = vtk.vtkPoints()
points.SetData(getVtkFromNumpy(numpyArray))
return points
def getVtkPolyDataFromNumpyPoints(points):
return numpyToPolyData(points)
def getVtkFromNumpy(numpyArray):
def MakeCallback(numpyArray):
def Closure(caller, event):
closureArray = numpyArray
return Closure
vtkArray = numpy_support.numpy_to_vtk(numpyArray)
vtkArray.AddObserver('DeleteEvent', MakeCallback(numpyArray))
return vtkArray
def addNumpyToVtk(dataObj, numpyArray, arrayName, arrayType='points'):
assert arrayType in ('points', 'cells')
vtkArray = getVtkFromNumpy(numpyArray)
vtkArray.SetName(arrayName)
if arrayType == 'points':
assert dataObj.GetNumberOfPoints() == numpyArray.shape[0]
dataObj.GetPointData().AddArray(vtkArray)
else:
assert dataObj.GetNumberOfCells() == numpyArray.shape[0]
dataObj.GetCellData().AddArray(vtkArray)
| Python | 0.000004 |
981e9a2348953374cc18669318d1d7e92197e0e1 | Update clinical trials | providers/gov/clinicaltrials/normalizer.py | providers/gov/clinicaltrials/normalizer.py | import pendulum
from share.normalize import *
class Tag(Parser):
name = ctx
class ThroughTags(Parser):
tag = Delegate(Tag, ctx)
class AgentIdentifier(Parser):
# email address
uri = IRI(ctx)
class WorkIdentifier(Parser):
uri = IRI(ctx)
class AffiliatedAgent(Parser):
schema = GuessAgentType(ctx, default='organization')
name = ctx
class IsAffiliatedWith(Parser):
related = Delegate(AffiliatedAgent, ctx)
class Institution(Parser):
name = OneOf(ctx.agency, ctx.facility.name, ctx)
location = RunPython('get_location', Try(ctx.facility.address))
class Extra:
agency_class = Try(ctx.agency_class)
def get_location(self, ctx):
location = ""
if 'country' in ctx:
location += ctx['country'] + ': '
if 'city' in ctx:
location += ctx['city'] + ', '
if 'state' in ctx:
location += ctx['state'] + ' '
return location
class Person(Parser):
given_name = Maybe(ctx, 'first_name')
family_name = Maybe(ctx, 'last_name')
additional_name = Maybe(ctx, 'middle_name')
identifiers = Map(Delegate(AgentIdentifier), Try(ctx.email))
related_agents = Map(Delegate(IsAffiliatedWith), Try(ctx.affiliation))
class Contributor(Parser):
agent = Delegate(Person, ctx)
class Funder(Parser):
agent = Delegate(Institution, ctx)
class CreativeWork(Parser):
title = OneOf(
ctx.clinical_study.official_title,
ctx.clinical_study.brief_title
)
description = Maybe(ctx.clinical_study, 'brief_summary')['textblock']
related_agents = Concat(
Map(Delegate(Contributor), Maybe(ctx.clinical_study, 'overall_official')),
Map(Delegate(Contributor), Maybe(ctx.clinical_study, 'overall_contact')),
Map(Delegate(Contributor), Maybe(ctx.clinical_study, 'overall_contact_backup')),
Map(Delegate(Funder),
Concat(ctx.clinical_study.sponsors.lead_sponsor,
Maybe(ctx.clinical_study.sponsors, 'collaborator'),
RunPython('get_locations', Concat(Try(ctx.clinical_study.location)))))
)
tags = Map(Delegate(ThroughTags), Maybe(ctx.clinical_study, 'keyword'))
identifiers = Concat(Map(Delegate(WorkIdentifier), Concat(
ctx['clinical_study']['required_header']['url'],
RunPython('format_url', ctx.clinical_study.id_info.nct_id, 'http://www.bioportfolio.com/resources/trial/'),
RunPython('format_url', Try(ctx.clinical_study.reference.PMID), 'www.ncbi.nlm.nih.gov/pubmed/'))))
class Extra:
share_harvest_date = ctx.clinical_study.required_header.download_date
org_study_id = ctx.clinical_study.id_info.org_study_id
status = ctx.clinical_study.overall_status
start_date = RunPython('parse_date', Try(ctx.clinical_study.start_date))
completion_date = RunPython('parse_date', Try(ctx.clinical_study.completion_date['#text']))
completion_date_type = Try(ctx.clinical_study.completion_date['@type'])
study_type = ctx.clinical_study.study_type
conditions = ctx.clinical_study.condition
is_fda_regulated = ctx.clinical_study.is_fda_regulated
is_section_801 = Try(ctx.clinical_study.is_section_801)
citation = Try(ctx.clinical_study.reference.citation)
def get_locations(self, locations):
results = []
for location in locations:
if 'name' in location['facility']:
results.append(location)
return results
def parse_date(self, date):
try:
return pendulum.from_format(date, '%M %d, %Y').isoformat()
except ValueError:
return pendulum.from_format(date, '%B %Y').isoformat()
def format_url(self, id, base):
return base + id
| import pendulum
from share.normalize import *
class Tag(Parser):
name = ctx
class ThroughTags(Parser):
tag = Delegate(Tag, ctx)
class AgentIdentifier(Parser):
# email address
uri = IRI(ctx)
class WorkIdentifier(Parser):
uri = IRI(ctx)
class AffiliatedAgent(Parser):
schema = GuessAgentType(ctx, default='organization')
name = ctx
class IsAffiliatedWith(Parser):
related = Delegate(AffiliatedAgent, ctx)
class Institution(Parser):
name = OneOf(ctx.agency, ctx.facility.name, ctx)
location = RunPython('get_location', Try(ctx.facility.address))
class Extra:
agency_class = Try(ctx.agency_class)
def get_location(self, ctx):
location = ""
if 'country' in ctx:
location += ctx['country'] + ': '
if 'city' in ctx:
location += ctx['city'] + ', '
if 'state' in ctx:
location += ctx['state'] + ' '
return location
class Person(Parser):
given_name = Maybe(ctx, 'first_name')
family_name = Maybe(ctx, 'last_name')
additional_name = Maybe(ctx, 'middle_name')
identifiers = Map(Delegate(AgentIdentifier), Try(ctx.email))
related_agents = Map(Delegate(IsAffiliatedWith), Try(ctx.affiliation))
class Contributor(Parser):
agent = Delegate(Person, ctx)
class CreativeWork(Parser):
title = OneOf(
ctx.clinical_study.official_title,
ctx.clinical_study.brief_title
)
description = Maybe(ctx.clinical_study, 'brief_summary')['textblock']
related_agents = Concat(
Map(Delegate(Contributor), Maybe(ctx.clinical_study, 'overall_official')),
Map(Delegate(Contributor), Maybe(ctx.clinical_study, 'overall_contact')),
Map(Delegate(Contributor), Maybe(ctx.clinical_study, 'overall_contact_backup')),
Map(Delegate(Institution),
Concat(ctx.clinical_study.sponsors.lead_sponsor,
Maybe(ctx.clinical_study.sponsors, 'collaborator'),
RunPython('get_locations', Concat(Try(ctx.clinical_study.location)))))
)
tags = Map(Delegate(ThroughTags), Maybe(ctx.clinical_study, 'keyword'))
identifiers = Concat(Map(Delegate(WorkIdentifier), Concat(
ctx['clinical_study']['required_header']['url'],
RunPython('format_url', ctx.clinical_study.id_info.nct_id, 'http://www.bioportfolio.com/resources/trial/'),
RunPython('format_url', Try(ctx.clinical_study.reference.PMID), 'www.ncbi.nlm.nih.gov/pubmed/'))))
class Extra:
share_harvest_date = ctx.clinical_study.required_header.download_date
org_study_id = ctx.clinical_study.id_info.org_study_id
status = ctx.clinical_study.overall_status
start_date = RunPython('parse_date', Try(ctx.clinical_study.start_date))
completion_date = RunPython('parse_date', Try(ctx.clinical_study.completion_date['#text']))
completion_date_type = Try(ctx.clinical_study.completion_date['@type'])
study_type = ctx.clinical_study.study_type
conditions = ctx.clinical_study.condition
is_fda_regulated = ctx.clinical_study.is_fda_regulated
is_section_801 = Try(ctx.clinical_study.is_section_801)
citation = Try(ctx.clinical_study.reference.citation)
def get_locations(self, locations):
results = []
for location in locations:
if 'name' in location['facility']:
results.append(location)
return results
def parse_date(self, date):
try:
return pendulum.from_format(date, '%M %d, %Y').isoformat()
except ValueError:
return pendulum.from_format(date, '%B %Y').isoformat()
def format_url(self, id, base):
return base + id
| Python | 0 |
f95ab3d2e9a9fc7c92698aded033f4860225c718 | Add rate reporting for oadoi importer | backend/oadoi.py | backend/oadoi.py | # -*- encoding: utf-8 -*-
import gzip
import json
from django.db import DataError
from datetime import datetime
from papers.models import Paper
from papers.models import OaiSource
from papers.baremodels import BareOaiRecord
from papers.doi import doi_to_crossref_identifier
from papers.doi import doi_to_url
from papers.doi import to_doi
from backend.doiprefixes import free_doi_prefixes
from papers.errors import MetadataSourceException
class OadoiAPI(object):
"""
An interface to import an OAdoi dump into dissemin
"""
def __init__(self):
self.oadoi_source, _ = OaiSource.objects.get_or_create(
identifier='oadoi_repo',
defaults=
{'name':'OAdoi',
'oa':True,
'priority':-10,
'default_pubtype':'preprint'})
self.crossref_source = OaiSource.objects.get(identifier='crossref')
def load_dump(self, filename, start_doi=None, update_index=False, create_missing_dois=True):
"""
Reads a dump from the disk and loads it to the db
"""
last_rate_report = None
report_batch_size = 1000
with gzip.open(filename, 'r') as f:
start_doi_seen = start_doi is None
for idx, line in enumerate(f):
record = json.loads(line.decode('utf-8'))
if not start_doi_seen and record.get('doi') == start_doi:
start_doi_seen = True
if idx % report_batch_size == 0:
print(idx, record.get('doi'))
if last_rate_report:
td = (datetime.utcnow() - last_rate_report).total_seconds()
if td:
print('importing speed: {} lines/sec'.format(report_batch_size/float(td)))
last_rate_report = datetime.utcnow()
if start_doi_seen:
self.create_oairecord(record, update_index, create_missing_dois)
def create_oairecord(self, record, update_index=True, create_missing_dois=True):
"""
Given one line of the dump (represented as a dict),
add it to the corresponding paper (if it exists)
"""
doi = to_doi(record['doi'])
if not doi:
return
prefix = doi.split('/')[0]
if prefix in free_doi_prefixes:
return
if not record.get('oa_locations'):
return
paper = Paper.get_by_doi(doi)
if not paper:
if not create_missing_dois:
return
try:
paper = Paper.create_by_doi(doi)
except (MetadataSourceException, ValueError):
return
if not paper:
print('no such paper for doi {doi}'.format(doi=doi))
return
print(doi)
paper.cache_oairecords()
for oa_location in record.get('oa_locations') or []:
url = oa_location['url']
# just to speed things up a bit...
if paper.pdf_url == url:
return
identifier='oadoi:'+url
source = self.oadoi_source
if oa_location['host_type'] == 'publisher':
url = doi_to_url(doi)
identifier = doi_to_crossref_identifier(doi)
source = self.crossref_source
record = BareOaiRecord(
paper=paper,
doi=doi,
pubtype=paper.doctype,
source=source,
identifier=identifier,
splash_url=url,
pdf_url=oa_location['url'])
try:
# We disable checks by DOI since we know the paper has been looked up by DOI already.
old_pdf_url = paper.pdf_url
paper.add_oairecord(record, check_by_doi=False)
super(Paper, paper).update_availability()
if old_pdf_url != paper.pdf_url:
paper.save()
if update_index:
paper.update_index()
except (DataError, ValueError):
print('Record does not fit in the DB')
| # -*- encoding: utf-8 -*-
import gzip
import json
from django.db import DataError
from papers.models import Paper
from papers.models import OaiSource
from papers.baremodels import BareOaiRecord
from papers.doi import doi_to_crossref_identifier
from papers.doi import doi_to_url
from papers.doi import to_doi
from backend.doiprefixes import free_doi_prefixes
from papers.errors import MetadataSourceException
class OadoiAPI(object):
"""
An interface to import an OAdoi dump into dissemin
"""
def __init__(self):
self.oadoi_source, _ = OaiSource.objects.get_or_create(
identifier='oadoi_repo',
defaults=
{'name':'OAdoi',
'oa':True,
'priority':-10,
'default_pubtype':'preprint'})
self.crossref_source = OaiSource.objects.get(identifier='crossref')
def load_dump(self, filename, start_doi=None, update_index=False, create_missing_dois=True):
"""
Reads a dump from the disk and loads it to the db
"""
with gzip.open(filename, 'r') as f:
start_doi_seen = start_doi is None
for idx, line in enumerate(f):
record = json.loads(line.decode('utf-8'))
if not start_doi_seen and record.get('doi') == start_doi:
start_doi_seen = True
if idx % 10000 == 0:
print(idx, record.get('doi'))
if start_doi_seen:
self.create_oairecord(record, update_index, create_missing_dois)
def create_oairecord(self, record, update_index=True, create_missing_dois=True):
"""
Given one line of the dump (represented as a dict),
add it to the corresponding paper (if it exists)
"""
doi = to_doi(record['doi'])
if not doi:
return
prefix = doi.split('/')[0]
if prefix in free_doi_prefixes:
return
if not record.get('oa_locations'):
return
paper = Paper.get_by_doi(doi)
if not paper:
if not create_missing_dois:
return
try:
paper = Paper.create_by_doi(doi)
except (MetadataSourceException, ValueError):
return
if not paper:
print('no such paper for doi {doi}'.format(doi=doi))
return
print(doi)
paper.cache_oairecords()
for oa_location in record.get('oa_locations') or []:
url = oa_location['url']
# just to speed things up a bit...
if paper.pdf_url == url:
return
identifier='oadoi:'+url
source = self.oadoi_source
if oa_location['host_type'] == 'publisher':
url = doi_to_url(doi)
identifier = doi_to_crossref_identifier(doi)
source = self.crossref_source
record = BareOaiRecord(
paper=paper,
doi=doi,
pubtype=paper.doctype,
source=source,
identifier=identifier,
splash_url=url,
pdf_url=oa_location['url'])
try:
# We disable checks by DOI since we know the paper has been looked up by DOI already.
old_pdf_url = paper.pdf_url
paper.add_oairecord(record, check_by_doi=False)
super(Paper, paper).update_availability()
if old_pdf_url != paper.pdf_url:
paper.save()
if update_index:
paper.update_index()
except (DataError, ValueError):
print('Record does not fit in the DB')
| Python | 0 |
5f522cf58a1566513e874002bdaeb063e8a02497 | Update model and add TODO | server/models/checkup.py | server/models/checkup.py | # -*- coding: utf-8 -*-
from datetime import datetime
from app import db
class Checkup(db.Model):
__tablename__ = 'checkup'
id = db.Column(db.Integer, primary_key=True)
created = db.Column(db.DateTime, default=datetime.utcnow)
# TODO: add one unique constraint on the column group of owner and repo
owner = db.Column(db.String)
repo = db.Column(db.String)
criteria = db.relationship('Criterion', backref='criterion',
lazy='dynamic')
| # -*- coding: utf-8 -*-
from datetime import datetime
from app import db
class Checkup(db.Model):
__tablename__ = 'checkup'
id = db.Column(db.Integer, primary_key=True)
created = db.Column(db.DateTime, default=datetime.utcnow)
repo_name = db.Column(db.String, unique=True) # github-user/repo-name
criteria = db.relationship('Criterion', backref='criterion',
lazy='dynamic')
| Python | 0 |
2f61692dd05f2ef529c9d2556c59eb7bc720b1f7 | Fixed? reset password | oclubs/access/email.py | oclubs/access/email.py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
"""
Module to send emails.
This module sends emails with either Postfix or SendGrid.
"""
from __future__ import absolute_import, unicode_literals
import traceback
from envelopes import Envelope, SMTP
from oclubs.access.delay import delayed_func
from_email = ('no-reply@connect.shs.cn', 'Connect')
@delayed_func
def send(to_email, subject, content):
"""
Send an email.
:param tuple to_email: email recipient address and name
:param basestring subject: email subject
:param basestring content: email content
"""
try:
conn = SMTP('connect.shs.cn', 25)
mail = Envelope(
to_addr=to_email[0],
from_addr=from_email,
subject=subject,
text_body=content
)
conn.send(mail)
except Exception:
traceback.print_exc()
| #!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
"""
Module to send emails.
This module sends emails with either Postfix or SendGrid.
"""
from __future__ import absolute_import, unicode_literals
import traceback
from envelopes import Envelope, SMTP
from oclubs.access.delay import delayed_func
from_email = ('no-reply@connect.shs.cn', 'Connect')
@delayed_func
def send(to_email, subject, content):
"""
Send an email.
:param tuple to_email: email recipient address and name
:param basestring subject: email subject
:param basestring content: email content
"""
try:
conn = SMTP('127.0.0.1', 25)
mail = Envelope(
to_addr=to_email[0],
from_addr=from_email,
subject=subject,
text_body=content
)
conn.send(mail)
except Exception:
traceback.print_exc()
| Python | 0.999775 |
fb1ddcdd789d1c1be02a9f6d63a21548a8cf584e | Fix undo of PlatformPhysicsOperation after the SceneNode changes | printer/PlatformPhysicsOperation.py | printer/PlatformPhysicsOperation.py | from UM.Operations.Operation import Operation
from UM.Operations.AddSceneNodeOperation import AddSceneNodeOperation
from UM.Operations.TranslateOperation import TranslateOperation
from UM.Operations.GroupedOperation import GroupedOperation
## A specialised operation designed specifically to modify the previous operation.
class PlatformPhysicsOperation(Operation):
def __init__(self, node, translation):
super().__init__()
self._node = node
self._old_position = node.getPosition()
self._new_position = node.getPosition() + translation
self._always_merge = True
def undo(self):
self._node.setPosition(self._old_position)
def redo(self):
self._node.setPosition(self._new_position)
def mergeWith(self, other):
group = GroupedOperation()
group.addOperation(self)
group.addOperation(other)
return group
def __repr__(self):
return 'PlatformPhysicsOperation(t = {0})'.format(self._position)
| from UM.Operations.Operation import Operation
from UM.Operations.AddSceneNodeOperation import AddSceneNodeOperation
from UM.Operations.TranslateOperation import TranslateOperation
from UM.Operations.GroupedOperation import GroupedOperation
## A specialised operation designed specifically to modify the previous operation.
class PlatformPhysicsOperation(Operation):
def __init__(self, node, translation):
super().__init__()
self._node = node
self._transform = node.getLocalTransformation()
self._position = node.getPosition() + translation
self._always_merge = True
def undo(self):
self._node.setLocalTransformation(self._transform)
def redo(self):
self._node.setPosition(self._position)
def mergeWith(self, other):
group = GroupedOperation()
group.addOperation(self)
group.addOperation(other)
return group
def __repr__(self):
return 'PlatformPhysicsOperation(t = {0})'.format(self._position)
| Python | 0 |
e89c20e1ecfadb7e63a1fe80d821afafb8860352 | add missing import | tfx/experimental/templates/taxi/launcher/stub_component_launcher.py | tfx/experimental/templates/taxi/launcher/stub_component_launcher.py | # Lint as: python3
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Stub component launcher for launching stub executors in KFP."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tfx.experimental.pipeline_testing import stub_component_launcher
from tfx.experimental.templates.taxi.pipeline import configs
class StubComponentLauncher(stub_component_launcher.StubComponentLauncher):
"""Responsible for launching stub executors in KFP Template.
This stub component launcher cannot be defined in the kubeflow_dag_runner.py
because launcher class is imported by the module path.
"""
pass
# GCS directory where KFP outputs are recorded
test_data_dir = "gs://{}/testdata".format(configs.GCS_BUCKET_NAME)
# TODO(StubExecutor): customize self.stubbed_component_ids to replace components
# with BaseStubExecutor
stubbed_component_ids = ['CsvExampleGen', 'StatisticsGen',
'SchemaGen', 'ExampleValidator',
'Trainer', 'Transform', 'Evaluator', 'Pusher']
# TODO(StubExecutor): (Optional) Use stubbed_component_map to insert custom stub
# executor class as a value and component id as a key.
stubbed_component_map = {}
StubComponentLauncher.get_stub_launcher_class(
test_data_dir,
stubbed_component_ids,
stubbed_component_map)
| # Lint as: python3
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Stub component launcher for launching stub executors in KFP."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tfx.experimental.pipeline_testing import stub_component_launcher
class StubComponentLauncher(stub_component_launcher.StubComponentLauncher):
"""Responsible for launching stub executors in KFP Template.
This stub component launcher cannot be defined in the kubeflow_dag_runner.py
because launcher class is imported by the module path.
"""
pass
# GCS directory where KFP outputs are recorded
test_data_dir = "gs://{}/testdata".format(configs.GCS_BUCKET_NAME)
# TODO(StubExecutor): customize self.stubbed_component_ids to replace components
# with BaseStubExecutor
stubbed_component_ids = ['CsvExampleGen', 'StatisticsGen',
'SchemaGen', 'ExampleValidator',
'Trainer', 'Transform', 'Evaluator', 'Pusher']
# TODO(StubExecutor): (Optional) Use stubbed_component_map to insert custom stub
# executor class as a value and component id as a key.
stubbed_component_map = {}
StubComponentLauncher.get_stub_launcher_class(
test_data_dir,
stubbed_component_ids,
stubbed_component_map)
| Python | 0.000042 |
7f4a02f7058c4e7dfd4bbb01ba847e6990b5e391 | update admin | corehq/apps/userreports/admin.py | corehq/apps/userreports/admin.py | from __future__ import absolute_import, unicode_literals
from django.contrib import admin
from .models import AsyncIndicator, DataSourceActionLog, InvalidUCRData
@admin.register(AsyncIndicator)
class AsyncIndicatorAdmin(admin.ModelAdmin):
model = AsyncIndicator
list_display = [
'doc_id',
'doc_type',
'domain',
'indicator_config_ids',
'date_created',
'date_queued',
'unsuccessful_attempts'
]
list_filter = ('doc_type', 'domain', 'unsuccessful_attempts')
search_fields = ('doc_id',)
@admin.register(InvalidUCRData)
class InvalidUCRDataAdmin(admin.ModelAdmin):
model = InvalidUCRData
list_display = [
'doc_id',
'doc_type',
'domain',
'indicator_config_id',
'validation_name',
]
list_filter = ('doc_type', 'domain', 'indicator_config_id', 'validation_name')
search_fields = ('doc_id',)
@admin.register(DataSourceActionLog)
class DataSourceActionLogAdmin(admin.ModelAdmin):
model = DataSourceActionLog
list_display = [
'date_created',
'domain',
'indicator_config_id',
'initiated_by',
'action_source',
'action',
'skip_destructive'
]
list_filter = ('action_source', 'action', 'skip_destructive')
search_fields = ('domain', 'indicator_config_id',)
| from __future__ import absolute_import, unicode_literals
from django.contrib import admin
from .models import AsyncIndicator, DataSourceActionLog, InvalidUCRData
@admin.register(AsyncIndicator)
class AsyncIndicatorAdmin(admin.ModelAdmin):
model = AsyncIndicator
list_display = [
'doc_id',
'doc_type',
'domain',
'indicator_config_ids',
'date_created',
'date_queued',
'unsuccessful_attempts'
]
list_filter = ('doc_type', 'domain', 'unsuccessful_attempts')
search_fields = ('doc_id',)
@admin.register(InvalidUCRData)
class InvalidUCRDataAdmin(admin.ModelAdmin):
model = InvalidUCRData
list_display = [
'doc_id',
'doc_type',
'domain',
'indicator_config_id',
'validation_name',
]
list_filter = ('doc_type', 'domain', 'indicator_config_id', 'validation_name')
search_fields = ('doc_id',)
@admin.register(DataSourceActionLog)
class DataSourceActionLogAdmin(admin.ModelAdmin):
model = DataSourceActionLog
list_display = [
'date_created',
'domain',
'indicator_config_id',
'initiated_by',
'action_source',
'action',
]
list_filter = ('action_source', 'action')
search_fields = ('domain', 'indicator_config_id',)
| Python | 0 |
c10f222bb6de5150087a2ddd26ffbef2f8eeb4a3 | break down method | corehq/apps/users/permissions.py | corehq/apps/users/permissions.py | from collections import namedtuple
from corehq import privileges, toggles
from corehq.apps.accounting.utils import domain_has_privilege
FORM_EXPORT_PERMISSION = 'corehq.apps.reports.standard.export.ExcelExportReport'
DEID_EXPORT_PERMISSION = 'corehq.apps.reports.standard.export.DeidExportReport'
CASE_EXPORT_PERMISSION = 'corehq.apps.reports.standard.export.CaseExportReport'
SMS_EXPORT_PERMISSION = 'corehq.apps.reports.standard.export.SMSExportReport'
EXPORT_PERMISSIONS = {FORM_EXPORT_PERMISSION, DEID_EXPORT_PERMISSION, CASE_EXPORT_PERMISSION}
ReportPermission = namedtuple('ReportPermission', ['slug', 'title', 'is_visible'])
def get_extra_permissions():
from corehq.apps.export.views.list import (
FormExportListView, DeIdFormExportListView, CaseExportListView
)
from corehq.apps.export.views.download import DownloadNewSmsExportView
yield ReportPermission(
FORM_EXPORT_PERMISSION, FormExportListView.page_title, lambda domain: True)
yield ReportPermission(
DEID_EXPORT_PERMISSION, DeIdFormExportListView.page_title,
lambda domain: domain_has_privilege(domain, privileges.DEIDENTIFIED_DATA))
yield ReportPermission(
CASE_EXPORT_PERMISSION, CaseExportListView.page_title, lambda domain: True)
yield ReportPermission(
SMS_EXPORT_PERMISSION, DownloadNewSmsExportView.page_title, lambda domain: True)
def can_download_data_files(domain, couch_user):
from corehq.apps.users.models import DomainMembershipError
try:
role = couch_user.get_role(domain)
except DomainMembershipError:
return False
return toggles.DATA_FILE_DOWNLOAD.enabled(domain) and role.permissions.view_file_dropzone
def can_view_sms_exports(couch_user, domain):
return has_permission_to_view_report(
couch_user, domain, SMS_EXPORT_PERMISSION
)
def has_permission_to_view_report(couch_user, domain, report_to_check):
from corehq.apps.users.decorators import get_permission_name
from corehq.apps.users.models import Permissions
return (
couch_user.can_view_reports(domain) or
couch_user.has_permission(
domain,
get_permission_name(Permissions.view_report),
data=report_to_check
)
)
def can_manage_releases(couch_user, domain, app_id):
if _can_manage_releases_for_all_apps(couch_user, domain):
return True
role = couch_user.get_role(domain)
return app_id in role.permissions.manage_releases_list
def _can_manage_releases_for_all_apps(couch_user, domain):
from corehq.apps.users.decorators import get_permission_name
from corehq.apps.users.models import Permissions
restricted_app_release = toggles.RESTRICT_APP_RELEASE.enabled(domain)
if not restricted_app_release:
return True
return couch_user.has_permission(
domain, get_permission_name(Permissions.manage_releases),
restrict_global_admin=True
)
| from collections import namedtuple
from corehq import privileges, toggles
from corehq.apps.accounting.utils import domain_has_privilege
FORM_EXPORT_PERMISSION = 'corehq.apps.reports.standard.export.ExcelExportReport'
DEID_EXPORT_PERMISSION = 'corehq.apps.reports.standard.export.DeidExportReport'
CASE_EXPORT_PERMISSION = 'corehq.apps.reports.standard.export.CaseExportReport'
SMS_EXPORT_PERMISSION = 'corehq.apps.reports.standard.export.SMSExportReport'
EXPORT_PERMISSIONS = {FORM_EXPORT_PERMISSION, DEID_EXPORT_PERMISSION, CASE_EXPORT_PERMISSION}
ReportPermission = namedtuple('ReportPermission', ['slug', 'title', 'is_visible'])
def get_extra_permissions():
from corehq.apps.export.views.list import (
FormExportListView, DeIdFormExportListView, CaseExportListView
)
from corehq.apps.export.views.download import DownloadNewSmsExportView
yield ReportPermission(
FORM_EXPORT_PERMISSION, FormExportListView.page_title, lambda domain: True)
yield ReportPermission(
DEID_EXPORT_PERMISSION, DeIdFormExportListView.page_title,
lambda domain: domain_has_privilege(domain, privileges.DEIDENTIFIED_DATA))
yield ReportPermission(
CASE_EXPORT_PERMISSION, CaseExportListView.page_title, lambda domain: True)
yield ReportPermission(
SMS_EXPORT_PERMISSION, DownloadNewSmsExportView.page_title, lambda domain: True)
def can_download_data_files(domain, couch_user):
from corehq.apps.users.models import DomainMembershipError
try:
role = couch_user.get_role(domain)
except DomainMembershipError:
return False
return toggles.DATA_FILE_DOWNLOAD.enabled(domain) and role.permissions.view_file_dropzone
def can_view_sms_exports(couch_user, domain):
return has_permission_to_view_report(
couch_user, domain, SMS_EXPORT_PERMISSION
)
def has_permission_to_view_report(couch_user, domain, report_to_check):
from corehq.apps.users.decorators import get_permission_name
from corehq.apps.users.models import Permissions
return (
couch_user.can_view_reports(domain) or
couch_user.has_permission(
domain,
get_permission_name(Permissions.view_report),
data=report_to_check
)
)
def can_manage_releases(couch_user, domain, app_id):
from corehq.apps.users.decorators import get_permission_name
from corehq.apps.users.models import Permissions
restricted_app_release = toggles.RESTRICT_APP_RELEASE.enabled(domain)
if not restricted_app_release:
return True
role = couch_user.get_role(domain)
return (
couch_user.has_permission(
domain, get_permission_name(Permissions.manage_releases),
restrict_global_admin=True
) or
app_id in role.permissions.manage_releases_list)
| Python | 0.028979 |
57e610836297ef136b892ea1cdea5fe9109c45fa | Change the way that test objects are named. | integration/testing.py | integration/testing.py | # Copyright (c) 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import unittest
from pylxd.client import Client
from integration.busybox import create_busybox_image
class IntegrationTestCase(unittest.TestCase):
"""A base test case for pylxd integration tests."""
def setUp(self):
super(IntegrationTestCase, self).setUp()
self.client = Client()
self.lxd = self.client.api
def generate_object_name(self):
test = self.id().split('.')[-1]
rando = str(uuid.uuid1()).split('-')[-1]
return '{}-{}'.format(test, rando)
def create_container(self):
"""Create a container in lxd."""
name = self._generate_object_name()
machine = {
'name': name,
'architecture': 2,
'profiles': ['default'],
'ephemeral': False,
'config': {'limits.cpu': '2'},
'source': {'type': 'image',
'alias': 'busybox'},
}
result = self.lxd['containers'].post(json=machine)
operation_uuid = result.json()['operation'].split('/')[-1]
result = self.lxd.operations[operation_uuid].wait.get()
self.addCleanup(self.delete_container, name)
return name
def delete_container(self, name, enforce=False):
"""Delete a container in lxd."""
# enforce is a hack. There's a race somewhere in the delete.
# To ensure we don't get an infinite loop, let's count.
count = 0
result = self.lxd['containers'][name].delete()
while enforce and result.status_code == 404 and count < 10:
result = self.lxd['containers'][name].delete()
count += 1
try:
operation_uuid = result.json()['operation'].split('/')[-1]
result = self.lxd.operations[operation_uuid].wait.get()
except KeyError:
pass # 404 cases are okay.
def create_image(self):
"""Create an image in lxd."""
path, fingerprint = create_busybox_image()
with open(path, 'rb') as f:
headers = {
'X-LXD-Public': '1',
}
response = self.lxd.images.post(data=f.read(), headers=headers)
operation_uuid = response.json()['operation'].split('/')[-1]
self.lxd.operations[operation_uuid].wait.get()
alias = self.generate_object_name()
response = self.lxd.images.aliases.post(json={
'description': '',
'target': fingerprint,
'name': alias
})
self.addCleanup(self.delete_image, fingerprint)
return fingerprint, alias
def delete_image(self, fingerprint):
"""Delete an image in lxd."""
self.lxd.images[fingerprint].delete()
def assertCommon(self, response):
"""Assert common LXD responses.
LXD responses are relatively standard. This function makes assertions
to all those standards.
"""
self.assertEqual(response.status_code, response.json()['status_code'])
self.assertEqual(
['metadata', 'operation', 'status', 'status_code', 'type'],
sorted(response.json().keys()))
| # Copyright (c) 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
from pylxd.client import Client
from integration.busybox import create_busybox_image
class IntegrationTestCase(unittest.TestCase):
"""A base test case for pylxd integration tests."""
def setUp(self):
super(IntegrationTestCase, self).setUp()
self.client = Client()
self.lxd = self.client.api
def create_container(self):
"""Create a container in lxd."""
name = self.id().split('.')[-1].replace('_', '')
machine = {
'name': name,
'architecture': 2,
'profiles': ['default'],
'ephemeral': False,
'config': {'limits.cpu': '2'},
'source': {'type': 'image',
'alias': 'busybox'},
}
result = self.lxd['containers'].post(json=machine)
operation_uuid = result.json()['operation'].split('/')[-1]
result = self.lxd.operations[operation_uuid].wait.get()
self.addCleanup(self.delete_container, name)
return name
def delete_container(self, name, enforce=False):
"""Delete a container in lxd."""
#response = self.lxd.containers['name'].get()
#if response == 200:
# enforce is a hack. There's a race somewhere in the delete.
# To ensure we don't get an infinite loop, let's count.
count = 0
result = self.lxd['containers'][name].delete()
while enforce and result.status_code == 404 and count < 10:
result = self.lxd['containers'][name].delete()
count += 1
try:
operation_uuid = result.json()['operation'].split('/')[-1]
result = self.lxd.operations[operation_uuid].wait.get()
except KeyError:
pass # 404 cases are okay.
def create_image(self):
"""Create an image in lxd."""
path, fingerprint = create_busybox_image()
with open(path, 'rb') as f:
headers = {
'X-LXD-Public': '1',
}
response = self.lxd.images.post(data=f.read(), headers=headers)
operation_uuid = response.json()['operation'].split('/')[-1]
self.lxd.operations[operation_uuid].wait.get()
self.addCleanup(self.delete_image, fingerprint)
return fingerprint
def delete_image(self, fingerprint):
"""Delete an image in lxd."""
self.lxd.images[fingerprint].delete()
def assertCommon(self, response):
"""Assert common LXD responses.
LXD responses are relatively standard. This function makes assertions
to all those standards.
"""
self.assertEqual(response.status_code, response.json()['status_code'])
self.assertEqual(
['metadata', 'operation', 'status', 'status_code', 'type'],
sorted(response.json().keys()))
| Python | 0.000002 |
67e3a95d7c3227da0b8a06dc29f0e9e868e55153 | Check file size before calculating md5sum. | danbooru/downloader.py | danbooru/downloader.py | # -*- coding: utf-8 -*-
# Copyright 2012 codestation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import hashlib
import logging
from time import sleep
from os.path import isfile, join, getsize
from urllib.request import urlopen
from urllib.error import URLError, HTTPError
class Downloader(object):
_total = 1
_stop = False
def __init__(self, path):
self.path = path
def stop(self):
logging.debug("Stopping download job")
self._stop = True
def _calculateMD5(self, name):
try:
file = open(name, 'rb')
md5_hash = hashlib.md5()
while True:
d = file.read(128)
if not d:
break
md5_hash.update(d)
file.close()
return md5_hash.hexdigest()
except IOError:
pass
def downloadQueue(self, dl_list, nohash=False, callback=None):
for dl in dl_list:
if self._stop:
break
base = dl.image.md5 + dl.image.file_ext
subdir = dl.image.md5[0]
filename = join(self.path, subdir, base)
if isfile(filename):
if getsize(filename) == dl.image.file_size:
if nohash:
continue
else:
md5 = self._calculateMD5(filename)
if md5:
if md5 == dl.image.md5:
#logging.debug("%s already exists, skipping" % filename)
continue
else:
logging.warning("%s md5sum doesn't match, re-downloading", filename)
else:
logging.warning("%s filesize doesn't match, re-downloading", filename)
else:
logging.warning("%s doesn't exists, re-downloading", filename)
try:
local_file = open(filename, 'wb')
except IOError:
logging.error('Error while creating %s', filename)
continue
retries = 0
start = 0
while not self._stop and retries < 3:
try:
remote_file = urlopen(dl.file_url)
meta = remote_file.info()
if "Content-Length" in meta:
remote_size = int(meta['Content-Length'])
else:
remote_size = -1
if start:
remote_file.seek(start)
while not self._stop:
buf = remote_file.read(16 * 1024)
if not buf:
break
local_file.write(buf)
start += len(buf)
if callback:
callback(base, start, remote_size)
remote_file.close()
local_file.close()
if callback:
sys.stdout.write("\r")
sys.stdout.flush()
if self._stop:
logging.debug('(%i) %s [ABORTED]', self._total, base)
break
logging.debug('(%i) %s [OK]', self._total, base)
self._total += 1
sleep(1)
break
except URLError as e:
logging.error('>>> Error %s', e.reason)
except HTTPError as e:
logging.error('>>> Error %i: %s', e.code, e.msg)
start = local_file.tell()
retries += 1
logging.warning('Retrying (%i) in 2 seconds...', retries)
sleep(2)
| # -*- coding: utf-8 -*-
# Copyright 2012 codestation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import hashlib
import logging
from time import sleep
from os.path import isfile, join
from urllib.request import urlopen
from urllib.error import URLError, HTTPError
class Downloader(object):
_total = 1
_stop = False
def __init__(self, path):
self.path = path
def stop(self):
logging.debug("Stopping download job")
self._stop = True
def _calculateMD5(self, name):
try:
file = open(name, 'rb')
md5_hash = hashlib.md5()
while True:
d = file.read(128)
if not d:
break
md5_hash.update(d)
file.close()
return md5_hash.hexdigest()
except IOError:
pass
def downloadQueue(self, dl_list, nohash=False, callback=None):
for dl in dl_list:
if self._stop:
break
base = dl.image.md5 + dl.image.file_ext
subdir = dl.image.md5[0]
filename = join(self.path, subdir, base)
if nohash and isfile(filename):
#logging.debug("(%i) %s already exists, skipping" % (self._total, filename))
#self._total += 1
continue
md5 = self._calculateMD5(filename)
if md5:
if md5 == dl.image.md5:
#logging.debug("%s already exists, skipping" % filename)
continue
else:
logging.warning("%s md5sum doesn't match, re-downloading" % filename)
try:
local_file = open(filename, 'wb')
except IOError:
logging.error('Error while creating %s' % filename)
continue
retries = 0
start = 0
while not self._stop and retries < 3:
try:
remote_file = urlopen(dl.file_url)
meta = remote_file.info()
if "Content-Length" in meta:
remote_size = int(meta['Content-Length'])
else:
remote_size = -1
if start:
remote_file.seek(start)
while not self._stop:
buf = remote_file.read(16 * 1024)
if not buf:
break
local_file.write(buf)
start += len(buf)
if callback:
callback(base, start, remote_size)
remote_file.close()
local_file.close()
if callback:
sys.stdout.write("\r")
sys.stdout.flush()
if self._stop:
logging.debug('(%i) %s [ABORTED]' % (self._total, base))
break
logging.debug('(%i) %s [OK]' % (self._total, base))
self._total += 1
sleep(1)
break
except URLError as e:
logging.error('>>> Error %s' % e.reason)
except HTTPError as e:
logging.error('>>> Error %i: %s' % (e.code, e.msg))
start = local_file.tell()
retries += 1
logging.warning('Retrying (%i) in 2 seconds...' % retries)
sleep(2)
| Python | 0 |
5450303c975e34265f6fda3c014b9aed7d002a3c | Fix download path, the existing one has been removed from nvidia's site (#10253) | var/spack/repos/builtin/packages/cudnn/package.py | var/spack/repos/builtin/packages/cudnn/package.py | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Cudnn(Package):
"""NVIDIA cuDNN is a GPU-accelerated library of primitives for deep
neural networks"""
homepage = "https://developer.nvidia.com/cudnn"
version('7.3', '72666d3532850752612706601258a0b2',
url='http://developer.download.nvidia.com/compute/redist/cudnn/v7.3.0/cudnn-9.0-linux-x64-v7.3.0.29.tgz')
version('6.0', 'a08ca487f88774e39eb6b0ef6507451d',
url='http://developer.download.nvidia.com/compute/redist/cudnn/v6.0/cudnn-8.0-linux-x64-v6.0.tgz')
version('5.1', '406f4ac7f7ee8aa9e41304c143461a69',
url='http://developer.download.nvidia.com/compute/redist/cudnn/v5.1/cudnn-8.0-linux-x64-v5.1.tgz')
depends_on('cuda@8:')
def install(self, spec, prefix):
install_tree('.', prefix)
| # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Cudnn(Package):
"""NVIDIA cuDNN is a GPU-accelerated library of primitives for deep
neural networks"""
homepage = "https://developer.nvidia.com/cudnn"
version('7.3', '72666d3532850752612706601258a0b2',
url='https://developer.nvidia.com/compute/machine-learning/cudnn/secure/v7.3.0/prod/9.0_2018920/cudnn-9.0-linux-x64-v7.3.0.29.tgz')
version('6.0', 'a08ca487f88774e39eb6b0ef6507451d',
url='http://developer.download.nvidia.com/compute/redist/cudnn/v6.0/cudnn-8.0-linux-x64-v6.0.tgz')
version('5.1', '406f4ac7f7ee8aa9e41304c143461a69',
url='http://developer.download.nvidia.com/compute/redist/cudnn/v5.1/cudnn-8.0-linux-x64-v5.1.tgz')
depends_on('cuda@8:')
def install(self, spec, prefix):
install_tree('.', prefix)
| Python | 0 |
bb042f7bd76e364c3be6791c580b9426a4007627 | fix url and add shared variant (#5358) | var/spack/repos/builtin/packages/latte/package.py | var/spack/repos/builtin/packages/latte/package.py | ##############################################################################
# Copyright (c) 2017, Los Alamos National Security, LLC
# Produced at the Los Alamos National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Latte(CMakePackage):
"""Open source density functional tight binding molecular dynamics."""
homepage = "https://github.com/lanl/latte"
url = "https://github.com/lanl/latte/tarball/v1.0"
version('develop', git='https://github.com/lanl/latte', branch='master')
variant('mpi', default=True,
description='Build with mpi')
variant('progress', default=False,
description='Use progress for fast')
variant('shared', default=True, description='Build shared libs')
depends_on("cmake@3.1:", type='build')
depends_on('blas')
depends_on('lapack')
depends_on('mpi', when='+mpi')
depends_on('qmd-progress', when='+progress')
root_cmakelists_dir = 'cmake'
def cmake_args(self):
options = []
if '+shared' in self.spec:
options.append('-DBUILD_SHARED_LIBS=ON')
else:
options.append('-DBUILD_SHARED_LIBS=OFF')
if '+mpi' in self.spec:
options.append('-DO_MPI=yes')
if '+progress' in self.spec:
options.append('-DPROGRESS=yes')
return options
| ##############################################################################
# Copyright (c) 2017, Los Alamos National Security, LLC
# Produced at the Los Alamos National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Latte(CMakePackage):
"""Open source density functional tight binding molecular dynamics."""
homepage = "https://gitlab.com/exaalt/latte"
url = "https://gitlab.com/exaalt/latte/tags/v1.0"
version('develop', git='https://gitlab.com/exaalt/latte', branch='cmake')
depends_on("cmake@3.1:", type='build')
depends_on('blas')
depends_on('lapack')
root_cmakelists_dir = 'cmake'
def cmake_args(self):
options = ['-DBUILD_SHARED_LIBS=ON']
return options
| Python | 0 |
08b5b565666d42a6802e136fc8e7cf8d355929b0 | add v2019.1 and v2020.1 (#17648) | var/spack/repos/builtin/packages/qhull/package.py | var/spack/repos/builtin/packages/qhull/package.py | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Qhull(CMakePackage):
"""Qhull computes the convex hull, Delaunay triangulation, Voronoi
diagram, halfspace intersection about a point, furt hest-site
Delaunay triangulation, and furthest-site Voronoi diagram. The
source code runs in 2-d, 3-d, 4-d, and higher dimensions. Qhull
implements the Quickhull algorithm for computing the convex
hull. It handles roundoff errors from floating point
arithmetic. It computes volumes, surface areas, and
approximations to the convex hull."""
homepage = "http://www.qhull.org"
version('2020.1', sha256='1ac92a5538f61e297c72aebe4d4ffd731ceb3e6045d6d15faf1c212713798df4',
url="http://www.qhull.org/download/qhull-2020-src-8.0.0.tgz")
version('2019.1', sha256='2b7990558c363076261564f61b74db4d0d73b71869755108a469038c07dc43fb',
url="http://www.qhull.org/download/qhull-2019-src-7.3.2.tgz")
version('2015.2', sha256='78b010925c3b577adc3d58278787d7df08f7c8fb02c3490e375eab91bb58a436',
url="http://www.qhull.org/download/qhull-2015-src-7.2.0.tgz")
version('2012.1', sha256='a35ecaa610550b7f05c3ce373d89c30cf74b059a69880f03080c556daebcff88',
url="http://www.qhull.org/download/qhull-2012.1-src.tgz")
patch('qhull-unused-intel-17.02.patch', when='@2015.2')
depends_on('cmake@3.0:', type='build')
def flag_handler(self, name, flags):
# See https://github.com/qhull/qhull/issues/65
if name == 'cxxflags' and self.version == Version('2020.1'):
flags.append(self.compiler.cxx11_flag)
return (flags, None, None)
| # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Qhull(CMakePackage):
"""Qhull computes the convex hull, Delaunay triangulation, Voronoi
diagram, halfspace intersection about a point, furt hest-site
Delaunay triangulation, and furthest-site Voronoi diagram. The
source code runs in 2-d, 3-d, 4-d, and higher dimensions. Qhull
implements the Quickhull algorithm for computing the convex
hull. It handles roundoff errors from floating point
arithmetic. It computes volumes, surface areas, and
approximations to the convex hull."""
homepage = "http://www.qhull.org"
version('2015.2', sha256='78b010925c3b577adc3d58278787d7df08f7c8fb02c3490e375eab91bb58a436',
url="http://www.qhull.org/download/qhull-2015-src-7.2.0.tgz")
version('2012.1', sha256='a35ecaa610550b7f05c3ce373d89c30cf74b059a69880f03080c556daebcff88',
url="http://www.qhull.org/download/qhull-2012.1-src.tgz")
patch('qhull-unused-intel-17.02.patch', when='@2015.2')
depends_on('cmake@2.6:', type='build')
| Python | 0 |
1f6b1d2aca3995a4ac295f7e6a8ab6bf84d6e79b | add logging for ShotDetectorPlotService | shot_detector/services/shot_detector_service.py | shot_detector/services/shot_detector_service.py | # -*- coding: utf8 -*-
from __future__ import absolute_import, division, print_function
import logging
from shot_detector.detectors import SimpleDetector
from .base_detector_service import BaseDetectorService
from .plot_service import PlotService
from shot_detector.utils.common import yes_no
from shot_detector.utils.log_meta import log_method_call_with
class ShotDetectorPlotService(PlotService, BaseDetectorService):
"""
Simple Shot Detector Service.
"""
def add_arguments(self, parser, **kwargs):
parser = super(ShotDetectorPlotService, self) \
.add_arguments(parser, **kwargs)
parser = self.add_video_arguments(parser, **kwargs)
parser = self.add_plot_arguments(parser, **kwargs)
return parser
def add_video_arguments(self, parser, **kwargs):
parser.add_argument(
'--ff', '--first-frame',
metavar='sec',
dest='first_frame',
type=int,
default=0,
)
parser.add_argument(
'--lf', '--last-frame',
metavar='sec',
dest='last_frame',
type=int,
default=60,
)
parser.add_argument(
'--as', '--as-stream',
default='no',
dest='as_stream',
type=yes_no,
)
return parser
@log_method_call_with(
level=logging.WARN,
logger=logging.getLogger(__name__)
)
def run(self, *kwargs):
options = self.options
detector = SimpleDetector()
detector.detect(
input_uri=options.input_uri,
format=options.format,
service_options=vars(options)
) | # -*- coding: utf8 -*-
from __future__ import absolute_import, division, print_function
import time
from shot_detector.detectors import SimpleDetector
from .base_detector_service import BaseDetectorService
from .plot_service import PlotService
from shot_detector.utils.common import yes_no
class ShotDetectorPlotService(PlotService, BaseDetectorService):
"""
Simple Shot Detector Service.
"""
def add_arguments(self, parser, **kwargs):
parser = super(ShotDetectorPlotService, self) \
.add_arguments(parser, **kwargs)
parser = self.add_video_arguments(parser, **kwargs)
parser = self.add_plot_arguments(parser, **kwargs)
return parser
def add_video_arguments(self, parser, **kwargs):
parser.add_argument(
'--ff', '--first-frame',
metavar='sec',
dest='first_frame',
type=int,
default=0,
)
parser.add_argument(
'--lf', '--last-frame',
metavar='sec',
dest='last_frame',
type=int,
default=60,
)
parser.add_argument(
'--as', '--as-stream',
default='no',
dest='as_stream',
type=yes_no,
)
return parser
def run(self, *kwargs):
options = self.options
detector = SimpleDetector()
t1 = time.time()
detector.detect(
input_uri=options.input_uri,
format=options.format,
service_options=vars(options)
)
t2 = time.time()
print(t2 - t1)
| Python | 0 |
6f05fa90a2134c24c753a50a43e91522531c72b6 | update update | wsgi/usgs_update_02.py | wsgi/usgs_update_02.py | #!/usr/bin/env python
# Parse USGS JSON files
# Populates the sites using the original URL requests
# USGS site doesn't seem to let you just dump everything
# For this purpose we use the hydrological are
# This value goes from 01 to 21 and makes it easy to construct a series of operations
# This version creates a customized dump because MongoDB apparently doesn't like
# a literal JSON format.
# Gordon Haff
import json
import string
import sys
import io
import urllib2
import pymongo
import os
output = {}
#setup the connection to the gauges database
conn = pymongo.Connection(os.environ['OPENSHIFT_MONGODB_DB_URL'])
db = conn.gauges
# for working purposes, only pulling in New England
for i in range(0,1):
req = urllib2.Request("http://waterservices.usgs.gov/nwis/iv/?format=json,1.1&huc=01¶meterCd=00060,00065&siteType=ST")
opener = urllib2.build_opener()
f = opener.open(req)
entry = json.loads(f.read())
count = int (len(entry['value']['timeSeries']) - 1)
while count >= 0:
#We construct an array of the relevant values associated with a guage number
#Note that gage height and discharge are in separate entries
#Right here we're just filling out the "permanent" values
#Gauge Number. This will be the dictionary index
agaugenum = entry['value']['timeSeries'][count]['sourceInfo']['siteCode'][0]['value']
#Site Name
#Going to assume that all the "permanent" attributes of a guage number are the
#same across entries. We'll use the first instance in any case
# asitename = entry['value']['timeSeries'][count]['sourceInfo']['siteName']
#Lat
# alat = entry['value']['timeSeries'][count]['sourceInfo']['geoLocation']['geogLocation']['latitude']
#Long
# along = entry['value']['timeSeries'][count]['sourceInfo']['geoLocation']['geogLocation']['longitude']
# save the variable code
variablecode = str(entry['value']['timeSeries'][count]['variable']['variableCode'][0]['variableID'])
# save the variable value
variablevalue = str(entry['value']['timeSeries'][count]['values'][0]['value'][0]['value'])
# save the time stamp
creationtime = str(entry['value']['timeSeries'][count]['values'][0]['value'][0]['dateTime'])
#Gage ht. ft. variableID 45807202
if variablecode == '45807202':
db.gaugepoints.update({"_id":agaugenum},{"$set":{"height":variablevalue}})
#Discharge cfs variableID 45807197
if variablecode == '45807197':
db.gaugepoints.update({"_id":agaugenum},{"$set":{"flow":variablevalue}})
#save creation time so that we can throw out any stale data
db.gaugepoints.update({"_id":agaugenum},{"$set":{"timestamp":creationtime}})
count = count - 1
| # Parse USGS JSON files
# Populates the sites using the original URL requests
# USGS site doesn't seem to let you just dump everything
# For this purpose we use the hydrological are
# This value goes from 01 to 21 and makes it easy to construct a series of operations
# This version creates a customized dump because MongoDB apparently doesn't like
# a literal JSON format.
# Gordon Haff
import json
import string
import sys
import io
import urllib2
import pymongo
import os
output = {}
#setup the connection to the gauges database
conn = pymongo.Connection(os.environ['OPENSHIFT_MONGODB_DB_URL'])
db = conn.gauges
# for working purposes, only pulling in New England
for i in range(0,1):
req = urllib2.Request("http://waterservices.usgs.gov/nwis/iv/?format=json,1.1&huc=01¶meterCd=00060,00065&siteType=ST")
opener = urllib2.build_opener()
f = opener.open(req)
entry = json.loads(f.read())
count = int (len(entry['value']['timeSeries']) - 1)
while count >= 0:
#We construct an array of the relevant values associated with a guage number
#Note that gage height and discharge are in separate entries
#Right here we're just filling out the "permanent" values
#Gauge Number. This will be the dictionary index
agaugenum = entry['value']['timeSeries'][count]['sourceInfo']['siteCode'][0]['value']
#Site Name
#Going to assume that all the "permanent" attributes of a guage number are the
#same across entries. We'll use the first instance in any case
# asitename = entry['value']['timeSeries'][count]['sourceInfo']['siteName']
#Lat
# alat = entry['value']['timeSeries'][count]['sourceInfo']['geoLocation']['geogLocation']['latitude']
#Long
# along = entry['value']['timeSeries'][count]['sourceInfo']['geoLocation']['geogLocation']['longitude']
# save the variable code
variablecode = str(entry['value']['timeSeries'][count]['variable']['variableCode'][0]['variableID'])
# save the variable value
variablevalue = str(entry['value']['timeSeries'][count]['values'][0]['value'][0]['value'])
# save the time stamp
creationtime = str(entry['value']['timeSeries'][count]['values'][0]['value'][0]['dateTime'])
#Gage ht. ft. variableID 45807202
if variablecode == '45807202':
db.gaugepoints.update({"_id":agaugenum},{"$set":{"height":variablevalue}})
#Discharge cfs variableID 45807197
if variablecode == '45807197':
db.gaugepoints.update({"_id":agaugenum},{"$set":{"flow":variablevalue}})
#save creation time so that we can throw out any stale data
db.gaugepoints.update({"_id":agaugenum},{"$set":{"timestamp":creationtime}})
count = count - 1
| Python | 0.000001 |
251e11ef777ece9542b21af1ed43fa580c2186b3 | Bump to 2.1.2 | opencanada/__init__.py | opencanada/__init__.py | from django.utils.version import get_version
VERSION = (2, 1, 2, 'final', 0)
__version__ = get_version(VERSION)
| from django.utils.version import get_version
VERSION = (2, 1, 1, 'final', 0)
__version__ = get_version(VERSION)
| Python | 0.000219 |
212d6bbc559c0a7fab74bff647a49817384e10ff | substitute {format} with json in oembed_url | embeddit/__init__.py | embeddit/__init__.py | import os
import re
import json
import requests
import fnmatch
from urllib import urlencode
from BeautifulSoup import BeautifulSoup
_ROOT = os.path.abspath(os.path.dirname(__file__))
invalid_url = {'error': 'Invalid URL'}
unreachable = {'error': 'Failed to reach the URL'}
empty_meta = {'error': 'Found no meta info for that url'}
class Embeddit(dict):
url = None
fetched = False
def __init__(self, url=None, *args, **kwargs):
if url:
self.url = url
self.fetch()
def fetch(self, force=False):
if self.fetched and not force:
return self
response = self.fetch_oembed_meta()
if 'error' in response:
# No oembed info found.
# Fall back to open graph
response = self.fetch_og_meta()
self.clear()
self.update(response)
self.fetched = True
return response
def to_json(self):
if not self.fetched:
self.fetch()
return json.dumps(self)
def fetch_oembed_meta(self):
try:
f = open(get_data('providers.json'), 'r')
providers = json.loads(f.read())
oembed_url = None
for provider in providers:
for endpoint in provider.get('endpoints', []):
for schema in endpoint.get('schemes', []):
if not schema.startswith('http://*') or not schema.startswith('https://*'):
schema = schema.replace('http://', 'http://*')
schema = schema.replace('https://', 'https://*')
if fnmatch.fnmatch(self.url, schema):
oembed_url = endpoint.get('url')
break
if not oembed_url:
provider_urls = [
provider.get('provider_url'),
provider.get('provider_url').replace('http://', 'https://')
]
for provider_url in provider_urls:
if fnmatch.fnmatch(self.url, provider_url + "*"):
oembed_url = provider.get('endpoints')[0].get('url')
break
if not oembed_url:
return invalid_url
params = urlencode({'url': self.url})
try:
results = requests.get('%s?%s' % (oembed_url.replace('{format}', 'json'), params))
content = json.loads(results.content)
content[u'source_type'] = 'oembed'
except ValueError:
params = urlencode({'url': self.url, 'format': 'json'})
results = requests.get('%s?%s' % (oembed_url, params))
content = json.loads(results.content)
content[u'source_type'] = 'oembed'
return content
except IndexError:
return empty_meta
except requests.exceptions.InvalidSchema:
return invalid_url
except requests.exceptions.HTTPError:
return unreachable
def fetch_og_meta(self):
try:
results = requests.get(self.url)
soup = BeautifulSoup(results.content)
meta = soup.findAll('meta')
content = {}
for tag in meta:
if tag.has_key('property'):
if re.search('og:', tag['property']) is not None:
key = re.sub('og:', '', tag['property'])
content[key] = tag['content']
if content == {}:
return empty_meta
else:
content[u'source_type'] = 'open_graph'
return content
except requests.exceptions.InvalidSchema:
return invalid_url
except requests.exceptions.HTTPError:
return unreachable
def get_data(path):
return os.path.join(_ROOT, 'data', path)
| import os
import re
import json
import requests
import fnmatch
from urllib import urlencode
from BeautifulSoup import BeautifulSoup
_ROOT = os.path.abspath(os.path.dirname(__file__))
invalid_url = {'error': 'Invalid URL'}
unreachable = {'error': 'Failed to reach the URL'}
empty_meta = {'error': 'Found no meta info for that url'}
class Embeddit(dict):
url = None
fetched = False
def __init__(self, url=None, *args, **kwargs):
if url:
self.url = url
self.fetch()
def fetch(self, force=False):
if self.fetched and not force:
return self
response = self.fetch_oembed_meta()
if 'error' in response:
# No oembed info found.
# Fall back to open graph
response = self.fetch_og_meta()
self.clear()
self.update(response)
self.fetched = True
return response
def to_json(self):
if not self.fetched:
self.fetch()
return json.dumps(self)
def fetch_oembed_meta(self):
try:
f = open(get_data('providers.json'), 'r')
providers = json.loads(f.read())
oembed_url = None
for provider in providers:
for endpoint in provider.get('endpoints', []):
for schema in endpoint.get('schemes', []):
if not schema.startswith('http://*') or not schema.startswith('https://*'):
schema = schema.replace('http://', 'http://*')
schema = schema.replace('https://', 'https://*')
if fnmatch.fnmatch(self.url, schema):
oembed_url = endpoint.get('url')
break
if not oembed_url:
provider_urls = [
provider.get('provider_url'),
provider.get('provider_url').replace('http://', 'https://')
]
for provider_url in provider_urls:
if fnmatch.fnmatch(self.url, provider_url + "*"):
oembed_url = provider.get('endpoints')[0].get('url')
break
if not oembed_url:
return invalid_url
params = urlencode({'url': self.url})
try:
results = requests.get('%s?%s' % (oembed_url, params))
content = json.loads(results.content)
content[u'source_type'] = 'oembed'
except ValueError:
params = urlencode({'url': self.url, 'format': 'json'})
results = requests.get('%s?%s' % (oembed_url, params))
content = json.loads(results.content)
content[u'source_type'] = 'oembed'
return content
except IndexError:
return empty_meta
except requests.exceptions.InvalidSchema:
return invalid_url
except requests.exceptions.HTTPError:
return unreachable
def fetch_og_meta(self):
try:
results = requests.get(self.url)
soup = BeautifulSoup(results.content)
meta = soup.findAll('meta')
content = {}
for tag in meta:
if tag.has_key('property'):
if re.search('og:', tag['property']) is not None:
key = re.sub('og:', '', tag['property'])
content[key] = tag['content']
if content == {}:
return empty_meta
else:
content[u'source_type'] = 'open_graph'
return content
except requests.exceptions.InvalidSchema:
return invalid_url
except requests.exceptions.HTTPError:
return unreachable
def get_data(path):
return os.path.join(_ROOT, 'data', path)
| Python | 0.000079 |
baa024a9e09607f8295cfe526a9eb25906aca806 | modify the filename | PyStudy/loadfile_speed.py | PyStudy/loadfile_speed.py | #!/usr/bin/env python
import datetime
count = 0
begin_time = datetime.datetime.now()
def readInChunks(fileObj, chunkSize=2048):
"""
Lazy function to read a file piece by piece.
Default chunk size: 2kB.
"""
while True:
data = fileObj.read(chunkSize)
if not data:
break
yield data
f = open('bigfile')
for chuck in readInChunks(f):
count = count + 1
end_time = datetime.datetime.now()
total_time = end_time - begin_time
print "chunk=%s, count=%i"%(total_time, count)
f.close()
count = 0
begin_time = datetime.datetime.now()
f = open('bigfile')
for line in f:
count = count + 1
end_time = datetime.datetime.now()
total_time = end_time - begin_time
print "read=%s, count=%i"%(total_time, count)
f.close()
| #!/usr/bin/env python
import datetime
count = 0
begin_time = datetime.datetime.now()
def readInChunks(fileObj, chunkSize=2048):
"""
Lazy function to read a file piece by piece.
Default chunk size: 2kB.
"""
while True:
data = fileObj.read(chunkSize)
if not data:
break
yield data
f = open('fastapi-requests.log.1')
for chuck in readInChunks(f):
count = count + 1
end_time = datetime.datetime.now()
total_time = end_time - begin_time
print "chunk=%s, count=%i"%(total_time, count)
f.close()
count = 0
begin_time = datetime.datetime.now()
f = open('fastapi-requests.log.1')
for line in f:
count = count + 1
end_time = datetime.datetime.now()
total_time = end_time - begin_time
print "read=%s, count=%i"%(total_time, count)
f.close()
| Python | 0.999999 |
211f88cc377b0d9432258d0ebc3fdc2ebd54302f | EDIT requirements updated. imports updated | nsaba/geneinfo.py | nsaba/geneinfo.py | """
geneinfo.py: methods for querying, saving
and loading gene information for NIH
database.
Author: Torben Noto
"""
import pandas as pd
import os
import random
import urllib2
from bs4 import BeautifulSoup
from time import sleep
from collections import namedtuple
def gene_info(eid):
"""
Pulls gene data based on Entrez ID from the NIH and returns summary.
Parameters
----------
eid : int
Entrez ID of interest
Returns
-------
(gene_name, gene_description) : (string, string)
gene_description contains a string of appropriately 30-80
characters describing function, relevance and attribution
of the gene specified by eid.
"""
if isinstance(eid, str):
try:
page_name = "http://www.ncbi.nlm.nih.gov/gene/?term=" + eid
page = urllib2.urlopen(page_name)
sleep(1+random.random())
soup = BeautifulSoup(page)
contents = []
for ana in soup.findAll('dd'):
if ana.parent.name == 'dl':
contents.append(ana.contents)
gene_name = contents[1][0]
gene_description = contents[9]
if not len(gene_description[0]) > 1:
gene_description = 'No description found'
return gene_name, gene_description
except IndexError:
print "%s isn't registered with the NIH" % eid
return 'No Gene identification found', 'No description found'
else:
raise TypeError("gene no must be a string")
def load_gene_file(path='.'):
"""
Loads file containing gene descriptions of genes specified by
their Entrez IDs from: http://www.ncbi.nlm.nih.gov/gene/ .
Parameters
----------
path : str, optional
Specifies path to gene_info.csv.
Returns
-------
pandas.DataFrame
Returns a DataFrame where each row contains three fields:
'Entrez', 'Gene Name' and 'Gene Description'. Where 'Entrez'
specifies the gene's Entrez ID and the last two fields are
of the same form as gene_info()'s returns.
NOTE: This assumes that correct CSV has been loaded.
"""
if isinstance(path, str):
gene_file = os.path.join(path, 'gene_info.csv')
df = pd.read_csv(gene_file)
return df
else:
raise TypeError("Gene-file path must be a string")
def get_gene_info(path, gene_ids):
"""
Extracts gene information from DataFrame created by
load_gene_file() for specific genes based on list
of Entrez IDs.
Parameters
---------
path : str
Specifies path to gene_info.csv.
gene_ids : list [ int ]
List of Entrez IDs of gene descriptions to be fetched
Returns
-------
output : list [ gi_tuple (long, str, u-str) ]
Returns a list of gene information for specified
Entrez IDs in form: ('Entrez', 'Gene Name' 'Gene Description').
"""
gi_tuple = namedtuple("gi_tuple", "entrez name description")
df = load_gene_file(path)
output = []
for gene_id in gene_ids:
if gene_id in df['Entrez']:
gi = df[df['Entrez'] == gene_id].as_matrix()[0]
output.append(gi_tuple(gi[0], gi[1], gi[2]))
else:
print 'Gene %s not found in NIH database' % gene_id
return output
| """
geneinfo.py: methods for querying, saving
and loading gene information for NIH
database.
Author: Torben Noto
"""
import pandas as pd
import os
import random
import urllib2
from BeautifulSoup import BeautifulSoup
from time import sleep
from collections import namedtuple
def gene_info(eid):
"""
Pulls gene data based on Entrez ID from the NIH and returns summary.
Parameters
----------
eid : int
Entrez ID of interest
Returns
-------
(gene_name, gene_description) : (string, string)
gene_description contains a string of appropriately 30-80
characters describing function, relevance and attribution
of the gene specified by eid.
"""
if isinstance(eid, str):
try:
page_name = "http://www.ncbi.nlm.nih.gov/gene/?term=" + eid
page = urllib2.urlopen(page_name)
sleep(1+random.random())
soup = BeautifulSoup(page)
contents = []
for ana in soup.findAll('dd'):
if ana.parent.name == 'dl':
contents.append(ana.contents)
gene_name = contents[1][0]
gene_description = contents[9]
if not len(gene_description[0]) > 1:
gene_description = 'No description found'
return gene_name, gene_description
except IndexError:
print "%s isn't registered with the NIH" % eid
return 'No Gene identification found', 'No description found'
else:
raise TypeError("gene no must be a string")
def load_gene_file(path='.'):
"""
Loads file containing gene descriptions of genes specified by
their Entrez IDs from: http://www.ncbi.nlm.nih.gov/gene/ .
Parameters
----------
path : str, optional
Specifies path to gene_info.csv.
Returns
-------
pandas.DataFrame
Returns a DataFrame where each row contains three fields:
'Entrez', 'Gene Name' and 'Gene Description'. Where 'Entrez'
specifies the gene's Entrez ID and the last two fields are
of the same form as gene_info()'s returns.
NOTE: This assumes that correct CSV has been loaded.
"""
if isinstance(path, str):
gene_file = os.path.join(path, 'gene_info.csv')
df = pd.read_csv(gene_file)
return df
else:
raise TypeError("Gene-file path must be a string")
def get_gene_info(path, gene_ids):
"""
Extracts gene information from DataFrame created by
load_gene_file() for specific genes based on list
of Entrez IDs.
Parameters
---------
path : str
Specifies path to gene_info.csv.
gene_ids : list [ int ]
List of Entrez IDs of gene descriptions to be fetched
Returns
-------
output : list [ gi_tuple (long, str, u-str) ]
Returns a list of gene information for specified
Entrez IDs in form: ('Entrez', 'Gene Name' 'Gene Description').
"""
gi_tuple = namedtuple("gi_tuple", "entrez name description")
df = load_gene_file(path)
output = []
for gene_id in gene_ids:
if gene_id in df['Entrez']:
gi = df[df['Entrez'] == gene_id].as_matrix()[0]
output.append(gi_tuple(gi[0], gi[1], gi[2]))
else:
print 'Gene %s not found in NIH database' % gene_id
return output
| Python | 0.00325 |
96e26b74851c0b54493f3c269ceefb6b2ae53e7d | implement fromXml toXml and defaultInit method of Resolution class | settingMod/Resolution.py | settingMod/Resolution.py | #!/usr/bin/python3.4
# -*-coding:Utf-8 -*
'''module to manage resolution settings'''
import xml.etree.ElementTree as xmlMod
from settingMod.Size import *
import os
class Resolution:
'''class to manage resolution settings'''
def __init__(self, xml= None):
'''initialize resolution settings with default value or values extracted from an xml object'''
if xml is None:
self.defaultInit()
else:
self.fromXml(xml)
def defaultInit(self):
'''initialize resolution settings with default value'''
self.pourcent = 100
self.size = Size('1920x1080')
def fromXml(self, xml):
'''initialize resolution settings with values extracted from an xml object'''
self.pourcent = int(xml.get('pourcent'))
self.size = Size(xml = xml)
def toXml(self):
'''export resolution settings into xml syntaxed string'''
return '<resolution pourcent="'+str(self.pourcent)+'" '+self.size.toXmlAttr()+' />'
def see(self, log):
'''menu to explore and edit resolution settings settings'''
def print(self):
'''a method to print preset'''
| #!/usr/bin/python3.4
# -*-coding:Utf-8 -*
'''module to manage resolution settings'''
import xml.etree.ElementTree as xmlMod
from settingMod.Size import *
import os
class Resolution:
'''class to manage resolution settings'''
def __init__(self, xml= None):
'''initialize resolution settings with default value or values extracted from an xml object'''
if xml is None:
self.defaultInit()
else:
self.fromXml(xml)
def defaultInit(self):
'''initialize resolution settings with default value'''
def fromXml(self, xml):
'''initialize resolution settings with values extracted from an xml object'''
def toXml(self):
'''export resolution settings into xml syntaxed string'''
def see(self, log):
'''menu to explore and edit resolution settings settings'''
def print(self):
'''a method to print preset'''
| Python | 0 |
44dcbfe606377331a40777a7b387768c816b0e61 | Increment to .2.11 for new package | nymms/__init__.py | nymms/__init__.py | __version__ = '0.2.11'
| __version__ = '0.2.10'
| Python | 0.000017 |
e3a93aff39ed4a876bdfabd5e62271bce9fe11e9 | remove unused analyzers import clause | src/cmdlr/amgr.py | src/cmdlr/amgr.py | """Cmdlr analyzers holder and importer."""
import importlib
import pkgutil
import os
import sys
import functools
import re
from .exception import NoMatchAnalyzer
from .exception import ExtraAnalyzersDirNotExists
from .exception import AnalyzerRuntimeError
class AnalyzerManager:
"""Import, active, dispatch and hold all analyzer."""
analyzers_pkgpath = 'cmdlr.analyzers'
def __init__(self, config):
"""Import all analyzers."""
self.__analyzers = {}
self.__analyzer_picker = None
self.config = config
self.__import_all_analyzer()
self.__build_analyzer_picker()
def __import_all_analyzer(self):
extra_analyzer_dir = self.config.extra_analyzer_dir
disabled_analyzers = self.config.disabled_analyzers
analyzer_dirs = [os.path.join(os.path.dirname(__file__), 'analyzers')]
if extra_analyzer_dir and not os.path.isdir(extra_analyzer_dir):
raise ExtraAnalyzersDirNotExists(
'extra_analyzer_dir already be set but not exists, path: "{}"'
.format(extra_analyzer_dir))
elif extra_analyzer_dir:
analyzer_dirs[:0] = [extra_analyzer_dir]
for finder, module_name, ispkg in pkgutil.iter_modules(analyzer_dirs):
if module_name not in disabled_analyzers:
full_module_name = (type(self).analyzers_pkgpath
+ '.'
+ module_name)
spec = finder.find_spec(full_module_name)
module = importlib.util.module_from_spec(spec)
sys.modules[full_module_name] = module
spec.loader.exec_module(module)
aname = module_name
self.__analyzers[aname] = module.Analyzer(
customization=self.config.get_customization(aname),
)
self.__analyzers[aname].aname = aname
def __build_analyzer_picker(self):
retype = type(re.compile(''))
mappers = []
for aname, analyzer in self.__analyzers.items():
for pattern in analyzer.entry_patterns:
if isinstance(pattern, retype):
mappers.append((pattern, analyzer))
elif isinstance(pattern, str):
mappers.append((re.compile(pattern), analyzer))
else:
raise AnalyzerRuntimeError(
'some entry pattern in analyzer "{}"'
' neither str nor re.compile type'
.format(aname)
)
def analyzer_picker(curl):
for pattern, analyzer in mappers:
if pattern.search(curl):
return analyzer
raise NoMatchAnalyzer(
'No Matched Analyzer: {}'.format(curl),
)
self.__analyzer_picker = analyzer_picker
@functools.lru_cache(maxsize=None, typed=True)
def get_match_analyzer(self, curl):
"""Get a url matched analyzer."""
return self.__analyzer_picker(curl)
@functools.lru_cache(maxsize=None, typed=True)
def get_normalized_entry(self, curl):
"""Return the normalized entry url."""
return self.get_match_analyzer(curl).entry_normalizer(curl)
def get_analyzer_infos(self):
"""Return all analyzer info."""
def get_desc(analyzer):
return analyzer.__doc__
unsorted_infos = [
(aname, get_desc(analyzer))
for aname, analyzer in self.__analyzers.items()
]
return sorted(unsorted_infos, key=lambda item: item[0])
| """Cmdlr analyzers holder and importer."""
import importlib
import pkgutil
import os
import sys
import functools
import re
from . import analyzers as _analyzers # NOQA
from .exception import NoMatchAnalyzer
from .exception import ExtraAnalyzersDirNotExists
from .exception import AnalyzerRuntimeError
class AnalyzerManager:
"""Import, active, dispatch and hold all analyzer."""
analyzers_pkgpath = 'cmdlr.analyzers'
def __init__(self, config):
"""Import all analyzers."""
self.__analyzers = {}
self.__analyzer_picker = None
self.config = config
self.__import_all_analyzer()
self.__build_analyzer_picker()
def __import_all_analyzer(self):
extra_analyzer_dir = self.config.extra_analyzer_dir
disabled_analyzers = self.config.disabled_analyzers
analyzer_dirs = [os.path.join(os.path.dirname(__file__), 'analyzers')]
if extra_analyzer_dir and not os.path.isdir(extra_analyzer_dir):
raise ExtraAnalyzersDirNotExists(
'extra_analyzer_dir already be set but not exists, path: "{}"'
.format(extra_analyzer_dir))
elif extra_analyzer_dir:
analyzer_dirs[:0] = [extra_analyzer_dir]
for finder, module_name, ispkg in pkgutil.iter_modules(analyzer_dirs):
if module_name not in disabled_analyzers:
full_module_name = (type(self).analyzers_pkgpath
+ '.'
+ module_name)
spec = finder.find_spec(full_module_name)
module = importlib.util.module_from_spec(spec)
sys.modules[full_module_name] = module
spec.loader.exec_module(module)
aname = module_name
self.__analyzers[aname] = module.Analyzer(
customization=self.config.get_customization(aname),
)
self.__analyzers[aname].aname = aname
def __build_analyzer_picker(self):
retype = type(re.compile(''))
mappers = []
for aname, analyzer in self.__analyzers.items():
for pattern in analyzer.entry_patterns:
if isinstance(pattern, retype):
mappers.append((pattern, analyzer))
elif isinstance(pattern, str):
mappers.append((re.compile(pattern), analyzer))
else:
raise AnalyzerRuntimeError(
'some entry pattern in analyzer "{}"'
' neither str nor re.compile type'
.format(aname)
)
def analyzer_picker(curl):
for pattern, analyzer in mappers:
if pattern.search(curl):
return analyzer
raise NoMatchAnalyzer(
'No Matched Analyzer: {}'.format(curl),
)
self.__analyzer_picker = analyzer_picker
@functools.lru_cache(maxsize=None, typed=True)
def get_match_analyzer(self, curl):
"""Get a url matched analyzer."""
return self.__analyzer_picker(curl)
@functools.lru_cache(maxsize=None, typed=True)
def get_normalized_entry(self, curl):
"""Return the normalized entry url."""
return self.get_match_analyzer(curl).entry_normalizer(curl)
def get_analyzer_infos(self):
"""Return all analyzer info."""
def get_desc(analyzer):
return analyzer.__doc__
unsorted_infos = [
(aname, get_desc(analyzer))
for aname, analyzer in self.__analyzers.items()
]
return sorted(unsorted_infos, key=lambda item: item[0])
| Python | 0.000001 |
7095380ff71947f76ff60765e699da8e31fde944 | Build - remove dir directory - not used | project_generator/commands/build.py | project_generator/commands/build.py | # Copyright 2015 0xc0170
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
from ..tools_supported import ToolsSupported
from ..generate import Generator
from ..settings import ProjectSettings
help = 'Build a project'
def run(args):
# Export if we know how, otherwise return
if os.path.exists(args.file):
generator = Generator(args.file)
for project in generator.generate(args.project):
export_result = project.export(args.tool, args.copy)
build_result = project.build(args.tool)
if build_result == 0 and export_result == 0:
return 0
else:
return -1
else:
# not project known by pgen
logging.warning("%s not found." % args.file)
return -1
def setup(subparser):
subparser.add_argument(
"-f", "--file", help="YAML projects file", default='projects.yaml')
subparser.add_argument("-p", "--project", help="Name of the project to build", default = '')
subparser.add_argument(
"-t", "--tool", help="Build a project files for provided tool")
subparser.add_argument(
"-c", "--copy", action="store_true", help="Copy all files to the exported directory")
| # Copyright 2015 0xc0170
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
from ..tools_supported import ToolsSupported
from ..generate import Generator
from ..settings import ProjectSettings
help = 'Build a project'
def run(args):
# Export if we know how, otherwise return
if os.path.exists(args.file):
generator = Generator(args.file)
for project in generator.generate(args.project):
export_result = project.export(args.tool, args.copy)
build_result = project.build(args.tool)
if build_result == 0 and export_result == 0:
return 0
else:
return -1
else:
# not project known by pgen
logging.warning("%s not found." % args.file)
return -1
def setup(subparser):
subparser.add_argument(
"-f", "--file", help="YAML projects file", default='projects.yaml')
subparser.add_argument("-p", "--project", help="Name of the project to build", default = '')
subparser.add_argument(
"-t", "--tool", help="Build a project files for provided tool")
subparser.add_argument(
"-dir", "--directory", help="The projects directory")
subparser.add_argument(
"-c", "--copy", action="store_true", help="Copy all files to the exported directory")
| Python | 0 |
9ec02a7cc31766d2b0d46547addddc0ca350e8ed | make pylint even more happy | neuralmonkey/runners/perplexity_runner.py | neuralmonkey/runners/perplexity_runner.py | """
This module contains an implementation of a runner that is supposed to be
used in case we train a language model. Instead of decoding sentences in
computes its perplexities given the decoder.
"""
#tests: lint
from neuralmonkey.learning_utils import feed_dicts
#pylint: disable=too-few-public-methods
class PerplexityRunner(object):
def __init__(self, decoder, batch_size):
self.decoder = decoder
self.batch_size = batch_size
self.vocabulary = decoder.vocabulary
def __call__(self, sess, dataset, coders):
if not dataset.has_series(self.decoder.data_id):
raise Exception("Dataset must have the target values ({})"
"for computing perplexity."
.format(self.decoder.data_id))
perplexities = []
train_loss = 0.0
runtime_loss = 0.0
batch_count = 0
for batch in dataset.batch_dataset(self.batch_size):
batch_count += 1
batch_feed_dict = feed_dicts(batch, coders, train=False)
cross_entropies, opt_loss, dec_loss = sess.run(
[self.decoder.cross_entropies,
self.decoder.train_loss,
self.decoder.runtime_loss],
feed_dict=batch_feed_dict)
perplexities.extend([2 ** xent for xent in cross_entropies])
train_loss += opt_loss
runtime_loss += dec_loss
avg_train_loss = train_loss / batch_count
avg_runtime_loss = runtime_loss / batch_count
return perplexities, avg_train_loss, avg_runtime_loss
| """
This module contains an implementation of a runner that is supposed to be
used in case we train a language model. Instead of decoding sentences in
computes its perplexities given the decoder.
"""
#tests: lint
from neuralmonkey.learning_utils import feed_dicts
class PerplexityRunner(object):
def __init__(self, decoder, batch_size):
self.decoder = decoder
self.batch_size = batch_size
self.vocabulary = decoder.vocabulary
def __call__(self, sess, dataset, coders):
if not dataset.has_series(self.decoder.data_id):
raise Exception("Dataset must have the target values ({})"
"for computing perplexity."
.format(self.decoder.data_id))
batched_dataset = dataset.batch_dataset(self.batch_size)
losses = [self.decoder.train_loss,
self.decoder.runtime_loss]
perplexities = []
train_loss = 0.0
runtime_loss = 0.0
batch_count = 0
for batch in batched_dataset:
batch_count += 1
batch_feed_dict = feed_dicts(batch, coders, train=False)
cross_entropies, opt_loss, dec_loss = sess.run(
[self.decoder.cross_entropies] + losses,
feed_dict=batch_feed_dict)
perplexities.extend([2 ** xent for xent in cross_entropies])
train_loss += opt_loss
runtime_loss += dec_loss
avg_train_loss = train_loss / batch_count
avg_runtime_loss = runtime_loss / batch_count
return perplexities, avg_train_loss, avg_runtime_loss
| Python | 0.000001 |
1fc9561148402c4eb558d183f4d8f3ecce0a0330 | Set version to 0.4.1 | alignak_backend/__init__.py | alignak_backend/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Alignak REST backend
"""
# Application manifest
VERSION = (0, 4, 1)
__application__ = u"Alignak_Backend"
__version__ = '.'.join((str(each) for each in VERSION[:4]))
__author__ = u"Alignak team"
__copyright__ = u"(c) 2015 - %s" % __author__
__license__ = u"GNU Affero General Public License, version 3"
__description__ = u"Alignak REST backend"
__releasenotes__ = u"""Alignak REST Backend"""
__doc_url__ = "https://github.com/Alignak-monitoring-contrib/alignak-backend"
# Application manifest
manifest = {
'name': __application__,
'version': __version__,
'author': __author__,
'description': __description__,
'copyright': __copyright__,
'license': __license__,
'release': __releasenotes__,
'doc': __doc_url__
}
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Alignak REST backend
"""
# Application manifest
VERSION = (0, 4, 0)
__application__ = u"Alignak_Backend"
__version__ = '.'.join((str(each) for each in VERSION[:4]))
__author__ = u"Alignak team"
__copyright__ = u"(c) 2015 - %s" % __author__
__license__ = u"GNU Affero General Public License, version 3"
__description__ = u"Alignak REST backend"
__releasenotes__ = u"""Alignak REST Backend"""
__doc_url__ = "https://github.com/Alignak-monitoring-contrib/alignak-backend"
# Application manifest
manifest = {
'name': __application__,
'version': __version__,
'author': __author__,
'description': __description__,
'copyright': __copyright__,
'license': __license__,
'release': __releasenotes__,
'doc': __doc_url__
}
| Python | 0 |
e77381b087acd935bc3dae1f6c2e809970506db9 | remove SECRET_KEY, again | bepasty/config.py | bepasty/config.py | # Copyright: 2013 Bastian Blank <bastian@waldi.eu.org>
# License: BSD 2-clause, see LICENSE for details.
class Config(object):
"""This is the basic configuration class for bepasty."""
#: name of this site (put YOUR bepasty fqdn here)
SITENAME = 'bepasty.example.org'
UPLOAD_UNLOCKED = True
"""
.. warning::
Uploads are default unlocked. Actually the admin should manual
unlock the uploaded files to avoid copyright issues. In hosted
version you should set ``UPLOAD_UNLOCKED = False``.
"""
#: Define storage module
#: Available:
#: - filesystem
#: - ceph
STORAGE = 'filesystem'
#: Filesystem storage specific config
STORAGE_FILESYSTEM_DIRECTORY = '/tmp/'
#: Config file for CEPH storage
STORAGE_CEPH_CONFIG_FILE = '/etc/ceph/ceph.conf'
#: CEPH pool name for actually data
STORAGE_CEPH_POOL_DATA = 'bepasty-data'
#: CEPH pool name for meta data
STORAGE_CEPH_POOL_META = 'bepasty-meta'
#: server secret key needed for secure cookies
#: you must set a very long, very random, very secret string here,
#: otherwise bepasty will not work (and crash when trying to log in)!
SECRET_KEY = ''
#: not logged-in users get these permissions
#: usually either nothing ('') or read-only ('read'):
DEFAULT_PERMISSIONS = ''
#: logged-in users may get more permissions
#: you need a login secret to log in and, depending on that secret, you will
#: get the configured permissions.
#: you can use same secret / same permissions for all privileged users or
#: set up different secrets / different permissions.
#: PERMISSIONS is a dict that maps secrets to permissions, use it like:
#: PERMISSIONS = {
#: 'myadminsecret': 'admin,create,read,delete',
#: 'myuploadersecret': 'create,read',
#: }
PERMISSIONS = {
}
| # Copyright: 2013 Bastian Blank <bastian@waldi.eu.org>
# License: BSD 2-clause, see LICENSE for details.
class Config(object):
"""This is the basic configuration class for bepasty."""
#: name of this site (put YOUR bepasty fqdn here)
SITENAME = 'bepasty.example.org'
UPLOAD_UNLOCKED = True
"""
.. warning::
Uploads are default unlocked. Actually the admin should manual
unlock the uploaded files to avoid copyright issues. In hosted
version you should set ``UPLOAD_UNLOCKED = False``.
"""
#: Define storage module
#: Available:
#: - filesystem
#: - ceph
STORAGE = 'filesystem'
#: Filesystem storage specific config
STORAGE_FILESYSTEM_DIRECTORY = '/tmp/'
#: Config file for CEPH storage
STORAGE_CEPH_CONFIG_FILE = '/etc/ceph/ceph.conf'
#: CEPH pool name for actually data
STORAGE_CEPH_POOL_DATA = 'bepasty-data'
#: CEPH pool name for meta data
STORAGE_CEPH_POOL_META = 'bepasty-meta'
#: server secret key needed for secure cookies
#: you must set a very long, very random, very secret string here,
#: otherwise bepasty will not work (and crash when trying to log in)!
SECRET_KEY = 'xx'
#: not logged-in users get these permissions
#: usually either nothing ('') or read-only ('read'):
DEFAULT_PERMISSIONS = ''
#: logged-in users may get more permissions
#: you need a login secret to log in and, depending on that secret, you will
#: get the configured permissions.
#: you can use same secret / same permissions for all privileged users or
#: set up different secrets / different permissions.
#: PERMISSIONS is a dict that maps secrets to permissions, use it like:
#: PERMISSIONS = {
#: 'myadminsecret': 'admin,create,read,delete',
#: 'myuploadersecret': 'create,read',
#: }
PERMISSIONS = {
}
| Python | 0.000007 |
fd9039ac78985fc5f06f3f01bfafeacdb22f354b | Create sortable tables within the excel sheets | src/spz/spz/tables.py | src/spz/spz/tables.py | # -*- coding: utf-8 -*-
"""Table export utility.
Used to format course lists for download.
"""
import csv
import io
from tempfile import NamedTemporaryFile
from openpyxl import Workbook
from openpyxl.worksheet.table import Table
from flask import make_response, url_for, redirect, flash
def export_course_list(courses, format):
if format == 'csv':
return export(CSVWriter(), courses)
elif format == 'xlsx':
return export(ExcelWriter(), courses)
else:
flash('Ungueltiges Export-Format: {0}'.format(format), 'error')
return redirect(url_for('lists'))
class CSVWriter:
mimetype = 'text/csv'
def __init__(self):
self.buf = io.StringIO()
self.out = csv.writer(self.buf, delimiter=";", dialect=csv.excel)
self.mimetype = 'text/csv'
self.filename = 'Kursliste.csv'
self.header_written = False
def write_heading(self, values):
if not self.header_written:
self.write_row(values)
self.header_written = True
def write_row(self, values):
string_values = [str(v) if v else '' for v in values]
self.out.writerow(string_values)
def new_section(self, name):
# CSV does not support sections
pass
def get_data(self):
return self.buf.getvalue()
class ExcelWriter:
def __init__(self):
# write_only=True would require additional logic to keep track of sheet dimension so we keep it at False
# (see sheet.dimensions in end_section())
self.workbook = Workbook(write_only=False)
self.mimetype = 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
self.filename = 'Kursliste.xlsx'
self.workbook._sheets.clear() # start off with no sheets
def write_heading(self, values):
self.write_row(values)
def write_row(self, values):
self.workbook._sheets[-1].append(values)
def new_section(self, name):
if self.workbook._sheets:
self.end_section()
self.workbook.create_sheet(name)
def get_data(self):
if self.workbook._sheets:
self.end_section()
with NamedTemporaryFile() as file:
self.workbook.save(file.name)
file.seek(0)
stream = file.read()
return stream
def end_section(self):
sheet = self.workbook._sheets[-1]
# create a table within the excel sheet to simplify sorting by values
tableName = sheet.title.replace(' ', '_') # needs to be unique and must not contain spaces
table = Table(displayName=tableName, ref=sheet.dimensions)
sheet.add_table(table)
def export(writer, courses):
# XXX: header -- not standardized
header = ['Kurs', 'Kursplatz', 'Bewerbernummer', 'Vorname', 'Nachname', 'Mail',
'Matrikelnummer', 'Telefon', 'Studienabschluss', 'Semester', 'Bewerberkreis']
for course in courses:
writer.new_section(course.full_name())
writer.write_heading(header)
active_no_debt = [attendance.applicant for attendance in course.attendances
if not attendance.waiting and (not attendance.has_to_pay or attendance.amountpaid > 0)]
idx = 1
for applicant in active_no_debt:
writer.write_row([course.full_name(),
idx,
applicant.id,
applicant.first_name,
applicant.last_name,
applicant.mail,
applicant.tag,
applicant.phone,
applicant.degree.name if applicant.degree else None,
applicant.semester,
applicant.origin.name if applicant.origin else None])
idx += 1
resp = make_response(writer.get_data())
resp.headers['Content-Disposition'] = 'attachment; filename="{0}"'.format(writer.filename)
resp.mimetype = writer.mimetype
return resp
| # -*- coding: utf-8 -*-
"""Table export utility.
Used to format course lists for download.
"""
import csv
import io
from tempfile import NamedTemporaryFile
from openpyxl import Workbook
from flask import make_response, url_for, redirect, flash
def export_course_list(courses, format):
if format == 'csv':
return export(CSVWriter(), courses)
elif format == 'xlsx':
return export(ExcelWriter(), courses)
else:
flash('Ungueltiges Export-Format: {0}'.format(format), 'error')
return redirect(url_for('lists'))
class CSVWriter:
mimetype = 'text/csv'
def __init__(self):
self.buf = io.StringIO()
self.out = csv.writer(self.buf, delimiter=";", dialect=csv.excel)
self.mimetype = 'text/csv'
self.filename = 'Kursliste.csv'
self.header_written = False
def write_heading(self, values):
if not self.header_written:
self.write_row(values)
self.header_written = True
def write_row(self, values):
string_values = [str(v) if v else '' for v in values]
self.out.writerow(string_values)
def new_section(self, name):
pass
def get_data(self):
return self.buf.getvalue()
class ExcelWriter:
def __init__(self):
self.workbook = Workbook(write_only=True)
self.mimetype = 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
self.filename = 'Kursliste.xlsx'
def write_heading(self, values):
self.write_row(values)
def write_row(self, values):
self.workbook._sheets[-1].append(values)
def new_section(self, name):
self.workbook.create_sheet(name)
def get_data(self):
with NamedTemporaryFile() as file:
self.workbook.save(file.name)
file.seek(0)
stream = file.read()
return stream
def export(writer, courses):
# XXX: header -- not standardized
header = ['Kurs', 'Kursplatz', 'Bewerbernummer', 'Vorname', 'Nachname', 'Mail',
'Matrikelnummer', 'Telefon', 'Studienabschluss', 'Semester', 'Bewerberkreis']
for course in courses:
writer.new_section(course.full_name())
writer.write_heading(header)
active_no_debt = [attendance.applicant for attendance in course.attendances
if not attendance.waiting and (not attendance.has_to_pay or attendance.amountpaid > 0)]
idx = 1
for applicant in active_no_debt:
writer.write_row([course.full_name(),
idx,
applicant.id,
applicant.first_name,
applicant.last_name,
applicant.mail,
applicant.tag,
applicant.phone,
applicant.degree.name if applicant.degree else None,
applicant.semester,
applicant.origin.name if applicant.origin else None])
idx += 1
resp = make_response(writer.get_data())
resp.headers['Content-Disposition'] = 'attachment; filename="{0}"'.format(writer.filename)
resp.mimetype = writer.mimetype
return resp
| Python | 0.000002 |
b8e53ed353bf28bc1e532ae1577bf4a8b4ce976f | Add missing import | hackeriet/cardreaderd/__init__.py | hackeriet/cardreaderd/__init__.py | #!/usr/bin/env python
from hackeriet import mifare
from hackeriet.mqtt import MQTT
from hackeriet.door import users
import os, logging, time
logging.basicConfig(level=logging.INFO, format='%(asctime)-15s %(message)s')
door_name = os.getenv("DOOR_NAME", 'hackeriet')
door_topic = "hackeriet/door/%s/open" % door_name
door_timeout = int(os.getenv("DOOR_TIMEOUT", 2))
mqtt = MQTT()
def main():
logging.debug('Starting main loop')
while True:
users.load()
# Read data from card reader
logging.debug('mifare: waiting for data...')
data = mifare.try_read()
if data:
logging.debug('mifare: data read')
user = users.auth(data[0:16])
if user:
ascii_user = user.encode('ascii', 'replace').decode('ascii')
logging.info('auth: card read for user %s' % ascii_user)
mqtt(door_topic, user)
else:
logging.debug('auth: card data does not belong to a user: %s' % data[0:16])
# Avoid spewing messages every single ms while a card is in front of the reader
time.sleep(door_timeout)
else:
logging.debug('mifare: no data read in last attempt')
if __name__ == "__main__":
main()
| #!/usr/bin/env python
from hackeriet import mifare
from hackeriet.mqtt import MQTT
from hackeriet.door import users
import os, logging
logging.basicConfig(level=logging.INFO, format='%(asctime)-15s %(message)s')
door_name = os.getenv("DOOR_NAME", 'hackeriet')
door_topic = "hackeriet/door/%s/open" % door_name
door_timeout = int(os.getenv("DOOR_TIMEOUT", 2))
mqtt = MQTT()
def main():
logging.debug('Starting main loop')
while True:
users.load()
# Read data from card reader
logging.debug('mifare: waiting for data...')
data = mifare.try_read()
if data:
logging.debug('mifare: data read')
user = users.auth(data[0:16])
if user:
ascii_user = user.encode('ascii', 'replace').decode('ascii')
logging.info('auth: card read for user %s' % ascii_user)
mqtt(door_topic, user)
else:
logging.debug('auth: card data does not belong to a user: %s' % data[0:16])
# Avoid spewing messages every single ms while a card is in front of the reader
time.sleep(door_timeout)
else:
logging.debug('mifare: no data read in last attempt')
if __name__ == "__main__":
main()
| Python | 0.000466 |
2e042201d6c0e0709d7056d399052389d1ea54b0 | Move imports inside initialize() method so that we don’t break things on initial setup. | shopify_auth/__init__.py | shopify_auth/__init__.py | VERSION = (0, 1, 6)
__version__ = '.'.join(map(str, VERSION))
__author__ = 'Gavin Ballard'
def initialize():
import shopify
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
if not settings.SHOPIFY_APP_API_KEY or not settings.SHOPIFY_APP_API_SECRET:
raise ImproperlyConfigured("SHOPIFY_APP_API_KEY and SHOPIFY_APP_API_SECRET must be set in settings")
shopify.Session.setup(api_key = settings.SHOPIFY_APP_API_KEY, secret = settings.SHOPIFY_APP_API_SECRET) | import shopify
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
VERSION = (0, 1, 5)
__version__ = '.'.join(map(str, VERSION))
__author__ = 'Gavin Ballard'
def initialize():
if not settings.SHOPIFY_APP_API_KEY or not settings.SHOPIFY_APP_API_SECRET:
raise ImproperlyConfigured("SHOPIFY_APP_API_KEY and SHOPIFY_APP_API_SECRET must be set in settings")
shopify.Session.setup(api_key = settings.SHOPIFY_APP_API_KEY, secret = settings.SHOPIFY_APP_API_SECRET) | Python | 0 |
40d59c44f8488ab6445b626637bfb3135cbbfd56 | Clean up Firefox WebDriver constructor | py/selenium/webdriver/firefox/webdriver.py | py/selenium/webdriver/firefox/webdriver.py | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
try:
import http.client as http_client
except ImportError:
import httplib as http_client
try:
basestring
except NameError: # Python 3.x
basestring = str
import shutil
import socket
import sys
from .firefox_binary import FirefoxBinary
from .remote_connection import FirefoxRemoteConnection
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.firefox.extension_connection import ExtensionConnection
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.remote.webdriver import WebDriver as RemoteWebDriver
from .service import Service
from .options import Options
class WebDriver(RemoteWebDriver):
# There is no native event support on Mac
NATIVE_EVENTS_ALLOWED = sys.platform != "darwin"
def __init__(self, firefox_profile=None, firefox_binary=None, timeout=30,
capabilities=None, proxy=None, executable_path="wires", firefox_options=None):
capabilities = capabilities or DesiredCapabilities.FIREFOX.copy()
self.profile = firefox_profile or FirefoxProfile()
self.profile.native_events_enabled = (
self.NATIVE_EVENTS_ALLOWED and self.profile.native_events_enabled)
self.binary = firefox_binary or capabilities.get("binary", FirefoxBinary())
self.options = firefox_options or Options()
self.options.binary_location = self.binary if isinstance(self.binary, basestring) else self.binary._get_firefox_start_cmd()
self.options.profile = self.profile
capabilities.update(self.options.to_capabilities())
# marionette
if capabilities.get("marionette"):
self.service = Service(executable_path, firefox_binary=self.options.binary_location)
self.service.start()
executor = FirefoxRemoteConnection(
remote_server_addr=self.service.service_url)
RemoteWebDriver.__init__(
self,
command_executor=executor,
desired_capabilities=capabilities,
keep_alive=True)
else:
# Oh well... sometimes the old way is the best way.
if proxy is not None:
proxy.add_to_capabilities(capabilities)
executor = ExtensionConnection("127.0.0.1", self.profile,
self.binary, timeout)
RemoteWebDriver.__init__(self,
command_executor=executor,
desired_capabilities=capabilities,
keep_alive=True)
self._is_remote = False
def quit(self):
"""Quits the driver and close every associated window."""
try:
RemoteWebDriver.quit(self)
except (http_client.BadStatusLine, socket.error):
# Happens if Firefox shutsdown before we've read the response from
# the socket.
pass
if "specificationLevel" in self.capabilities:
self.service.stop()
else:
self.binary.kill()
try:
shutil.rmtree(self.profile.path)
if self.profile.tempfolder is not None:
shutil.rmtree(self.profile.tempfolder)
except Exception as e:
print(str(e))
@property
def firefox_profile(self):
return self.profile
def set_context(self, context):
self.execute("SET_CONTEXT", {"context": context})
| # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
try:
import http.client as http_client
except ImportError:
import httplib as http_client
try:
basestring
except NameError: # Python 3.x
basestring = str
import shutil
import socket
import sys
from .firefox_binary import FirefoxBinary
from .remote_connection import FirefoxRemoteConnection
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.firefox.extension_connection import ExtensionConnection
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.remote.webdriver import WebDriver as RemoteWebDriver
from .service import Service
from .options import Options
class WebDriver(RemoteWebDriver):
# There is no native event support on Mac
NATIVE_EVENTS_ALLOWED = sys.platform != "darwin"
def __init__(self, firefox_profile=None, firefox_binary=None, timeout=30,
capabilities=None, proxy=None, executable_path="wires", firefox_options=None):
self.profile = firefox_profile
self.binary = firefox_binary
if firefox_options is None:
if self.profile is None:
self.profile = FirefoxProfile()
self.profile.native_events_enabled = (
self.NATIVE_EVENTS_ALLOWED and self.profile.native_events_enabled)
if capabilities is None:
capabilities = DesiredCapabilities.FIREFOX
if self.binary is None:
self.binary = capabilities.get("binary") or FirefoxBinary()
firefox_options = Options()
firefox_options.binary_location = self.binary if isinstance(self.binary, basestring) else self.binary._get_firefox_start_cmd()
firefox_options.profile = self.profile
if capabilities is None:
capabilities = firefox_options.to_capabilities()
else:
capabilities.update(firefox_options.to_capabilities())
# marionette
if capabilities.get("marionette"):
self.binary = firefox_options.binary_location
if isinstance(firefox_options.binary_location, FirefoxBinary):
self.binary = firefox_options.binary_location._get_firefox_start_cmd()
self.service = Service(executable_path, firefox_binary=self.binary)
self.service.start()
executor = FirefoxRemoteConnection(
remote_server_addr=self.service.service_url)
RemoteWebDriver.__init__(
self,
command_executor=executor,
desired_capabilities=capabilities,
keep_alive=True)
else:
# Oh well... sometimes the old way is the best way.
if proxy is not None:
proxy.add_to_capabilities(capabilities)
if self.binary is None:
self.binary = firefox_options.binary_location or FirefoxBinary()
if self.profile is None:
self.profile = firefox_options.profile or FirefoxProfile()
executor = ExtensionConnection("127.0.0.1", self.profile,
self.binary, timeout)
RemoteWebDriver.__init__(self,
command_executor=executor,
desired_capabilities=capabilities,
keep_alive=True)
self._is_remote = False
def quit(self):
"""Quits the driver and close every associated window."""
try:
RemoteWebDriver.quit(self)
except (http_client.BadStatusLine, socket.error):
# Happens if Firefox shutsdown before we've read the response from
# the socket.
pass
if "specificationLevel" in self.capabilities:
self.service.stop()
else:
self.binary.kill()
try:
shutil.rmtree(self.profile.path)
if self.profile.tempfolder is not None:
shutil.rmtree(self.profile.tempfolder)
except Exception as e:
print(str(e))
@property
def firefox_profile(self):
return self.profile
def set_context(self, context):
self.execute("SET_CONTEXT", {"context": context})
| Python | 0.000007 |
c33b23e1d5263321cc29e2fe1f9871e36d97c5e5 | add method get on opps db redis | opps/db/_redis.py | opps/db/_redis.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from opps.db.conf import settings
from redis import ConnectionPool
from redis import Redis as RedisClient
class Redis:
def __init__(self, key_prefix, key_sufix):
self.key_prefix = key_prefix
self.key_sufix = key_sufix
self.host = settings.OPPS_DB_HOST
self.port = settings.OPPS_DB_PORT
self.db = 0
pool = ConnectionPool(host=self.host,
port=self.port,
db=self.db)
self.conn = RedisClient(connection_pool=pool)
def close(self):
self.conn = None
return True
def key(self):
return '{}_{}_{}'.format(settings.OPPS_DB_NAME,
self.key_prefix,
self.key_sufix)
def save(self, document):
return self.conn.set(self.key(), document)
def get(self):
self.conn.get(self.key())
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
from opps.db.conf import settings
from redis import ConnectionPool
from redis import Redis as RedisClient
class Redis:
def __init__(self, key_prefix, key_sufix):
self.key_prefix = key_prefix
self.key_sufix = key_sufix
self.host = settings.OPPS_DB_HOST
self.port = settings.OPPS_DB_PORT
self.db = 0
pool = ConnectionPool(host=self.host,
port=self.port,
db=self.db)
self.conn = RedisClient(connection_pool=pool)
def close(self):
self.conn = None
return True
def key(self):
return '{}_{}_{}'.format(settings.OPPS_DB_NAME,
self.key_prefix,
self.key_sufix)
def save(self, document):
return self.conn.set(self.key(), document)
| Python | 0 |
b03b168cd752d50f1091106d3f4fcc0a79b22203 | Fix tests | siyavula/latex2image/tests/latex2image_tests.py | siyavula/latex2image/tests/latex2image_tests.py | # coding=utf-8
from unittest import TestCase
from lxml import etree, html
from siyavula.latex2image.imageutils import replace_latex_with_images
class TestBaseEquationToImageConversion(TestCase):
"""Test the equation to image conversion."""
def setUp(self):
self.element_input = etree.Element('xml')
self.div_input = etree.SubElement(self.element_input, 'div')
self.div_input.set('class', 'latex-math')
def test_complex_equation_to_png(self):
self.div_input.text = u'\\(\\begin{{aligned}} \\vec{{F}}_{{g}} & = m\\vec{{g}} \\\\ & = (\\text{{12,7}}\\ \\text{{kg}})(\\text{{9,8}}\\ \\text{{m·s$^{{-2}}$}}) \\\\ & = \\text{{124,46}}\\ \\text{{N}}\\text{{µ µ μ μ}} µ µ μ μ \\end{{aligned}}\\'.replace('{{', '{').replace('}}', '}')
xml = html.tostring(replace_latex_with_images(self.element_input, 'latex-math', '', ''))
self.assertEqual(xml, '<xml><div class="latex-math"><a href="/8996d7eee5c41cdf08aa8c0e9fe42e93.png"><img src="/8996d7eee5c41cdf08aa8c0e9fe42e93.png" srcset="/b0791f40d3207d55907aa0b7df78ca1e.png 2x"></a></div></xml>')
| # coding=utf-8
from unittest import TestCase
from lxml import etree
from siyavula.latex2image.imageutils import replace_latex_with_images
class TestBaseEquationToImageConversion(TestCase):
"""Test the equation to image conversion."""
def setUp(self):
self.element_input = etree.Element('xml')
self.div_input = etree.SubElement(self.element_input, 'div')
self.div_input.set('class', 'latex-math')
def test_complex_equation_to_png(self):
self.div_input.text = u'\\(\\begin{{aligned}} \\vec{{F}}_{{g}} & = m\\vec{{g}} \\\\ & = (\\text{{12,7}}\\ \\text{{kg}})(\\text{{9,8}}\\ \\text{{m·s$^{{-2}}$}}) \\\\ & = \\text{{124,46}}\\ \\text{{N}}\\text{{µ µ μ μ}} µ µ μ μ \\end{{aligned}}\\'.replace('{{', '{').replace('}}', '}')
self.assertEqual(replace_latex_with_images(self.element_input, 'latex-math', '', ''), None)
| Python | 0.000003 |
3fd74018c87ec598de173de7d13224523ee98ec5 | update LATEX_SUBS table | IPython/nbconvert/filters/latex.py | IPython/nbconvert/filters/latex.py | """Latex filters.
Module of useful filters for processing Latex within Jinja latex templates.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2013, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import re
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
# Latex substitutions for escaping latex.
# see: http://stackoverflow.com/questions/16259923/how-can-i-escape-latex-special-characters-inside-django-templates
LATEX_SUBS = {
'&': r'\&',
'%': r'\%',
'$': r'\$',
'#': r'\#',
'_': r'\_',
'{': r'\{',
'}': r'\}',
'~': r'\textasciitilde{}',
'^': r'\^{}',
'\\': r'\textbackslash{}',
}
#-----------------------------------------------------------------------------
# Functions
#-----------------------------------------------------------------------------
__all__ = ['escape_latex',
'strip_math_space']
def escape_latex(text):
"""
Escape characters that may conflict with latex.
Parameters
----------
text : str
Text containing characters that may conflict with Latex
"""
return ''.join([LATEX_SUBS.get(c, c) for c in text])
def strip_math_space(text):
"""
Remove the space between latex math commands and enclosing $ symbols.
This filter is important because latex isn't as flexible as the notebook
front end when it comes to flagging math using ampersand symbols.
Parameters
----------
text : str
Text to filter.
"""
# First, scan through the markdown looking for $. If
# a $ symbol is found, without a preceding \, assume
# it is the start of a math block. UNLESS that $ is
# not followed by another within two math_lines.
math_regions = []
math_lines = 0
within_math = False
math_start_index = 0
ptext = ''
last_character = ""
skip = False
for index, char in enumerate(text):
#Make sure the character isn't preceeded by a backslash
if (char == "$" and last_character != "\\"):
# Close the math region if this is an ending $
if within_math:
within_math = False
skip = True
ptext = ptext+'$'+text[math_start_index+1:index].strip()+'$'
math_regions.append([math_start_index, index+1])
else:
# Start a new math region
within_math = True
math_start_index = index
math_lines = 0
# If we are in a math region, count the number of lines parsed.
# Cancel the math region if we find two line breaks!
elif char == "\n":
if within_math:
math_lines += 1
if math_lines > 1:
within_math = False
ptext = ptext+text[math_start_index:index]
# Remember the last character so we can easily watch
# for backslashes
last_character = char
if not within_math and not skip:
ptext = ptext+char
if skip:
skip = False
return ptext
| """Latex filters.
Module of useful filters for processing Latex within Jinja latex templates.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2013, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import re
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
# Latex substitutions for escaping latex.
# see: http://stackoverflow.com/questions/16259923/how-can-i-escape-latex-special-characters-inside-django-templates
LATEX_SUBS = {
'&': r'\&',
'%': r'\%',
'$': r'\$',
'#': r'\#',
'_': r'\letterunderscore{}',
'{': r'\letteropenbrace{}',
'}': r'\letterclosebrace{}',
'~': r'\lettertilde{}',
'^': r'\letterhat{}',
'\\': r'\letterbackslash{}'}
#-----------------------------------------------------------------------------
# Functions
#-----------------------------------------------------------------------------
__all__ = ['escape_latex',
'strip_math_space']
def escape_latex(text):
"""
Escape characters that may conflict with latex.
Parameters
----------
text : str
Text containing characters that may conflict with Latex
"""
return ''.join([LATEX_SUBS.get(c, c) for c in text])
def strip_math_space(text):
"""
Remove the space between latex math commands and enclosing $ symbols.
This filter is important because latex isn't as flexible as the notebook
front end when it comes to flagging math using ampersand symbols.
Parameters
----------
text : str
Text to filter.
"""
# First, scan through the markdown looking for $. If
# a $ symbol is found, without a preceding \, assume
# it is the start of a math block. UNLESS that $ is
# not followed by another within two math_lines.
math_regions = []
math_lines = 0
within_math = False
math_start_index = 0
ptext = ''
last_character = ""
skip = False
for index, char in enumerate(text):
#Make sure the character isn't preceeded by a backslash
if (char == "$" and last_character != "\\"):
# Close the math region if this is an ending $
if within_math:
within_math = False
skip = True
ptext = ptext+'$'+text[math_start_index+1:index].strip()+'$'
math_regions.append([math_start_index, index+1])
else:
# Start a new math region
within_math = True
math_start_index = index
math_lines = 0
# If we are in a math region, count the number of lines parsed.
# Cancel the math region if we find two line breaks!
elif char == "\n":
if within_math:
math_lines += 1
if math_lines > 1:
within_math = False
ptext = ptext+text[math_start_index:index]
# Remember the last character so we can easily watch
# for backslashes
last_character = char
if not within_math and not skip:
ptext = ptext+char
if skip:
skip = False
return ptext
| Python | 0 |
e8836b134c47080edaf47532d7cb844b307dfb08 | Add a guard against the task list changing when shutting down (#776) | zeroconf/_utils/aio.py | zeroconf/_utils/aio.py | """ Multicast DNS Service Discovery for Python, v0.14-wmcbrine
Copyright 2003 Paul Scott-Murphy, 2014 William McBrine
This module provides a framework for the use of DNS Service Discovery
using IP multicast.
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
USA
"""
import asyncio
import contextlib
import queue
from typing import List, Optional, Set, cast
def get_best_available_queue() -> queue.Queue:
"""Create the best available queue type."""
if hasattr(queue, "SimpleQueue"):
return queue.SimpleQueue() # type: ignore # pylint: disable=all
return queue.Queue()
# Switch to asyncio.wait_for once https://bugs.python.org/issue39032 is fixed
async def wait_event_or_timeout(event: asyncio.Event, timeout: float) -> None:
"""Wait for an event or timeout."""
loop = asyncio.get_event_loop()
future = loop.create_future()
def _handle_timeout() -> None:
if not future.done():
future.set_result(None)
timer_handle = loop.call_later(timeout, _handle_timeout)
event_wait = loop.create_task(event.wait())
def _handle_wait_complete(_: asyncio.Task) -> None:
if not future.done():
future.set_result(None)
event_wait.add_done_callback(_handle_wait_complete)
try:
await future
finally:
timer_handle.cancel()
if not event_wait.done():
event_wait.cancel()
with contextlib.suppress(asyncio.CancelledError):
await event_wait
async def _get_all_tasks(loop: asyncio.AbstractEventLoop) -> List[asyncio.Task]:
"""Return all tasks running."""
await asyncio.sleep(0) # flush out any call_soon_threadsafe
# Make a copy of the tasks in case they change during iteration
if hasattr(asyncio, 'all_tasks'):
return list(asyncio.all_tasks(loop)) # type: ignore # pylint: disable=no-member
return list(asyncio.Task.all_tasks(loop)) # type: ignore # pylint: disable=no-member
async def _wait_for_loop_tasks(wait_tasks: Set[asyncio.Task]) -> None:
"""Wait for the event loop thread we started to shutdown."""
await asyncio.wait(wait_tasks, timeout=1)
def shutdown_loop(loop: asyncio.AbstractEventLoop) -> None:
"""Wait for pending tasks and stop an event loop."""
pending_tasks = set(asyncio.run_coroutine_threadsafe(_get_all_tasks(loop), loop).result())
done_tasks = set(task for task in pending_tasks if not task.done())
pending_tasks -= done_tasks
if pending_tasks:
asyncio.run_coroutine_threadsafe(_wait_for_loop_tasks(pending_tasks), loop).result()
loop.call_soon_threadsafe(loop.stop)
# Remove the call to _get_running_loop once we drop python 3.6 support
def get_running_loop() -> Optional[asyncio.AbstractEventLoop]:
"""Check if an event loop is already running."""
with contextlib.suppress(RuntimeError):
if hasattr(asyncio, "get_running_loop"):
return cast(
asyncio.AbstractEventLoop,
asyncio.get_running_loop(), # type: ignore # pylint: disable=no-member # noqa
)
return asyncio._get_running_loop() # pylint: disable=no-member,protected-access
return None
| """ Multicast DNS Service Discovery for Python, v0.14-wmcbrine
Copyright 2003 Paul Scott-Murphy, 2014 William McBrine
This module provides a framework for the use of DNS Service Discovery
using IP multicast.
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
USA
"""
import asyncio
import contextlib
import queue
from typing import Optional, Set, cast
def get_best_available_queue() -> queue.Queue:
"""Create the best available queue type."""
if hasattr(queue, "SimpleQueue"):
return queue.SimpleQueue() # type: ignore # pylint: disable=all
return queue.Queue()
# Switch to asyncio.wait_for once https://bugs.python.org/issue39032 is fixed
async def wait_event_or_timeout(event: asyncio.Event, timeout: float) -> None:
"""Wait for an event or timeout."""
loop = asyncio.get_event_loop()
future = loop.create_future()
def _handle_timeout() -> None:
if not future.done():
future.set_result(None)
timer_handle = loop.call_later(timeout, _handle_timeout)
event_wait = loop.create_task(event.wait())
def _handle_wait_complete(_: asyncio.Task) -> None:
if not future.done():
future.set_result(None)
event_wait.add_done_callback(_handle_wait_complete)
try:
await future
finally:
timer_handle.cancel()
if not event_wait.done():
event_wait.cancel()
with contextlib.suppress(asyncio.CancelledError):
await event_wait
async def _get_all_tasks(loop: asyncio.AbstractEventLoop) -> Set[asyncio.Task]:
"""Return all tasks running."""
await asyncio.sleep(0) # flush out any call_soon_threadsafe
if hasattr(asyncio, 'all_tasks'):
return cast(Set[asyncio.Task], asyncio.all_tasks(loop)) # type: ignore # pylint: disable=no-member
return cast(Set[asyncio.Task], asyncio.Task.all_tasks(loop)) # type: ignore # pylint: disable=no-member
async def _wait_for_loop_tasks(wait_tasks: Set[asyncio.Task]) -> None:
"""Wait for the event loop thread we started to shutdown."""
await asyncio.wait(wait_tasks, timeout=1)
def shutdown_loop(loop: asyncio.AbstractEventLoop) -> None:
"""Wait for pending tasks and stop an event loop."""
pending_tasks = asyncio.run_coroutine_threadsafe(_get_all_tasks(loop), loop).result()
done_tasks = set(task for task in pending_tasks if not task.done())
pending_tasks -= done_tasks
if pending_tasks:
asyncio.run_coroutine_threadsafe(_wait_for_loop_tasks(pending_tasks), loop).result()
loop.call_soon_threadsafe(loop.stop)
# Remove the call to _get_running_loop once we drop python 3.6 support
def get_running_loop() -> Optional[asyncio.AbstractEventLoop]:
"""Check if an event loop is already running."""
with contextlib.suppress(RuntimeError):
if hasattr(asyncio, "get_running_loop"):
return cast(
asyncio.AbstractEventLoop,
asyncio.get_running_loop(), # type: ignore # pylint: disable=no-member # noqa
)
return asyncio._get_running_loop() # pylint: disable=no-member,protected-access
return None
| Python | 0.009597 |
3989abf6de879af6982a76ea3522f11f789c6569 | Increment version for speedup release | MarkovNetwork/_version.py | MarkovNetwork/_version.py | # -*- coding: utf-8 -*-
"""
Copyright 2016 Randal S. Olson
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
__version__ = '1.3'
| # -*- coding: utf-8 -*-
"""
Copyright 2016 Randal S. Olson
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
__version__ = '1.2'
| Python | 0 |
0a9bd97598bc63450bcf0956242d3b67e2a52d9b | Remove testing code | pysis/reqs/buildings/__init__.py | pysis/reqs/buildings/__init__.py | # -*- encoding: utf-8 -*-
from pysis.reqs.base import Request
from pysis.resources.buildings import Buildings
from pysis.resources.outputs import Outputs
from pysis.resources.blastcells import Blastcells
from pysis.resources.metrics import Metrics
class Get(Request):
uri = 'buildings/{id}'
resource = Buildings
def clean_uri(self):
if not self.id:
return 'buildings'
class GetOutputs(Request):
uri = 'buildings/{id}/outputs'
resource = Outputs
class GetBlastcells(Request):
uri = 'buildings/{id}/blastcells'
resource = Blastcells
class GetInfo(Request):
uri = 'buildings/{id}/info'
resource = Buildings
def clean_uri(self):
if not self.id:
return 'buildings/{id}/info'
class GetMetricsScores(Request):
uri = 'buildings/{id}/metrics/energystar'
resource = Metrics
def clean_uri(self):
if not self.id:
return 'buildings/{id}/metrics'
class GetMetrics(Request):
uri = 'buildings/{id}/metrics'
resource = Metrics
def clean_uri(self):
if not self.id:
return 'buildings/{id}/metrics'
class Set(Request):
uri = 'buildings/{id}'
resource = Buildings
def clean_uri(self):
if not self.id:
return 'buildings/{id}'
| # -*- encoding: utf-8 -*-
from pysis.reqs.base import Request
from pysis.resources.buildings import Buildings
from pysis.resources.outputs import Outputs
from pysis.resources.blastcells import Blastcells
from pysis.resources.metrics import Metrics
class Get(Request):
uri = 'buildings/{id}'
resource = Buildings
def clean_uri(self):
if not self.id:
return 'buildings'
class GetOutputs(Request):
uri = 'buildings/{id}/outputs'
resource = Outputs
class GetBlastcells(Request):
uri = 'buildings/{id}/blastcells'
resource = Blastcells
class GetInfo(Request):
uri = 'buildings/{id}/info'
resource = Buildings
def clean_uri(self):
if not self.id:
return 'buildings/{id}/info'
class GetMetricsScores(Request):
uri = 'buildings/{id}/metrics/energystar'
resource = Metrics
print vars(Request)
def clean_uri(self):
if not self.id:
return 'buildings/{id}/metrics'
class GetMetrics(Request):
uri = 'buildings/{id}/metrics'
resource = Metrics
def clean_uri(self):
if not self.id:
return 'buildings/{id}/metrics'
class Set(Request):
uri = 'buildings/{id}'
resource = Buildings
def clean_uri(self):
if not self.id:
return 'buildings/{id}'
| Python | 0.000002 |
dddb366dd56b85070d9ab51dab7a9ab7d317d1e5 | Include working directory path from settings | src/tenyksafk/main.py | src/tenyksafk/main.py | import sqlite3
from os.path import join
from tenyksservice import TenyksService, run_service
from tenyksservice.config import settings
class AFK(TenyksService):
direct_only = False
irc_message_filters = {
'depart': [r'^(?i)(xopa|away|afk|brb)'],
'return': [r'^(?i)(xoka|back)'],
'query': [r'(?P<nick>(.*))\?$'],
'list': [r'list']
}
def __init__(self, *args, **kwargs):
super(AFK, self).__init__(*args, **kwargs)
self.create_tables(self.fetch_cursor())
def handle_depart(self, data, match):
nick = data['nick']
if not self.user_exists(self.fetch_cursor(), nick):
self.create_user(self.fetch_cursor(), nick, True)
if not self.user_away(self.fetch_cursor(), nick):
self.send('{nick} is now AFK.'.format(nick=nick), data)
self.user_depart(self.fetch_cursor(), nick)
def handle_return(self, data, match):
nick = data['nick']
if not self.user_exists(self.fetch_cursor(), nick):
self.create_user(self.fetch_cursor(), nick, False)
if self.user_away(self.fetch_cursor(), nick):
self.send('{nick} is no longer AFK.'.format(nick=nick), data)
self.user_return(self.fetch_cursor(), nick)
def handle_query(self, data, match):
nick = match.groupdict()['nick']
if self.user_exists(self.fetch_cursor(), nick):
status = 'AFK' if self.user_away(self.fetch_cursor(), nick) else 'present'
self.send('{nick} is currently {status}.'.format(nick=nick, status=status), data)
else:
self.send('{nick}\'s status is unknown.'.format(nick=nick), data)
def handle_list(self, data, match):
afkers = self.fetch_afk(self.fetch_cursor())
if len(afkers) == 0:
self.send('There are currently no AFKers.', data)
else:
self.send('AFKers: {afk}'.format(afk=', '.join('%s' % nick for nick in afkers)), data)
def create_tables(self, cur):
table_sql = '''
CREATE TABLE IF NOT EXISTS afkers (
id INTEGER PRIMARY KEY,
nick TEXT,
away BOOLEAN
);
'''
cur.executescript(table_sql)
def fetch_cursor(self):
db_file = '{name}.db'.format(name=self.name)
conn = sqlite3.connect(join(settings.WORKING_DIR, db_file))
return conn.cursor()
def create_user(self, cur, nick, away=False):
result = cur.execute('''
INSERT INTO afkers (nick, away)
VALUES (?, ?)
''', (nick, away))
result.connection.commit()
def user_exists(self, cur, nick):
result = cur.execute('''
SELECT * FROM afkers
WHERE nick = ?
''', (nick,))
return result.fetchone() is not None
def user_depart(self, cur, nick):
result = cur.execute('''
UPDATE afkers SET away = ?
WHERE nick = ?
''', (True, nick))
result.connection.commit()
def user_return(self, cur, nick):
result = cur.execute('''
UPDATE afkers SET away = ?
WHERE nick = ?
''', (False, nick))
result.connection.commit()
def user_away(self, cur, nick):
result = cur.execute('''
SELECT away from afkers
WHERE nick = ?
''', (nick,))
return result.fetchone()[0]
def fetch_afk(self, cur):
result = cur.execute('''
SELECT nick FROM afkers
WHERE away = 1
ORDER BY nick ASC
''')
return result.fetchall();
def main():
run_service(AFK)
if __name__ == '__main__':
main()
| import sqlite3
from os.path import join
from tenyksservice import TenyksService, run_service
class AFK(TenyksService):
direct_only = False
irc_message_filters = {
'depart': [r'^(?i)(xopa|away|afk|brb)'],
'return': [r'^(?i)(xoka|back)'],
'query': [r'(?P<nick>(.*))\?$'],
'list': [r'list']
}
def __init__(self, *args, **kwargs):
super(AFK, self).__init__(*args, **kwargs)
self.create_tables(self.fetch_cursor())
def handle_depart(self, data, match):
nick = data['nick']
if not self.user_exists(self.fetch_cursor(), nick):
self.create_user(self.fetch_cursor(), nick, True)
if not self.user_away(self.fetch_cursor(), nick):
self.send('{nick} is now AFK.'.format(nick=nick), data)
self.user_depart(self.fetch_cursor(), nick)
def handle_return(self, data, match):
nick = data['nick']
if not self.user_exists(self.fetch_cursor(), nick):
self.create_user(self.fetch_cursor(), nick, False)
if self.user_away(self.fetch_cursor(), nick):
self.send('{nick} is no longer AFK.'.format(nick=nick), data)
self.user_return(self.fetch_cursor(), nick)
def handle_query(self, data, match):
nick = match.groupdict()['nick']
if self.user_exists(self.fetch_cursor(), nick):
status = 'AFK' if self.user_away(self.fetch_cursor(), nick) else 'present'
self.send('{nick} is currently {status}.'.format(nick=nick, status=status), data)
else:
self.send('{nick}\'s status is unknown.'.format(nick=nick), data)
def handle_list(self, data, match):
afkers = self.fetch_afk(self.fetch_cursor())
if len(afkers) == 0:
self.send('There are currently no AFKers.', data)
else:
self.send('AFKers: {afk}'.format(afk=', '.join('%s' % nick for nick in afkers)), data)
def create_tables(self, cur):
table_sql = '''
CREATE TABLE IF NOT EXISTS afkers (
id INTEGER PRIMARY KEY,
nick TEXT,
away BOOLEAN
);
'''
cur.executescript(table_sql)
def fetch_cursor(self):
db_file = '{name}.db'.format(name=self.name)
conn = sqlite3.connect(db_file)
return conn.cursor()
def create_user(self, cur, nick, away=False):
result = cur.execute('''
INSERT INTO afkers (nick, away)
VALUES (?, ?)
''', (nick, away))
result.connection.commit()
def user_exists(self, cur, nick):
result = cur.execute('''
SELECT * FROM afkers
WHERE nick = ?
''', (nick,))
return result.fetchone() is not None
def user_depart(self, cur, nick):
result = cur.execute('''
UPDATE afkers SET away = ?
WHERE nick = ?
''', (True, nick))
result.connection.commit()
def user_return(self, cur, nick):
result = cur.execute('''
UPDATE afkers SET away = ?
WHERE nick = ?
''', (False, nick))
result.connection.commit()
def user_away(self, cur, nick):
result = cur.execute('''
SELECT away from afkers
WHERE nick = ?
''', (nick,))
return result.fetchone()[0]
def fetch_afk(self, cur):
result = cur.execute('''
SELECT nick FROM afkers
WHERE away = 1
ORDER BY nick ASC
''')
return result.fetchall();
def main():
run_service(AFK)
if __name__ == '__main__':
main()
| Python | 0 |
d2ae65564c173789578c0119be7d1143d7c59641 | Fix mistaken variable name. | pybtex/style/formatting/__init__.py | pybtex/style/formatting/__init__.py | # Copyright (C) 2006, 2007, 2008, 2009 Andrey Golovizin
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from pybtex.core import FormattedEntry
from pybtex.style.template import node, join
from pybtex.richtext import Symbol, Text
from pybtex.plugin import find_plugin
@node
def toplevel(children, data):
return join(sep=Symbol('newblock')) [children].format_data(data)
class FormatterBase:
default_label_style = 'number'
default_name_style = 'plain'
def __init__(self, label_style=None, name_style=None, abbreviate_names=False, **kwargs):
if name_style is None:
name_style = find_plugin('pybtex.style.names', self.default_name_style)
if label_style is None:
label_style = find_plugin('pybtex.style.labels', self.default_label_style)
self.format_label = label_style.LabelStyle().format
self.format_name = name_style.NameStyle().format
self.abbreviate_names = abbreviate_names
def format_entries(self, entries):
for number, (key, entry) in enumerate(entries):
entry.number = number + 1
for persons in entry.persons.itervalues():
for person in persons:
person.text = self.format_name(person, self.abbreviate_names)
f = getattr(self, "format_" + entry.type)
text = f(entry)
label = self.format_label(entry)
yield FormattedEntry(key, text, label)
| # Copyright (C) 2006, 2007, 2008, 2009 Andrey Golovizin
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from pybtex.core import FormattedEntry
from pybtex.style.template import node, join
from pybtex.richtext import Symbol, Text
from pybtex.plugin import find_plugin
@node
def toplevel(children, data):
return join(sep=Symbol('newblock')) [children].format_data(data)
class FormatterBase:
default_label_style = 'number'
default_name_style = 'plain'
def __init__(self, label_style=None, name_style=None, abbreviate_names=False, **kwargs):
if name_style is None:
name_style = find_plugin('pybtex.style.names', self.default_name_style)
if label_style is None:
label_format = find_plugin('pybtex.style.labels', self.default_label_style)
self.format_label = label_style.LabelStyle().format
self.format_name = name_style.NameStyle().format
self.abbreviate_names = abbreviate_names
def format_entries(self, entries):
for number, (key, entry) in enumerate(entries):
entry.number = number + 1
for persons in entry.persons.itervalues():
for person in persons:
person.text = self.format_name(person, self.abbreviate_names)
f = getattr(self, "format_" + entry.type)
text = f(entry)
label = self.format_label(entry)
yield FormattedEntry(key, text, label)
| Python | 0.000063 |
43294bc83d013d79d909cadfcf2508aca0c575f6 | Fix for bad y param. | exp/sandbox/predictors/profile/DecisionTreeLearnerProfile.py | exp/sandbox/predictors/profile/DecisionTreeLearnerProfile.py |
import numpy
import logging
import sys
from apgl.util.ProfileUtils import ProfileUtils
from exp.sandbox.predictors.DecisionTreeLearner import DecisionTreeLearner
from apgl.data.ExamplesGenerator import ExamplesGenerator
from sklearn.tree import DecisionTreeRegressor
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
numpy.random.seed(22)
class DecisionTreeLearnerProfile(object):
def profileLearnModel(self):
numExamples = 1000
numFeatures = 50
minSplit = 10
maxDepth = 20
generator = ExamplesGenerator()
X, y = generator.generateBinaryExamples(numExamples, numFeatures)
y = numpy.array(y, numpy.float)
learner = DecisionTreeLearner(minSplit=minSplit, maxDepth=maxDepth, pruneType="REP-CV")
#learner.learnModel(X, y)
#print("Done")
ProfileUtils.profile('learner.learnModel(X, y) ', globals(), locals())
print(learner.getTree().getNumVertices())
def profileDecisionTreeRegressor(self):
numExamples = 1000
numFeatures = 20
minSplit = 10
maxDepth = 20
generator = ExamplesGenerator()
X, y = generator.generateBinaryExamples(numExamples, numFeatures)
regressor = DecisionTreeRegressor(min_split=minSplit, max_depth=maxDepth, min_density=0.0)
ProfileUtils.profile('regressor.fit(X, y)', globals(), locals())
def profilePredict(self):
#Make the prdiction function faster
numExamples = 1000
numFeatures = 20
minSplit = 1
maxDepth = 20
generator = ExamplesGenerator()
X, y = generator.generateBinaryExamples(numExamples, numFeatures)
learner = DecisionTreeLearner(minSplit=minSplit, maxDepth=maxDepth)
learner.learnModel(X, y)
print(learner.getTree().getNumVertices())
ProfileUtils.profile('learner.predict(X)', globals(), locals())
print(learner.getTree().getNumVertices())
profiler = DecisionTreeLearnerProfile()
profiler.profileLearnModel() #0.418
#profiler.profileDecisionTreeRegressor() #0.020
#profiler.profilePredict() #0.024
|
import numpy
import logging
import sys
from apgl.util.ProfileUtils import ProfileUtils
from exp.sandbox.predictors.DecisionTreeLearner import DecisionTreeLearner
from apgl.data.ExamplesGenerator import ExamplesGenerator
from sklearn.tree import DecisionTreeRegressor
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
numpy.random.seed(22)
class DecisionTreeLearnerProfile(object):
def profileLearnModel(self):
numExamples = 1000
numFeatures = 20
minSplit = 10
maxDepth = 20
generator = ExamplesGenerator()
X, y = generator.generateBinaryExamples(numExamples, numFeatures)
learner = DecisionTreeLearner(minSplit=minSplit, maxDepth=maxDepth, pruneType="REP-CV")
#learner.learnModel(X, y)
#print("Done")
ProfileUtils.profile('learner.learnModel(X, y) ', globals(), locals())
print(learner.getTree().getNumVertices())
def profileDecisionTreeRegressor(self):
numExamples = 1000
numFeatures = 20
minSplit = 10
maxDepth = 20
generator = ExamplesGenerator()
X, y = generator.generateBinaryExamples(numExamples, numFeatures)
regressor = DecisionTreeRegressor(min_split=minSplit, max_depth=maxDepth, min_density=0.0)
ProfileUtils.profile('regressor.fit(X, y)', globals(), locals())
def profilePredict(self):
#Make the prdiction function faster
numExamples = 1000
numFeatures = 20
minSplit = 1
maxDepth = 20
generator = ExamplesGenerator()
X, y = generator.generateBinaryExamples(numExamples, numFeatures)
learner = DecisionTreeLearner(minSplit=minSplit, maxDepth=maxDepth)
learner.learnModel(X, y)
print(learner.getTree().getNumVertices())
ProfileUtils.profile('learner.predict(X)', globals(), locals())
print(learner.getTree().getNumVertices())
profiler = DecisionTreeLearnerProfile()
profiler.profileLearnModel() #0.418
#profiler.profileDecisionTreeRegressor() #0.020
#profiler.profilePredict() #0.024
| Python | 0 |
cef60ed7b69b5aec75267ecfa609a5adab9045a8 | fix pycodestyle issue. | accelerator/migrations/0005_legalcheck_userlegalcheck.py | accelerator/migrations/0005_legalcheck_userlegalcheck.py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-05-14 09:25
from __future__ import unicode_literals
import django.db.models.deletion
import swapper
from django.conf import settings
from django.db import (
migrations,
models,
)
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0004_fix_acstream_contenttypes'),
]
operations = [
migrations.CreateModel(
name='LegalCheck',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True,
serialize=False, verbose_name='ID')),
('created_at',
models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('name', models.CharField(default='',
max_length=128,
unique=True)),
('description', models.TextField()),
],
options={
'verbose_name': 'Legal Check',
'db_table': 'accelerator_legalcheck',
'abstract': False,
'managed': True,
'swappable': swapper.swappable_setting('accelerator',
'LegalCheck'),
},
),
migrations.CreateModel(
name='UserLegalCheck',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True,
serialize=False, verbose_name='ID')),
('created_at',
models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('accepted', models.BooleanField(default=False)),
('legal_check',
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,
related_name='user_set',
to=swapper.get_model_name(
'accelerator', 'LegalCheck'))),
('user',
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,
related_name='legalcheck_set',
to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'User Legal Check',
'db_table': 'accelerator_userlegalcheck',
'abstract': False,
'managed': True,
'swappable': swapper.swappable_setting('accelerator',
'UserLegalCheck'),
},
),
migrations.AlterUniqueTogether(
name='userlegalcheck',
unique_together=set([('user', 'legal_check')]),
),
]
| # -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-05-14 09:25
from __future__ import unicode_literals
import django.db.models.deletion
import swapper
from django.conf import settings
from django.db import (
migrations,
models,
)
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0004_fix_acstream_contenttypes'),
]
operations = [
migrations.CreateModel(
name='LegalCheck',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True,
serialize=False, verbose_name='ID')),
('created_at',
models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('name', models.CharField(default='',
max_length=128,
unique=True)),
('description', models.TextField()),
],
options={
'verbose_name': 'Legal Check',
'db_table': 'accelerator_legalcheck',
'abstract': False,
'managed': True,
'swappable': swapper.swappable_setting('accelerator',
'LegalCheck'),
},
),
migrations.CreateModel(
name='UserLegalCheck',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True,
serialize=False, verbose_name='ID')),
('created_at',
models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('accepted', models.BooleanField(default=False)),
('legal_check',
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,
related_name='user_set',
to=swapper.get_model_name(
'accelerator', 'LegalCheck'))),
('user',
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,
related_name='legalcheck_set',
to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'User Legal Check',
'db_table': 'accelerator_userlegalcheck',
'abstract': False,
'managed': True,
'swappable': swapper.swappable_setting('accelerator',
'UserLegalCheck'),
},
),
migrations.AlterUniqueTogether(
name='userlegalcheck',
unique_together=set([('user', 'legal_check')]),
),
]
| Python | 0 |
505d20b1f4de60bdb13810a989b5ea203553c850 | Remove use of np.true_divide | skbio/maths/subsample.py | skbio/maths/subsample.py | #!/usr/bin/env python
r"""
Subsampling (:mod:`skbio.maths.subsample`)
==========================================
.. currentmodule:: skbio.maths.subsample
This module provides functionality for subsampling from vectors of counts.
Functions
---------
.. autosummary::
:toctree: generated/
subsample
"""
from __future__ import division
#-----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import numpy as np
def subsample(counts, n, replace=False):
"""Randomly subsample from a vector of counts.
Returns a copy of `counts` if `n` is equal to or larger than the number of
items in `counts`.
Parameters
----------
counts : 1-D array_like
Vector of counts (integers) to randomly subsample from.
n : int
Number of items to subsample from `counts`.
replace : bool, optional
If ``True``, subsample with replacement. If ``False`` (the default),
subsample without replacement.
Returns
-------
subsampled : ndarray
Subsampled vector of counts where the sum of the elements equals `n`
(i.e., ``subsampled.sum() == n``).
Raises
------
TypeError
If `counts` cannot be safely converted to an integer datatype.
Examples
--------
Subsample 4 items (without replacement) from a vector of counts:
>>> import numpy as np
>>> from skbio.maths.subsample import subsample
>>> a = np.array([4, 5, 0, 2, 1])
>>> sub = subsample(a, 4)
>>> sub.sum()
4
Trying to subsample an equal or greater number of items results in the same
vector as our input:
>>> subsample([0, 3, 0, 1], 8)
array([0, 3, 0, 1])
Subsample 5 items (with replacement):
>>> sub = subsample([1, 0, 1, 2, 2, 3, 0, 1], 5, replace=True)
>>> sub.sum()
5
"""
counts = np.asarray(counts)
counts = counts.astype(int, casting='safe')
if counts.ndim != 1:
raise ValueError("Only 1-D vectors are supported.")
counts_sum = counts.sum()
if counts_sum <= n:
return counts
if replace:
probs = counts / counts_sum
result = np.random.multinomial(n, probs)
else:
nz = counts.nonzero()[0]
unpacked = np.concatenate([np.repeat(np.array(i,), counts[i])
for i in nz])
permuted = np.random.permutation(unpacked)[:n]
result = np.zeros(len(counts), dtype=int)
for p in permuted:
result[p] += 1
return result
| #!/usr/bin/env python
r"""
Subsampling (:mod:`skbio.maths.subsample`)
==========================================
.. currentmodule:: skbio.maths.subsample
This module provides functionality for subsampling from vectors of counts.
Functions
---------
.. autosummary::
:toctree: generated/
subsample
"""
from __future__ import division
#-----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import numpy as np
def subsample(counts, n, replace=False):
"""Randomly subsample from a vector of counts.
Returns a copy of `counts` if `n` is equal to or larger than the number of
items in `counts`.
Parameters
----------
counts : 1-D array_like
Vector of counts (integers) to randomly subsample from.
n : int
Number of items to subsample from `counts`.
replace : bool, optional
If ``True``, subsample with replacement. If ``False`` (the default),
subsample without replacement.
Returns
-------
subsampled : ndarray
Subsampled vector of counts where the sum of the elements equals `n`
(i.e., ``subsampled.sum() == n``).
Raises
------
TypeError
If `counts` cannot be safely converted to an integer datatype.
Examples
--------
Subsample 4 items (without replacement) from a vector of counts:
>>> import numpy as np
>>> from skbio.maths.subsample import subsample
>>> a = np.array([4, 5, 0, 2, 1])
>>> sub = subsample(a, 4)
>>> sub.sum()
4
Trying to subsample an equal or greater number of items results in the same
vector as our input:
>>> subsample([0, 3, 0, 1], 8)
array([0, 3, 0, 1])
Subsample 5 items (with replacement):
>>> sub = subsample([1, 0, 1, 2, 2, 3, 0, 1], 5, replace=True)
>>> sub.sum()
5
"""
counts = np.asarray(counts)
counts = counts.astype(int, casting='safe')
if counts.ndim != 1:
raise ValueError("Only 1-D vectors are supported.")
counts_sum = counts.sum()
if counts_sum <= n:
return counts
if replace:
probs = np.true_divide(counts, counts_sum)
result = np.random.multinomial(n, probs)
else:
nz = counts.nonzero()[0]
unpacked = np.concatenate([np.repeat(np.array(i,), counts[i])
for i in nz])
permuted = np.random.permutation(unpacked)[:n]
result = np.zeros(len(counts), dtype=int)
for p in permuted:
result[p] += 1
return result
| Python | 0 |
3de3e4bf2f0df0d602c2f69dd5a06016bf31eb9d | rebuild checkpoints when something breaks while updating group exports | couchexport/groupexports.py | couchexport/groupexports.py | from couchexport.models import GroupExportConfiguration, SavedBasicExport
from couchdbkit.exceptions import ResourceNotFound
from datetime import datetime
import os
import json
from couchexport.tasks import Temp, rebuild_schemas
from couchexport.export import SchemaMismatchException
from dimagi.utils.logging import notify_exception
def export_for_group(export_id, output_dir):
try:
config = GroupExportConfiguration.get(export_id)
except ResourceNotFound:
raise Exception("Couldn't find an export with id %s" % export_id)
for config, schema in config.all_exports:
try:
tmp, _ = schema.get_export_files(format=config.format)
except SchemaMismatchException, e:
# fire off a delayed force update to prevent this from happening again
rebuild_schemas.delay(config.index)
msg = "Saved export failed for group export {index}. The specific error is {msg}."
notify_exception(None, msg.format(index=config.index,
msg=str(e)))
# TODO: do we care enough to notify the user?
# This is typically only called by things like celerybeat.
continue
payload = Temp(tmp).payload
if output_dir == "couch":
saved = SavedBasicExport.view("couchexport/saved_exports",
key=json.dumps(config.index),
include_docs=True,
reduce=False).one()
if not saved:
saved = SavedBasicExport(configuration=config)
saved.save()
saved.put_attachment(payload, config.filename)
saved.last_updated = datetime.utcnow()
# force update the config in case it changed.
# redundant in the create case
saved.configuration = config
saved.save()
else:
with open(os.path.join(output_dir, config.filename), "wb") as f:
f.write(payload)
| from couchexport.models import GroupExportConfiguration, SavedBasicExport
from couchdbkit.exceptions import ResourceNotFound
from datetime import datetime
import os
import json
from couchexport.tasks import Temp
def export_for_group(export_id, output_dir):
try:
config = GroupExportConfiguration.get(export_id)
except ResourceNotFound:
raise Exception("Couldn't find an export with id %s" % export_id)
for config, schema in config.all_exports:
tmp, _ = schema.get_export_files(format=config.format)
payload = Temp(tmp).payload
if output_dir == "couch":
saved = SavedBasicExport.view("couchexport/saved_exports",
key=json.dumps(config.index),
include_docs=True,
reduce=False).one()
if not saved:
saved = SavedBasicExport(configuration=config)
saved.save()
saved.put_attachment(payload, config.filename)
saved.last_updated = datetime.utcnow()
# force update the config in case it changed.
# redundant in the create case
saved.configuration = config
saved.save()
else:
with open(os.path.join(output_dir, config.filename), "wb") as f:
f.write(payload)
| Python | 0 |
1746dad3e5bb218aede86cdb38e458a3f7ce270c | Update Inputkey.py | python/inputkeyboard/Inputkey.py | python/inputkeyboard/Inputkey.py | import sys, tty, termios
class _Getch:
def __call__(self, a):
return self._get_key(a)
def _get_key(self, a):
fd = sys.stdin.fileno()
old = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(a)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old)
return ch
def arrow_get():
ARROW_KEY = {
'\x1b[A' :'up',
'\x1b[B' :'down',
'\x1b[C' :'right',
'\x1b[D' :'left',
'\x1b\x1b\x1b' :'esc'
}
while True:
raw_key = _Getch()
while True:
# 방향키 읽으려면 3으로 줘야함
# 이유: 방향키가 이스케이프문자포함해서 3자리
# 그런데 3으로 주면 일반문자 3자리쌓여야 출력함
input_key = raw_key(3)
if input_key != '':
break
if input_key in ARROW_KEY.keys():
return ARROW_KEY.get(input_key)
else:
continue
def get():
while True:
raw_key = _Getch()
while True:
input_key = raw_key(1)
if input_key != '':
break
return input_key
| import sys, tty, termios, time
class _Getch:
def __call__(self, a):
return self._get_key(a)
def _get_key(self, a):
fd = sys.stdin.fileno()
old = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(a)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old)
return ch
def get():
while True:
raw_key = _Getch()
while True:
input_key = raw_key(1)
if input_key != '':
break
return input_key
def arrow_get():
ARROW_KEY = {
'\x1b[A' :'up',
'\x1b[B' :'down',
'\x1b[C' :'right',
'\x1b[D' :'left'
}
while True:
raw_key = _Getch()
while True:
# 방향키 읽으려면 3으로 줘야함
# 이유: 방향키가 이스케이프문자포함해서 3자리
# 그런데 3으로 주면 일반문자 3자리쌓여야 출력함
input_key = raw_key(3)
if input_key != '':
break
if input_key in ARROW_KEY.keys():
return ARROW_KEY.get(input_key)
else:
continue
| Python | 0.000002 |
2c43cf3368742d7bb0acb91118ff07aeb1fe4183 | Fix comment typo. | qipipe/staging/sarcoma_config.py | qipipe/staging/sarcoma_config.py | import os
from six.moves.configparser import ConfigParser as Config
from six.moves.configparser import NoOptionError
CFG_FILE = os.path.abspath(
os.path.join( os.path.dirname(__file__), '..', 'conf', 'sarcoma.cfg')
)
"""
The Sarcoma Tumor Location configuration file. This file contains
properties that associate the subject name to the location, e.g.::
Sarcoma004 = SHOULDER
The value is the SNOMED anatomy term.
"""
class ConfigError(Exception):
pass
def sarcoma_location(subject):
"""
:param subject: the XNAT Subject ID
:return: the subject tumor location
"""
try:
return sarcoma_config().get('Tumor Location', subject)
except NoOptionError:
raise ConfigError("Tumor location for subject %s was not found in the"
" sarcoma configuration file %s" % (subject, CFG_FILE))
def sarcoma_config():
"""
:return: the sarcoma configuration
:rtype: ConfigParser
"""
# Read the configuration file on demand.
if not hasattr(sarcoma_config, 'instance'):
sarcoma_config.instance = Config()
sarcoma_config.instance.read(CFG_FILE)
return sarcoma_config.instance
| import os
from six.moves.configparser import ConfigParser as Config
from six.moves.configparser import NoOptionError
CFG_FILE = os.path.abspath(
os.path.join( os.path.dirname(__file__), '..', 'conf', 'sarcoma.cfg')
)
"""
The Sarcoma Tumor Location configuration file. This file contains
properties that associat the subject name to the location, e.g.::
Sarcoma004 = SHOULDER
The value is the SNOMED anatomy term.
"""
class ConfigError(Exception):
pass
def sarcoma_location(subject):
"""
:param subject: the XNAT Subject ID
:return: the subject tumor location
"""
try:
return sarcoma_config().get('Tumor Location', subject)
except NoOptionError:
raise ConfigError("Tumor location for subject %s was not found in the"
" sarcoma configuration file %s" % (subject, CFG_FILE))
def sarcoma_config():
"""
:return: the sarcoma configuration
:rtype: ConfigParser
"""
# Read the configuration file on demand.
if not hasattr(sarcoma_config, 'instance'):
sarcoma_config.instance = Config()
sarcoma_config.instance.read(CFG_FILE)
return sarcoma_config.instance
| Python | 0 |
f0e07f97fd43a0f54c8b0996944038a07e9a0e96 | Add error handling for when the meter name does not match the NEM file | metering/loader.py | metering/loader.py | """
metering.loader
~~~~~~~~~
Define the meter data models
"""
import logging
from nemreader import read_nem_file
from sqlalchemy.orm import sessionmaker
from energy_shaper import split_into_daily_intervals
from . import get_db_engine
from . import save_energy_reading
from . import refresh_daily_stats
from . import refresh_monthly_stats
def load_nem_data(meter_id, nmi, nem_file):
""" Load data from NEM file and save to database """
engine = get_db_engine(meter_id)
Session = sessionmaker(bind=engine)
session = Session()
m = read_nem_file(nem_file)
try:
channels = m.readings[nmi]
except KeyError:
first_nmi = list(m.readings.keys())[0]
logging.warning('NMI of %s not found, using %s instead', nmi, first_nmi)
channels = m.readings[first_nmi]
for ch_name in channels.keys():
reads = split_into_daily_intervals(channels[ch_name])
for read in reads:
try:
quality_method = read[3]
except IndexError:
quality_method = None
save_energy_reading(session, ch_name,
read[0], read[1],
read[2], quality_method)
session.commit()
refresh_daily_stats(meter_id)
refresh_monthly_stats(meter_id)
| """
metering.loader
~~~~~~~~~
Define the meter data models
"""
from nemreader import read_nem_file
from sqlalchemy.orm import sessionmaker
from energy_shaper import split_into_daily_intervals
from . import get_db_engine
from . import save_energy_reading
from . import refresh_daily_stats
from . import refresh_monthly_stats
def load_nem_data(meter_id, nmi, nem_file):
""" Load data from NEM file and save to database """
engine = get_db_engine(meter_id)
Session = sessionmaker(bind=engine)
session = Session()
m = read_nem_file(nem_file)
channels = m.readings[nmi]
for ch_name in channels.keys():
reads = split_into_daily_intervals(channels[ch_name])
for read in reads:
try:
quality_method = read[3]
except IndexError:
quality_method = None
save_energy_reading(session, ch_name,
read[0], read[1],
read[2], quality_method)
session.commit()
refresh_daily_stats(meter_id)
refresh_monthly_stats(meter_id)
| Python | 0.000001 |
57a14c56305f3542e5383bb8189a298bb62f853a | remove qqq debug from wb_debug | Source/Common/wb_debug.py | Source/Common/wb_debug.py | '''
====================================================================
Copyright (c) 2016 Barry A Scott. All rights reserved.
This software is licensed as described in the file LICENSE.txt,
which you should have received as part of this distribution.
====================================================================
wb_debug.py
'''
import time
class WbDebugOption:
__slots__ = ('__enabled', '_log', '__name', '__fmt')
def __init__( self, log, name ):
assert log is not None
self.__enabled = False
self._log = log
self.__name = name
self.__fmt = '%s %%s' % (name,)
def __repr__( self ):
return '<WbDebugOption: %s enabled=%r>' % (self.__name, self.isEnabled())
def enable( self, state=True ):
self.__enabled = state
def isEnabled( self ):
return self.__enabled
def __call__( self, msg ):
if self.__enabled:
self._log.debug( self.__fmt % (msg,) )
class WbDebugSpeedOption(WbDebugOption):
__slots__ = ('__speed_start_time', '__speed_last_event_time')
def __init__( self, log, name ):
super().__init__( log, name )
self.__speed_start_time = time.time()
self.__speed_last_event_time = self.__speed_start_time
def __call__( self, msg, start_timer=False ):
if self.isEnabled():
now = time.time()
if start_timer:
self.__speed_start_time = now
self.__speed_last_event_time = now
start_delta = now - self.__speed_start_time
last_delta = now - self.__speed_last_event_time
self.__speed_last_event_time = now
self._log.debug( 'SPEED %.6f %.6f %s' % (start_delta, last_delta, msg,) )
class WbDebug:
def __init__( self, log ):
self._log = log
self._debugSpeed = WbDebugSpeedOption( self._log, 'SPEED' )
self._debugApp = self.addDebugOption( 'APP' )
self._debugThreading = self.addDebugOption( 'THREADING' )
self._debugMainWindow = self.addDebugOption( 'MAIN WINDOW' )
self._debugTreeModel = self.addDebugOption( 'TREE MODEL' )
self._debugTableModel = self.addDebugOption( 'TABLE MODEL' )
self._debugDiff = self.addDebugOption( 'DIFF' )
def setDebug( self, str_options ):
for option in [s.strip().lower() for s in str_options.split(',')]:
name = '_debug%s' % (''.join( s.capitalize() for s in option.lower().split('-') ),)
if hasattr( self, name ):
getattr( self, name ).enable( True )
else:
msg = 'Unknown debug option %s - see wb_debug.py for available options' % (option,)
print( msg )
def addDebugOption( self, name ):
return WbDebugOption( self._log, name )
| '''
====================================================================
Copyright (c) 2016 Barry A Scott. All rights reserved.
This software is licensed as described in the file LICENSE.txt,
which you should have received as part of this distribution.
====================================================================
wb_debug.py
'''
import time
class WbDebugOption:
__slots__ = ('__enabled', '_log', '__name', '__fmt')
def __init__( self, log, name ):
assert log is not None
self.__enabled = False
self._log = log
self.__name = name
self.__fmt = '%s %%s' % (name,)
def __repr__( self ):
return '<WbDebugOption: %s enabled=%r>' % (self.__name, self.isEnabled())
def enable( self, state=True ):
self.__enabled = state
def isEnabled( self ):
return self.__enabled
def __call__( self, msg ):
if self.__enabled:
self._log.debug( self.__fmt % (msg,) )
class WbDebugSpeedOption(WbDebugOption):
__slots__ = ('__speed_start_time', '__speed_last_event_time')
def __init__( self, log, name ):
super().__init__( log, name )
self.__speed_start_time = time.time()
self.__speed_last_event_time = self.__speed_start_time
def __call__( self, msg, start_timer=False ):
if self.isEnabled():
now = time.time()
if start_timer:
self.__speed_start_time = now
self.__speed_last_event_time = now
start_delta = now - self.__speed_start_time
last_delta = now - self.__speed_last_event_time
self.__speed_last_event_time = now
self._log.debug( 'SPEED %.6f %.6f %s' % (start_delta, last_delta, msg,) )
class WbDebug:
def __init__( self, log ):
self._log = log
self._debugSpeed = WbDebugSpeedOption( self._log, 'SPEED' )
self._debugApp = self.addDebugOption( 'APP' )
self._debugThreading = self.addDebugOption( 'THREADING' )
self._debugMainWindow = self.addDebugOption( 'MAIN WINDOW' )
self._debugTreeModel = self.addDebugOption( 'TREE MODEL' )
self._debugTableModel = self.addDebugOption( 'TABLE MODEL' )
self._debugDiff = self.addDebugOption( 'DIFF' )
def setDebug( self, str_options ):
print( 'qqq setDebug str_options %r' % (str_options,) )
for option in [s.strip().lower() for s in str_options.split(',')]:
name = '_debug%s' % (''.join( s.capitalize() for s in option.lower().split('-') ),)
print( 'qqq setDebug name %r' % (name,) )
if hasattr( self, name ):
getattr( self, name ).enable( True )
else:
msg = 'Unknown debug option %s - see wb_debug.py for available options' % (option,)
print( msg )
def addDebugOption( self, name ):
return WbDebugOption( self._log, name )
| Python | 0.000622 |
6a1b5003547833ffb0cddea933594c0322ad1bf2 | Add complete utils instead | frappe/social/doctype/energy_point_rule/energy_point_rule.py | frappe/social/doctype/energy_point_rule/energy_point_rule.py | # -*- coding: utf-8 -*-
# Copyright (c) 2018, Frappe Technologies and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
import frappe.cache_manager
from frappe.model.document import Document
from frappe.social.doctype.energy_point_settings.energy_point_settings import is_energy_point_enabled
from frappe.social.doctype.energy_point_log.energy_point_log import create_energy_points_log, revert
class EnergyPointRule(Document):
def on_update(self):
frappe.cache_manager.clear_doctype_map('Energy Point Rule', self.name)
def on_trash(self):
frappe.cache_manager.clear_doctype_map('Energy Point Rule', self.name)
def apply(self, doc):
whitelisted_globals = {
"utils": frappe.utils
}
if frappe.safe_eval(self.condition, whitelisted_globals, {'doc': doc.as_dict()}):
multiplier = 1
if self.multiplier_field:
multiplier = doc.get(self.multiplier_field) or 1
points = round(self.points * multiplier)
reference_doctype = doc.doctype
reference_name = doc.name
user = doc.get(self.user_field)
rule = self.name
# incase of zero as result after roundoff
if not points: return
# if user_field has no value
if not user or user == 'Administrator': return
try:
create_energy_points_log(reference_doctype, reference_name, {
'points': points,
'user': user,
'rule': rule
})
except Exception as e:
frappe.log_error(frappe.get_traceback(), 'apply_energy_point')
def process_energy_points(doc, state):
if (frappe.flags.in_patch
or frappe.flags.in_install
or not is_energy_point_enabled()):
return
old_doc = doc.get_doc_before_save()
# check if doc has been cancelled
if old_doc and old_doc.docstatus == 1 and doc.docstatus == 2:
return revert_points_for_cancelled_doc(doc)
for d in frappe.cache_manager.get_doctype_map('Energy Point Rule', doc.doctype,
dict(reference_doctype = doc.doctype, enabled=1)):
frappe.get_doc('Energy Point Rule', d.get('name')).apply(doc)
def revert_points_for_cancelled_doc(doc):
energy_point_logs = frappe.get_all('Energy Point Log', {
'reference_doctype': doc.doctype,
'reference_name': doc.name,
'type': 'Auto'
})
for log in energy_point_logs:
revert(log.name, _('Reference document has been cancelled'))
def get_energy_point_doctypes():
return [
d.reference_doctype for d in frappe.get_all('Energy Point Rule',
['reference_doctype'], {'enabled': 1})
]
| # -*- coding: utf-8 -*-
# Copyright (c) 2018, Frappe Technologies and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
import frappe.cache_manager
from frappe.model.document import Document
from frappe.social.doctype.energy_point_settings.energy_point_settings import is_energy_point_enabled
from frappe.social.doctype.energy_point_log.energy_point_log import create_energy_points_log, revert
class EnergyPointRule(Document):
def on_update(self):
frappe.cache_manager.clear_doctype_map('Energy Point Rule', self.name)
def on_trash(self):
frappe.cache_manager.clear_doctype_map('Energy Point Rule', self.name)
def apply(self, doc):
whitelisted_globals = {
"getdate": frappe.utils.getdate
}
if frappe.safe_eval(self.condition, whitelisted_globals, {'doc': doc.as_dict()}):
multiplier = 1
if self.multiplier_field:
multiplier = doc.get(self.multiplier_field) or 1
points = round(self.points * multiplier)
reference_doctype = doc.doctype
reference_name = doc.name
user = doc.get(self.user_field)
rule = self.name
# incase of zero as result after roundoff
if not points: return
# if user_field has no value
if not user or user == 'Administrator': return
try:
create_energy_points_log(reference_doctype, reference_name, {
'points': points,
'user': user,
'rule': rule
})
except Exception as e:
frappe.log_error(frappe.get_traceback(), 'apply_energy_point')
def process_energy_points(doc, state):
if (frappe.flags.in_patch
or frappe.flags.in_install
or not is_energy_point_enabled()):
return
old_doc = doc.get_doc_before_save()
# check if doc has been cancelled
if old_doc and old_doc.docstatus == 1 and doc.docstatus == 2:
return revert_points_for_cancelled_doc(doc)
for d in frappe.cache_manager.get_doctype_map('Energy Point Rule', doc.doctype,
dict(reference_doctype = doc.doctype, enabled=1)):
frappe.get_doc('Energy Point Rule', d.get('name')).apply(doc)
def revert_points_for_cancelled_doc(doc):
energy_point_logs = frappe.get_all('Energy Point Log', {
'reference_doctype': doc.doctype,
'reference_name': doc.name,
'type': 'Auto'
})
for log in energy_point_logs:
revert(log.name, _('Reference document has been cancelled'))
def get_energy_point_doctypes():
return [
d.reference_doctype for d in frappe.get_all('Energy Point Rule',
['reference_doctype'], {'enabled': 1})
]
| Python | 0 |
c906e675bb4c75286d98d78e4625d12a158652c7 | Update accel.py | apps/accelerometer/accel.py | apps/accelerometer/accel.py | #!/usr/bin/python
# Author : ipmstyle, https://github.com/ipmstyle
# : jeonghoonkang, https://github.com/jeonghoonkang
# for the detail of HW connection, see lcd_connect.py
import sys
from time import strftime, localtime
# beware the dir location, it should exist
sys.path.append("../lcd_berepi/lib")
sys.path.append("../sht20")
from lcd import *
from sht25class import *
def main():
# Initialise display
lcd_init()
#print ip_chk(), wip_chk(), mac_chk(), wmac_chk(), stalk_chk(), time_chk()
while True:
str = ip_chk()
str = str[:-1]
lcd_string('%s ET' %str,LCD_LINE_1,1)
str = mac_chk()
str = str[:-1]
lcd_string('%s' % (tstr),LCD_LINE_1,1)
str = humi_chk()
lcd_string('%.5s ' % (str),LCD_LINE_2,1)
whiteLCDon()
time.sleep(2)
def run_cmd(cmd):
p = Popen(cmd, shell=True, stdout=PIPE)
output = p.communicate()[0]
return output
def temp_chk():
temperature = getTemperature()
return temperature
def humi_chk():
humidity = getHumidity()
return humidity
def time_chk():
time = strftime("%Y-%m%d %H:%M", localtime())
return time
def ip_chk():
cmd = "ip addr show eth0 | grep inet | awk '$2 !~ /^169/ {print $2}' | cut -d/ -f1"
ipAddr = run_cmd(cmd)
return ipAddr
def wip_chk():
cmd = "ip addr show wlan0 | grep inet | awk '{print $2}' | cut -d/ -f1"
wipAddr = run_cmd(cmd)
return wipAddr
def mac_chk():
cmd = "ifconfig -a | grep ^eth | awk '{print $5}'"
macAddr = run_cmd(cmd)
return macAddr
def wmac_chk():
cmd = "ifconfig -a | grep ^wlan | awk '{print $5}'"
wmacAddr = run_cmd(cmd)
return wmacAddr
def stalk_chk():
cmd = "hostname"
return run_cmd(cmd)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
pass
finally:
lcd_byte(0x01, LCD_CMD)
lcd_string("Goodbye!",LCD_LINE_1,2)
GPIO.cleanup()
| #!/usr/bin/python
# Author : ipmstyle, https://github.com/ipmstyle
# : jeonghoonkang, https://github.com/jeonghoonkang
# for the detail of HW connection, see lcd_connect.py
import sys
from time import strftime, localtime
# beware the dir location, it should exist
sys.path.append("../lcd_berepi/lib")
sys.path.append("../sht20")
from lcd import *
from sht25class import *
def main():
# Initialise display
lcd_init()
#print ip_chk(), wip_chk(), mac_chk(), wmac_chk(), stalk_chk(), time_chk()
while True:
str = ip_chk()
str = str[:-1]
lcd_string('%s ET' %str,LCD_LINE_1,1)
str = mac_chk()
str = str[:-1]
str = wip_chk()
str = str[:-1]
lcd_string('%s WL ' % (str),LCD_LINE_2,1)
str = wmac_chk()
str = str[:-1]
# lcd_string('%s' % (str),LCD_LINE_2,1)
blueLCDon()
time.sleep(1.2)
str = stalk_chk()
str = str[:-1]
lcd_string('%s' % (tstr),LCD_LINE_1,1)
lcd_string('%s ' % (str),LCD_LINE_2,1)
blueLCDon()
time.sleep(1)
lcd_string('%s' % (tstr),LCD_LINE_1,1)
str = humi_chk()
lcd_string('%.5s ' % (str),LCD_LINE_2,1)
whiteLCDon()
time.sleep(2)
def run_cmd(cmd):
p = Popen(cmd, shell=True, stdout=PIPE)
output = p.communicate()[0]
return output
def temp_chk():
temperature = getTemperature()
return temperature
def humi_chk():
humidity = getHumidity()
return humidity
def time_chk():
time = strftime("%Y-%m%d %H:%M", localtime())
return time
def ip_chk():
cmd = "ip addr show eth0 | grep inet | awk '$2 !~ /^169/ {print $2}' | cut -d/ -f1"
ipAddr = run_cmd(cmd)
return ipAddr
def wip_chk():
cmd = "ip addr show wlan0 | grep inet | awk '{print $2}' | cut -d/ -f1"
wipAddr = run_cmd(cmd)
return wipAddr
def mac_chk():
cmd = "ifconfig -a | grep ^eth | awk '{print $5}'"
macAddr = run_cmd(cmd)
return macAddr
def wmac_chk():
cmd = "ifconfig -a | grep ^wlan | awk '{print $5}'"
wmacAddr = run_cmd(cmd)
return wmacAddr
def stalk_chk():
cmd = "hostname"
return run_cmd(cmd)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
pass
finally:
lcd_byte(0x01, LCD_CMD)
lcd_string("Goodbye!",LCD_LINE_1,2)
GPIO.cleanup()
| Python | 0.000001 |
1ed14e9231d295c6db83337f7cf2b586a39dc3dc | Add timestamp to payment log list display | apps/cowry_docdata/admin.py | apps/cowry_docdata/admin.py | from babel.numbers import format_currency
from django.contrib import admin
from django.core.urlresolvers import reverse
from django.utils import translation
from .models import DocDataPaymentOrder, DocDataPayment, DocDataPaymentLogEntry
class DocDataPaymentLogEntryInine(admin.TabularInline):
model = DocDataPaymentLogEntry
can_delete = False
extra = 0
max_num = 0
fields = ('timestamp', 'level', 'message')
readonly_fields = fields
class DocDataPaymentInline(admin.TabularInline):
model = DocDataPayment
can_delete = False
extra = 0
max_num = 0
fields = ('payment_method', 'status', 'created', 'updated')
readonly_fields = fields
class DocDataPaymentOrderAdmin(admin.ModelAdmin):
list_filter = ('status',)
list_display = ('created', 'amount_override', 'status')
raw_id_fields = ('order',)
search_fields = ('payment_order_id', 'merchant_order_reference')
inlines = (DocDataPaymentInline, DocDataPaymentLogEntryInine)
def amount_override(self, obj):
language = translation.get_language().split('-')[0]
return format_currency(obj.amount / 100, obj.currency, locale=language)
amount_override.short_description = 'amount'
admin.site.register(DocDataPaymentOrder, DocDataPaymentOrderAdmin)
class DocDataPaymentLogEntryAdmin(admin.ModelAdmin):
# List view.
list_display = ('payment', 'timestamp', 'level', 'message')
list_filter = ('level', 'timestamp')
search_fields = ('message',)
def payment(self, obj):
payment = obj.docdata_payment_order
url = reverse('admin:%s_%s_change' % (payment._meta.app_label, payment._meta.module_name), args=[payment.id])
return "<a href='%s'>%s</a>" % (str(url), payment)
payment.allow_tags = True
# Don't allow the detail view to be accessed.
def has_change_permission(self, request, obj=None):
if not obj:
return True
return False
admin.site.register(DocDataPaymentLogEntry, DocDataPaymentLogEntryAdmin)
| from babel.numbers import format_currency
from django.contrib import admin
from django.core.urlresolvers import reverse
from django.utils import translation
from .models import DocDataPaymentOrder, DocDataPayment, DocDataPaymentLogEntry
class DocDataPaymentLogEntryInine(admin.TabularInline):
model = DocDataPaymentLogEntry
can_delete = False
extra = 0
max_num = 0
fields = ('timestamp', 'level', 'message')
readonly_fields = fields
class DocDataPaymentInline(admin.TabularInline):
model = DocDataPayment
can_delete = False
extra = 0
max_num = 0
fields = ('payment_method', 'status', 'created', 'updated')
readonly_fields = fields
class DocDataPaymentOrderAdmin(admin.ModelAdmin):
list_filter = ('status',)
list_display = ('created', 'amount_override', 'status')
raw_id_fields = ('order',)
search_fields = ('payment_order_id', 'merchant_order_reference')
inlines = (DocDataPaymentInline, DocDataPaymentLogEntryInine)
def amount_override(self, obj):
language = translation.get_language().split('-')[0]
return format_currency(obj.amount / 100, obj.currency, locale=language)
amount_override.short_description = 'amount'
admin.site.register(DocDataPaymentOrder, DocDataPaymentOrderAdmin)
class DocDataPaymentLogEntryAdmin(admin.ModelAdmin):
# List view.
list_display = ('payment', 'level', 'message')
list_filter = ('level', 'timestamp')
search_fields = ('message',)
def payment(self, obj):
payment = obj.docdata_payment_order
url = reverse('admin:%s_%s_change' % (payment._meta.app_label, payment._meta.module_name), args=[payment.id])
return "<a href='%s'>%s</a>" % (str(url), payment)
payment.allow_tags = True
# Don't allow the detail view to be accessed.
def has_change_permission(self, request, obj=None):
if not obj:
return True
return False
admin.site.register(DocDataPaymentLogEntry, DocDataPaymentLogEntryAdmin)
| Python | 0.000001 |
8064be72de340fca963da2cade2b73aa969fbdbd | Add string representation for Activity model | csunplugged/activities/models.py | csunplugged/activities/models.py | from django.db import models
class Activity(models.Model):
name = models.CharField(max_length=200)
description = models.TextField()
def __str__(self):
return self.name
| from django.db import models
class Activity(models.Model):
name = models.CharField(max_length=200)
description = models.TextField()
| Python | 0.000039 |
dd4e62667da94469a8bbb6dd0ccd881124e7665f | Fix return value of terraform.render | src/buildercore/terraform.py | src/buildercore/terraform.py | import json
from buildercore.utils import ensure
RESOURCE_TYPE_FASTLY = 'fastly_service_v1'
RESOURCE_NAME_FASTLY = 'fastly-cdn'
def render(context):
if not context['fastly']:
return '{}'
ensure(len(context['fastly']['subdomains']) == 1, "Only 1 subdomain for Fastly CDNs is supported")
tf_file = {
'resource': {
RESOURCE_TYPE_FASTLY: {
# must be unique but only in a certain context like this, use some constants
RESOURCE_NAME_FASTLY: {
'name': context['stackname'],
'domain': {
'name': context['fastly']['subdomains'][0],
},
'backend': {
'address': context['full_hostname'],
'name': context['stackname'],
'port': 443,
'use_ssl': True,
'ssl_check_cert': False # bad option
# it's for minimal fuss. Before we start customizing this, a lot of the risk to be tackled
# is integrating everything together with a good lifecycle for adding, modifying and removing
# CDNs that point to CloudFormation-managed resources.
},
'force_destroy': True
}
}
},
}
return json.dumps(tf_file)
| import json
from buildercore.utils import ensure
RESOURCE_TYPE_FASTLY = 'fastly_service_v1'
RESOURCE_NAME_FASTLY = 'fastly-cdn'
def render(context):
if not context['fastly']:
return None
ensure(len(context['fastly']['subdomains']) == 1, "Only 1 subdomain for Fastly CDNs is supported")
tf_file = {
'resource': {
RESOURCE_TYPE_FASTLY: {
# must be unique but only in a certain context like this, use some constants
RESOURCE_NAME_FASTLY: {
'name': context['stackname'],
'domain': {
'name': context['fastly']['subdomains'][0],
},
'backend': {
'address': context['full_hostname'],
'name': context['stackname'],
'port': 443,
'use_ssl': True,
'ssl_check_cert': False # bad option
# it's for minimal fuss. Before we start customizing this, a lot of the risk to be tackled
# is integrating everything together with a good lifecycle for adding, modifying and removing
# CDNs that point to CloudFormation-managed resources.
},
'force_destroy': True
}
}
},
}
return json.dumps(tf_file)
| Python | 0.000126 |
1472e4204e9a654a2296f690e8420c97ef98fb7c | Read device entity id from config file | senic_hub/nuimo_app/components/__init__.py | senic_hub/nuimo_app/components/__init__.py | import logging
from pprint import pformat
from threading import Thread
from .. import matrices
from ..hass import HomeAssistant
logger = logging.getLogger(__name__)
def clamp_value(value, range_):
return min(max(value, range_.start), range_.stop)
class BaseComponent:
MATRIX = matrices.ERROR
def __init__(self, config):
self.name = config['name']
def start(self):
pass
def stop(self):
pass
class ThreadComponent(BaseComponent):
def __init__(self, config):
super().__init__(config)
self.stopping = True
self.thread = None
def run(self):
"""
Concrete components must implement run() method
"""
raise NotImplementedError()
def start(self):
self.stopping = False
self.thread = Thread(target=self.run)
self.thread.start()
def stop(self):
self.stopping = True
class HomeAssistantComponent(BaseComponent):
def __init__(self, ha_domain, config):
super().__init__(config)
self.is_on = False
# TODO: Parametrize HA's address?
self._ha_address = "localhost:8123"
self._ha_entity_id = config['entity_id']
self._ha_domain = ha_domain
def start(self):
super().start()
# TODO: Provide single HA instance to all HA-based components
self._ha = HomeAssistant(self._ha_address, on_connect=self._ha_connected, on_disconnect=self._ha_disconnected)
self._ha.start()
self._ha.register_state_listener(self._ha_entity_id, self._ha_state_changed)
def stop(self):
super().stop()
self._ha.stop()
self._ha.unregister_state_listener(self._ha_entity_id)
def run(self):
pass
def update_from_ha_state(self, state):
self.is_on = state.get('state', None) != 'off'
def call_ha_service(self, service, data={}, on_success=None, on_error=None):
def _on_success(result):
logger.debug("Calling service %s:%s succeeded with result: %s", self._ha_domain, service, pformat(result))
def _on_error():
logger.debug("Failed calling service %s:%s", self._ha_domain, service)
data["entity_id"] = self._ha_entity_id
logger.debug("Call service %s:%s with data: %s", self._ha_domain, service, pformat(data))
self._ha.call_service(self._ha_domain, service, data, on_success or _on_success, on_error or _on_error)
def _ha_connected(self):
def on_state_retrieved(state):
self._ha_state_changed(state)
def on_state_retrieve_failed():
logger.debug("HA get state failed")
self._ha.get_state(self._ha_entity_id, on_state_retrieved, on_state_retrieve_failed)
def _ha_disconnected(self):
pass
def _ha_state_changed(self, state):
if "data" in state:
self.update_from_ha_state(state["data"]["new_state"])
else:
self.update_from_ha_state(state)
| import logging
from pprint import pformat
from threading import Thread
from .. import matrices
from ..hass import HomeAssistant
logger = logging.getLogger(__name__)
def clamp_value(value, range_):
return min(max(value, range_.start), range_.stop)
class BaseComponent:
MATRIX = matrices.ERROR
def __init__(self, config):
self.name = config['name']
def start(self):
pass
def stop(self):
pass
class ThreadComponent(BaseComponent):
def __init__(self, config):
super().__init__(config)
self.stopping = True
self.thread = None
def run(self):
"""
Concrete components must implement run() method
"""
raise NotImplementedError()
def start(self):
self.stopping = False
self.thread = Thread(target=self.run)
self.thread.start()
def stop(self):
self.stopping = True
class HomeAssistantComponent(BaseComponent):
def __init__(self, ha_domain, config):
super().__init__(config)
self.is_on = False
# TODO: Parametrize HA's address?
self._ha_address = "localhost:8123"
# TODO: Read entity id from config
self._ha_entity_id = "media_player.office"
self._ha_domain = ha_domain
def start(self):
super().start()
# TODO: Provide single HA instance to all HA-based components
self._ha = HomeAssistant(self._ha_address, on_connect=self._ha_connected, on_disconnect=self._ha_disconnected)
self._ha.start()
self._ha.register_state_listener(self._ha_entity_id, self._ha_state_changed)
def stop(self):
super().stop()
self._ha.stop()
self._ha.unregister_state_listener(self._ha_entity_id)
def run(self):
pass
def update_from_ha_state(self, state):
self.is_on = state.get('state', None) != 'off'
def call_ha_service(self, service, data={}, on_success=None, on_error=None):
def _on_success(result):
logger.debug("Calling service %s:%s succeeded with result: %s", self._ha_domain, service, pformat(result))
def _on_error():
logger.debug("Failed calling service %s:%s", self._ha_domain, service)
data["entity_id"] = self._ha_entity_id
logger.debug("Call service %s:%s with data: %s", self._ha_domain, service, pformat(data))
self._ha.call_service(self._ha_domain, service, data, on_success or _on_success, on_error or _on_error)
def _ha_connected(self):
def on_state_retrieved(state):
self._ha_state_changed(state)
def on_state_retrieve_failed():
logger.debug("HA get state failed")
self._ha.get_state(self._ha_entity_id, on_state_retrieved, on_state_retrieve_failed)
def _ha_disconnected(self):
pass
def _ha_state_changed(self, state):
if "data" in state:
self.update_from_ha_state(state["data"]["new_state"])
else:
self.update_from_ha_state(state)
| Python | 0 |
a98e536334eb3d3376efe93c1bdc639ecdc4a2a0 | remove unused code | approvaltests/reporters/generic_diff_reporter_factory.py | approvaltests/reporters/generic_diff_reporter_factory.py | import json
from approvaltests.reporters.generic_diff_reporter import GenericDiffReporter
from approvaltests.utils import get_adjacent_file
class GenericDiffReporterFactory(object):
reporters = []
def __init__(self):
self.load(get_adjacent_file('reporters.json'))
self.add_fallback_reporter_config(["PythonNative", "python", [get_adjacent_file("python_native_reporter.py")]])
def add_fallback_reporter_config(self, config):
self.reporters.append(config)
def list(self):
return [r[0] for r in self.reporters]
def get(self, reporter_name):
config = next((r for r in self.reporters if r[0] == reporter_name), None)
return self._create_reporter(config)
@staticmethod
def _create_reporter(config):
if not config:
return None
return GenericDiffReporter(config)
def save(self, file_name):
with open(file_name, 'w') as f:
json.dump(
self.reporters,
f,
sort_keys=True,
indent=2,
separators=(',', ': ')
)
return file_name
def load(self, file_name):
with open(file_name, 'r') as f:
self.reporters = json.load(f)
return self.reporters
def get_first_working(self):
working = (i for i in self.get_all_reporters() if i.is_working())
return next(working, None)
def get_all_reporters(self):
instances = (self._create_reporter(r) for r in self.reporters)
return instances
def remove(self, reporter_name):
self.reporters = [r for r in self.reporters if r[0] != reporter_name]
| import json
from approvaltests.reporters.generic_diff_reporter import GenericDiffReporter
from approvaltests.utils import get_adjacent_file
class GenericDiffReporterFactory(object):
reporters = []
def __init__(self):
self.load(get_adjacent_file('reporters.json'))
self.add_fallback_reporter_config(["PythonNative", "python", [get_adjacent_file("python_native_reporter.py")]])
def add_default_reporter_config(self, config):
self.reporters.insert(0, config)
def add_fallback_reporter_config(self, config):
self.reporters.append(config)
def list(self):
return [r[0] for r in self.reporters]
def get(self, reporter_name):
config = next((r for r in self.reporters if r[0] == reporter_name), None)
return self._create_reporter(config)
@staticmethod
def _create_reporter(config):
if not config:
return None
return GenericDiffReporter(config)
def save(self, file_name):
with open(file_name, 'w') as f:
json.dump(
self.reporters,
f,
sort_keys=True,
indent=2,
separators=(',', ': ')
)
return file_name
def load(self, file_name):
with open(file_name, 'r') as f:
self.reporters = json.load(f)
return self.reporters
def get_first_working(self):
working = (i for i in self.get_all_reporters() if i.is_working())
return next(working, None)
def get_all_reporters(self):
instances = (self._create_reporter(r) for r in self.reporters)
return instances
def remove(self, reporter_name):
self.reporters = [r for r in self.reporters if r[0] != reporter_name]
| Python | 0.000017 |
c5422645773b43de8811c691dfe03c82eda0b935 | put cflags into configure | robustus/detail/install_protobuf.py | robustus/detail/install_protobuf.py | # =============================================================================
# COPYRIGHT 2013 Brain Corporation.
# License under MIT license (see LICENSE file)
# =============================================================================
import logging
import os
from requirement import RequirementException
from utility import unpack, safe_remove, run_shell, ln
import shutil
import subprocess
def install(robustus, requirement_specifier, rob_file, ignore_index):
cwd = os.getcwd()
os.chdir(robustus.cache)
install_dir = os.path.join(robustus.cache, 'protobuf-%s' % requirement_specifier.version)
if not os.path.isdir(install_dir) and not ignore_index:
archive_name = 'protobuf-%s.tar.gz' % requirement_specifier.version
run_shell(['wget', '-c', 'https://protobuf.googlecode.com/svn/rc/%s' % (archive_name,)],
verbose=robustus.settings['verbosity'] >= 1)
run_shell(['tar', 'zxvf', archive_name],
verbose=robustus.settings['verbosity'] >= 1)
# move sources to a folder in order to use a clean name for installation
src_dir = 'protobuf-%s' % requirement_specifier.version
shutil.move(src_dir, src_dir+'_src')
src_dir += '_src'
os.chdir(src_dir)
os.mkdir(install_dir)
retcode = run_shell(['./configure', '--disable-shared',
'CFLAGS=-fPIC',
'CXXFLAGS=-fPIC',
'--prefix', install_dir],
verbose=robustus.settings['verbosity'] >= 1)
if retcode:
raise RequirementException('Failed to configure protobuf compilation')
retcode = run_shell('make', shell=True,
verbose=robustus.settings['verbosity'] >= 1)
if retcode:
raise RequirementException('Failed compile protobuf')
retcode = run_shell('make install', shell=True)
if retcode:
raise RequirementException('Failed install protobuf')
os.chdir(robustus.cache)
shutil.rmtree(src_dir)
venv_install_folder = os.path.join(robustus.env, 'protobuf')
if os.path.exists(venv_install_folder):
shutil.rmtree(venv_install_folder)
shutil.copytree(install_dir, venv_install_folder)
executable_path = os.path.join(install_dir, 'bin', 'protoc')
ln(executable_path, os.path.join(robustus.env, 'bin', 'protoc'), force=True)
os.chdir(cwd)
# now install python part
robustus.install_through_wheeling(requirement_specifier, rob_file, ignore_index)
| # =============================================================================
# COPYRIGHT 2013 Brain Corporation.
# License under MIT license (see LICENSE file)
# =============================================================================
import logging
import os
from requirement import RequirementException
from utility import unpack, safe_remove, run_shell, ln
import shutil
import subprocess
def install(robustus, requirement_specifier, rob_file, ignore_index):
cwd = os.getcwd()
os.chdir(robustus.cache)
install_dir = os.path.join(robustus.cache, 'protobuf-%s' % requirement_specifier.version)
if not os.path.isdir(install_dir) and not ignore_index:
archive_name = 'protobuf-%s.tar.gz' % requirement_specifier.version
subprocess.call(['wget', '-c', 'https://protobuf.googlecode.com/svn/rc/%s' % (archive_name,)])
subprocess.call(['tar', 'zxvf', archive_name])
# move sources to a folder in order to use a clean name for installation
src_dir = 'protobuf-%s' % requirement_specifier.version
shutil.move(src_dir, src_dir+'_src')
src_dir += '_src'
os.chdir(src_dir)
os.mkdir(install_dir)
old_cflags = os.environ['CFLAGS']
os.environ['CFLAGS'] = '-fPIC'
retcode = run_shell(['./configure', '--disable-shared', '--prefix', install_dir],
verbose=robustus.settings['verbosity'] >= 1)
os.environ['CFLAGS'] = old_cflags
if retcode:
raise RequirementException('Failed to configure protobuf compilation')
retcode = run_shell('make', shell=True,
verbose=robustus.settings['verbosity'] >= 1)
if retcode:
raise RequirementException('Failed compile protobuf')
retcode = run_shell('make install', shell=True)
if retcode:
raise RequirementException('Failed install protobuf')
os.chdir(robustus.cache)
shutil.rmtree(src_dir)
venv_install_folder = os.path.join(robustus.env, 'protobuf')
if os.path.exists(venv_install_folder):
shutil.rmtree(venv_install_folder)
shutil.copytree(install_dir, venv_install_folder)
executable_path = os.path.join(install_dir, 'bin', 'protoc')
ln(executable_path, os.path.join(robustus.env, 'bin', 'protoc'), force=True)
os.chdir(cwd)
# now install python part
robustus.install_through_wheeling(requirement_specifier, rob_file, ignore_index)
| Python | 0.000001 |
ae2981b26fce2641a9bae5af68a3d5043fdd8b46 | Fix disapear exception message (#31) | ovh/exceptions.py | ovh/exceptions.py | # -*- encoding: utf-8 -*-
#
# Copyright (c) 2013-2016, OVH SAS.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of OVH SAS nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY OVH SAS AND CONTRIBUTORS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL OVH SAS AND CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
All exceptions used in OVH SDK derives from `APIError`
"""
class APIError(Exception):
"""Base OVH API exception, all specific exceptions inherits from it."""
def __init__(self, *args, **kwargs):
self.response = kwargs.pop('response', None)
super(APIError, self).__init__(*args, **kwargs)
class HTTPError(APIError):
"""Raised when the request fails at a low level (DNS, network, ...)"""
class InvalidKey(APIError):
"""Raised when trying to sign request with invalid key"""
class InvalidCredential(APIError):
"""Raised when trying to sign request with invalid consumer key"""
class InvalidResponse(APIError):
"""Raised when api response is not valid json"""
class InvalidRegion(APIError):
"""Raised when region is not in `REGIONS`."""
class ReadOnlyError(APIError):
"""Raised when attempting to modify readonly data."""
class ResourceNotFoundError(APIError):
"""Raised when requested resource does not exist."""
class BadParametersError(APIError):
"""Raised when request contains bad parameters."""
class ResourceConflictError(APIError):
"""Raised when trying to create an already existing resource."""
class NetworkError(APIError):
"""Raised when there is an error from network layer."""
class NotGrantedCall(APIError):
"""Raised when there is an error from network layer."""
class NotCredential(APIError):
"""Raised when there is an error from network layer."""
class Forbidden(APIError):
"""Raised when there is an error from network layer."""
| # -*- encoding: utf-8 -*-
#
# Copyright (c) 2013-2016, OVH SAS.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of OVH SAS nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY OVH SAS AND CONTRIBUTORS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL OVH SAS AND CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
All exceptions used in OVH SDK derives from `APIError`
"""
class APIError(Exception):
"""Base OVH API exception, all specific exceptions inherits from it."""
def __init__(self, *args, **kwargs):
self.response = kwargs.get('response')
class HTTPError(APIError):
"""Raised when the request fails at a low level (DNS, network, ...)"""
class InvalidKey(APIError):
"""Raised when trying to sign request with invalid key"""
class InvalidCredential(APIError):
"""Raised when trying to sign request with invalid consumer key"""
class InvalidResponse(APIError):
"""Raised when api response is not valid json"""
class InvalidRegion(APIError):
"""Raised when region is not in `REGIONS`."""
class ReadOnlyError(APIError):
"""Raised when attempting to modify readonly data."""
class ResourceNotFoundError(APIError):
"""Raised when requested resource does not exist."""
class BadParametersError(APIError):
"""Raised when request contains bad parameters."""
class ResourceConflictError(APIError):
"""Raised when trying to create an already existing resource."""
class NetworkError(APIError):
"""Raised when there is an error from network layer."""
class NotGrantedCall(APIError):
"""Raised when there is an error from network layer."""
class NotCredential(APIError):
"""Raised when there is an error from network layer."""
class Forbidden(APIError):
"""Raised when there is an error from network layer."""
| Python | 0 |
63f6637228153b1f77ca860c297ff3554d802ce9 | Fix order history sorting logic, #sort() should be called before #reverse(). | model/orderbook.py | model/orderbook.py | # -*- encoding:utf8 -*-
import os
from model.oandapy import oandapy
class OrderBook(object):
def get_latest_orderbook(self, instrument, period, history):
oanda_token = os.environ.get('OANDA_TOKEN')
oanda = oandapy.API(environment="practice", access_token=oanda_token)
orders = oanda.get_orderbook(instrument=instrument)
try:
timeset = orders.keys()
timeset.sort()
timeset.reverse()
target_time = timeset[history]
except:
return None
order = orders[target_time]
order['time'] = target_time
return order
| # -*- encoding:utf8 -*-
import os
from model.oandapy import oandapy
class OrderBook(object):
def get_latest_orderbook(self, instrument, period, history):
oanda_token = os.environ.get('OANDA_TOKEN')
oanda = oandapy.API(environment="practice", access_token=oanda_token)
orders = oanda.get_orderbook(instrument=instrument)
try:
timeset = orders.keys()
timeset.reverse()
target_time = timeset[history]
except:
return None
order = orders[target_time]
order['time'] = target_time
return order
| Python | 0.000019 |
6741c59d726f1ceaf6edba82b6e97f501fc265ee | fix zero shape bug! | src/scripts/make_parts_dataset.py | src/scripts/make_parts_dataset.py | import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
import settings
sys.path.append(settings.CAFFE_PYTHON_PATH)
import skimage.io
import caffe
import numpy as np
import click
from glob import glob
import utils
from dataset import CUB_200_2011
from parts import Parts
@click.command()
@click.argument('out-path', type=click.Path(exists=True))
def main(out_path):
cub = CUB_200_2011(settings.CUB_ROOT)
cub_images = cub.get_all_images()
for image in cub_images:
image_path = image['img_file']
image_id = image['img_id']
cub_parts = cub.get_parts()
rel_image_path = image_path[len(settings.CUB_IMAGES_FOLDER):]
o_image = caffe.io.load_image(image_path)
parts = cub_parts.for_image(image_id)
head_parts = parts.filter_by_name(Parts.HEAD_PART_NAMES)
if len(head_parts) <= 2:
print "#parts:%d \tID:%d \tName:%s" % (len(head_parts), int(image_id), rel_image_path)
if len(head_parts) <= 1:
continue
part_image = head_parts.get_rect(o_image)
if 0 in part_image.shape:
print "#parts:%d \tID:%d \tName:%s + Shape:%s" % (len(head_parts), int(image_id), rel_image_path, str(part_image.shape))
out_image_path = os.path.join(out_path, rel_image_path)
utils.ensure_dir(os.path.dirname(out_image_path))
skimage.io.imsave(out_image_path, part_image)
if __name__ == '__main__':
main() | import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
import settings
sys.path.append(settings.CAFFE_PYTHON_PATH)
import skimage.io
import caffe
import numpy as np
import click
from glob import glob
import utils
from dataset import CUB_200_2011
from parts import Parts
@click.command()
@click.argument('out-path', type=click.Path(exists=True))
def main(out_path):
cub = CUB_200_2011(settings.CUB_ROOT)
cub_images = cub.get_all_images()
for image in cub_images:
image_path = image['img_file']
image_id = image['img_id']
cub_parts = cub.get_parts()
rel_image_path = image_path[len(settings.CUB_IMAGES_FOLDER):]
o_image = caffe.io.load_image(image_path)
parts = cub_parts.for_image(image_id)
head_parts = parts.filter_by_name(Parts.HEAD_PART_NAMES)
if len(head_parts) <= 2:
print "#parts:%d \tID:%d \tName:%s" % (len(head_parts), int(image_id), rel_image_path)
if len(head_parts) <= 1:
continue
part_image = head_parts.get_rect(o_image)
out_image_path = os.path.join(out_path, rel_image_path)
utils.ensure_dir(os.path.dirname(out_image_path))
skimage.io.imsave(out_image_path, part_image)
if __name__ == '__main__':
main() | Python | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.