| commit (string, 40-40 chars) | subject (string, 1-3.25k chars) | old_file (string, 4-311 chars) | new_file (string, 4-311 chars) | old_contents (string, 0-26.3k chars) | lang (string, 3 classes) | proba (float64, 0-1) | diff (string, 0-7.82k chars) |
|---|---|---|---|---|---|---|---|
7dbf9d6e6fc52247ae88a1c674ee8c78edd98d65
|
remove timeout from completion_service_test - causes 'daemonic proccesses cannot have children' error
|
studio/completion_service/completion_service_test.py
|
studio/completion_service/completion_service_test.py
|
import uuid
import unittest
import os
import logging
from timeout_decorator import timeout
from .completion_service import CompletionService
from studio.util import has_aws_credentials
from studio.local_queue import get_local_queue_lock
logging.basicConfig()


class CompletionServiceTest(unittest.TestCase):
    _multiprocess_shared_ = True

    def test_two_experiments_with_cs_args(self, n_experiments=2, **kwargs):
        if not(any(kwargs)):
            return
        mypath = os.path.dirname(os.path.realpath(__file__))
        experimentId = str(uuid.uuid4())

        results = {}
        expected_results = {}
        with CompletionService(experimentId, **kwargs) as cs:
            for i in range(0, n_experiments):
                key = cs.submitTask(
                    os.path.join(
                        mypath,
                        'completion_service_func.py'),
                    [i])
                expected_results[key] = [i]

            for i in range(0, n_experiments):
                result = cs.getResults(blocking=True)
                results[result[0]] = result[1]

        self.assertEquals(results, expected_results)

    @unittest.skipIf(not has_aws_credentials(),
                     'AWS credentials needed for this test')
    def test_two_experiments_ec2(self):
        mypath = os.path.dirname(os.path.realpath(__file__))
        config_path = os.path.join(
            mypath,
            '..',
            'tests',
            'test_config_http_client.yaml')

        self.test_two_experiments_with_cs_args(
            config=config_path,
            cloud_timeout=100,
            cloud='ec2')

    @unittest.skipIf(not has_aws_credentials(),
                     'AWS credentials needed for this test')
    def test_two_experiments_ec2spot(self):
        mypath = os.path.dirname(os.path.realpath(__file__))
        config_path = os.path.join(
            mypath,
            '..',
            'tests',
            'test_config_http_client.yaml')

        self.test_two_experiments_with_cs_args(
            config=config_path,
            cloud_timeout=100,
            cloud='ec2spot')

    @timeout(500, use_signals=False)
    def test_two_experiments_apiserver(self):
        mypath = os.path.dirname(os.path.realpath(__file__))
        config_path = os.path.join(
            mypath,
            '..',
            'tests',
            'test_config_http_client.yaml')

        with get_local_queue_lock():
            self.test_two_experiments_with_cs_args(config=config_path)

    @unittest.skipIf(
        'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ.keys(),
        'Need GOOGLE_APPLICATION_CREDENTIALS env variable to' +
        'use google cloud')
    def test_two_experiments_gcloud(self):
        mypath = os.path.dirname(os.path.realpath(__file__))
        config_path = os.path.join(
            mypath,
            '..',
            'tests',
            'test_config_http_client.yaml')

        self.test_two_experiments_with_cs_args(
            config=config_path,
            cloud='gcloud')

    @unittest.skip('TODO peterz scale down or fix')
    # @unittest.skipIf(not has_aws_credentials(),
    #                  'AWS credentials needed for this test')
    def test_many_experiments_ec2(self):
        experimentId = str(uuid.uuid4())
        mypath = os.path.dirname(os.path.realpath(__file__))
        config_path = os.path.join(
            mypath,
            '..',
            'tests',
            'test_config.yaml')

        n_experiments = 100
        num_workers = 30
        print("Executing {} tasks with {} workers"
              .format(n_experiments, num_workers))

        results = {}
        expected_results = {}

        logger = logging.getLogger('test_1k_experiments_ec2')
        logger.setLevel(10)

        with CompletionService(experimentId,
                               config=config_path, cloud='ec2spot',
                               num_workers=num_workers) as cs:

            def submit_task(i):
                key = cs.submitTaskWithFiles(
                    os.path.join(
                        mypath,
                        'completion_service_func.py'),
                    [i],
                    {
                        'a': '/Users/peter.zhokhov/.bash_profile',
                        'p': '/Users/peter.zhokhov/.bash_profile'
                    })
                logger.info('Submitted task ' + str(i))
                expected_results[key] = [i]

            '''
            pool.map(submit_task, range(n_experiments))
            print("Submitted")
            pool.close()
            pool.join()
            '''
            for i in range(n_experiments):
                submit_task(i)

            for i in range(0, n_experiments):
                print("Trying to get a result " + str(i))
                result = cs.getResults(blocking=True)
                logger.info('Received result ' + str(result))
                results[result[0]] = result[1]

        self.assertEquals(results, expected_results)

    @unittest.skipIf(
        'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ.keys(),
        'Need GOOGLE_APPLICATION_CREDENTIALS env variable to' +
        'use google cloud')
    def test_two_experiments_gcloud_nonspot(self):
        mypath = os.path.dirname(os.path.realpath(__file__))
        config_path = os.path.join(
            mypath,
            '..',
            'tests',
            'test_config.yaml')

        self.test_two_experiments_with_cs_args(
            config=config_path,
            cloud='gcloud')


if __name__ == '__main__':
    unittest.main()
|
Python
| 0
|
@@ -49,46 +49,8 @@
ging
-%0Afrom timeout_decorator import timeout
%0A%0Afr
@@ -2104,45 +2104,8 @@
')%0A%0A
- @timeout(500, use_signals=False)%0A
|
fed1475564f7ca8a496d50446e4e5924befe8628
|
Update function output type annotation
|
tensorflow/core/function/capture/free_vars_detect.py
|
tensorflow/core/function/capture/free_vars_detect.py
|
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""An independent module to detect free vars inside a function."""
import types
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import naming
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import qual_names
from tensorflow.python.autograph.pyct import transformer
from tensorflow.python.autograph.pyct.static_analysis import activity
from tensorflow.python.util import tf_inspect


def _parse_and_analyze(func):
  """Parse and analyze Python Function code."""
  node, source = parser.parse_entity(func, future_features=())
  node = qual_names.resolve(node)
  entity_info = transformer.EntityInfo(
      name=func.__name__,
      source_code=source,
      source_file=None,
      future_features=(),
      namespace={})
  namer = naming.Namer({})
  ctx = transformer.Context(entity_info, namer, None)
  node = activity.resolve(node, ctx)
  return node


def detect_function_free_vars(
    func: types.FunctionType) -> tuple[list[str], list[str], list[int]]:
  """Detect free vars in any Python function."""
  assert isinstance(
      func, types.FunctionType
  ), f"The input should be of Python function type. Got type: {type(func)}."

  node = _parse_and_analyze(func)
  scope = anno.getanno(node, anno.Static.SCOPE)
  free_vars_all = list(scope.free_vars)
  globals_dict = func.__globals__

  filtered = []
  for var in free_vars_all:
    base = str(var.qn[0])
    if base in globals_dict:
      obj = globals_dict[base]
      if tf_inspect.ismodule(obj):
        continue
      if (tf_inspect.isclass(obj) or
          tf_inspect.ismethod(obj) or
          tf_inspect.isfunction(obj)):
        if obj.__module__ != func.__module__:
          continue
      # Only keep free vars without subscript for simplicity
      if not var.has_subscript():
        filtered.append(str(var))
    else:
      if not var.has_subscript():
        filtered.append(str(var))

  return sorted(filtered)
|
Python
| 0.000007
|
@@ -762,16 +762,40 @@
rt types
+%0Afrom typing import List
%0A%0Afrom t
@@ -1674,21 +1674,16 @@
ee_vars(
-%0A
func: ty
@@ -1707,45 +1707,16 @@
-%3E
-tuple%5Blist%5Bstr%5D, list%5Bstr%5D, list%5Bint%5D
+List%5Bstr
%5D:%0A
|
cbfbc2dbeeb8a03cd96ef2756185099a9be9b714
|
Update data_provider_test.py
|
tensorflow_gan/examples/esrgan/data_provider_test.py
|
tensorflow_gan/examples/esrgan/data_provider_test.py
|
# coding=utf-8
# Copyright 2021 The TensorFlow GAN Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfgan.examples.esrgan.data_provider"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
import numpy as np
import tensorflow as tf
import data_provider
import collections

Params = collections.namedtuple('HParams', ['hr_dimension',
                                            'scale',
                                            'batch_size',
                                            'data_dir'])


class DataProviderTest(tf.test.TestCase, absltest.TestCase):

  def setUp(self):
    super(DataProviderTest, self).setUp()
    self.HParams = Params(256, 4, 32, '/content/')
    self.dataset = data_provider.get_div2k_data(self.HParams)
    self.mock_lr = tf.random.normal([32, 64, 64, 3])
    self.mock_hr = tf.random.normal([32, 256, 256, 3])

  def test_dataset(self):
    with self.cached_session() as sess:
      self.assertIsInstance(self.dataset, tf.data.Dataset)
      lr_image, hr_image = next(iter(self.dataset))
      sess.run(tf.compat.v1.global_variables_initializer())

      self.assertEqual(type(self.mock_lr), type(lr_image))
      self.assertEqual(self.mock_lr.shape, lr_image.shape)

      self.assertEqual(type(self.mock_hr), type(hr_image))
      self.assertEqual(self.mock_hr.shape, hr_image.shape)


if __name__ == '__main__':
  tf.test.main()
|
Python
| 0
|
@@ -656,118 +656,26 @@
%22%22%22%0A
-%0Afrom __future__ import absolute_import%0Afrom __future__ import division%0Afrom __future__ import print_fun
+import colle
ction
-%0A
+s
%0Afro
@@ -709,27 +709,8 @@
est%0A
-import numpy as np%0A
impo
@@ -754,29 +754,12 @@
der%0A
-import collections%0A%0AP
+%0A%0Ahp
aram
@@ -786,18 +786,18 @@
dtuple('
-HP
+hp
arams',
@@ -1049,11 +1049,8 @@
e):%0A
- %0A
de
@@ -1119,18 +1119,18 @@
elf.
-HP
+hp
arams =
Para
@@ -1125,17 +1125,18 @@
arams =
-P
+hp
arams(25
@@ -1215,10 +1215,10 @@
elf.
-HP
+hp
aram
@@ -1359,50 +1359,8 @@
f):%0A
- with self.cached_session() as sess:%0A
@@ -1412,16 +1412,56 @@
ataset)%0A
+ with self.cached_session() as sess:%0A
lr
|
91603858133183e056fbcd9cfee92ccc79e7e610
|
Interpolate with differentiable barycentrics.
|
tensorflow_graphics/rendering/triangle_rasterizer.py
|
tensorflow_graphics/rendering/triangle_rasterizer.py
|
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module implements a differentiable rasterizer of triangular meshes.
The resulting rendering contains perspective-correct interpolation of attributes
defined at the vertices of the rasterized meshes. This rasterizer does not
provide gradients through visibility, but it does through visible geometry and
attributes.
"""
import tensorflow as tf
from tensorflow_graphics.rendering import barycentrics as barycentrics_module
from tensorflow_graphics.rendering import interpolate
from tensorflow_graphics.rendering import rasterization_backend
from tensorflow_graphics.rendering import utils
from tensorflow_graphics.util import export_api
from tensorflow_graphics.util import shape


def _dim_value(dim):
  return 1 if dim is None else tf.compat.v1.dimension_value(dim)


def _merge_batch_dims(tensor, last_axis):
  """Merges all dimensions into one starting from 0 till `last_axis` exluding."""
  return tf.reshape(tensor, [-1] + tensor.shape.as_list()[last_axis:])


def _restore_batch_dims(tensor, batch_shape):
  """Unpack first dimension into batch_shape, preserving the rest of the dimensions."""
  return tf.reshape(tensor, batch_shape + tensor.shape.as_list()[1:])


def rasterize(vertices,
              triangles,
              attributes,
              view_projection_matrix,
              image_size,
              backend=rasterization_backend.RasterizationBackends.OPENGL,
              name=None):
  """Rasterizes the scene.

  Note:
    In the following, A1 to An are optional batch dimensions.

  Args:
    vertices: A tensor of shape `[A1, ..., An, V, 3]` containing batches of `V`
      vertices, each defined by a 3D point.
    triangles: A tensor of shape `[T, 3]` containing `T` triangles, each
      associated with 3 vertices from `vertices`.
    attributes: A dictionary of tensors, each of shape `[A1, ..., An, V, K_a]`
      containing batches of `V` vertices, each associated with K-dimensional
      attributes. K_a may vary by attribute.
    view_projection_matrix: A tensor of shape `[A1, ..., An, 4, 4]` containing
      batches of matrices used to transform vertices from model to clip
      coordinates.
    image_size: A tuple (height, width) containing the dimensions in pixels of
      the rasterized image.
    backend: A rasterization_backend.RasterizationBackends enum containing the
      backend method to use for rasterization.
    name: A name for this op. Defaults to 'triangle_rasterizer_rasterize'.

  Returns:
    A dictionary. The key "mask" is of shape `[A1, ..., An, height, width, 1]`
    and stores a value of `0` of the pixel is assciated with the background,
    and `1` with the foreground. The key "barycentrics" is of shape
    `[A1, ..., An, height, width, 3]` and stores barycentric weights. Finally,
    the dictionary contains perspective correct interpolated attributes of shape
    `[A1, ..., An, height, width, K]` per entry in the `attributes` dictionary.
  """
  with tf.compat.v1.name_scope(
      name, "triangle_rasterizer_rasterize",
      (vertices, triangles, attributes, view_projection_matrix)):
    vertices = tf.convert_to_tensor(value=vertices)
    triangles = tf.convert_to_tensor(value=triangles)
    view_projection_matrix = tf.convert_to_tensor(value=view_projection_matrix)

    shape.check_static(
        tensor=vertices,
        tensor_name="vertices",
        has_rank_greater_than=1,
        has_dim_equals=((-1, 3)))
    shape.check_static(
        tensor=triangles,
        tensor_name="triangles",
        has_rank=2,
        has_dim_equals=((-1, 3)))
    shape.check_static(
        tensor=view_projection_matrix,
        tensor_name="view_projection_matrix",
        has_dim_equals=(((-2, 4), (-1, 4))))

    image_size_backend = (int(image_size[1]), int(image_size[0]))

    input_batch_shape = vertices.shape[:-2]
    view_projection_matrix = _merge_batch_dims(
        view_projection_matrix, last_axis=-2)
    vertices = _merge_batch_dims(vertices, last_axis=-2)

    rasterized = rasterization_backend.rasterize(
        vertices,
        triangles,
        view_projection_matrix,
        image_size_backend,
        backend=backend)
    outputs = {
        "mask":
            _restore_batch_dims(rasterized.foreground_mask, input_batch_shape),
        "triangle_indices":
            _restore_batch_dims(rasterized.triangle_id, input_batch_shape)
    }

    # Extract batch shape in order to make sure it is preserved after `gather`
    # operation.
    batch_shape = rasterized.triangle_id.shape[:-3]
    batch_shape = [_dim_value(dim) for dim in batch_shape]

    clip_space_vertices = utils.transform_homogeneous(view_projection_matrix,
                                                      vertices)
    barycentrics = barycentrics_module.differentiable_barycentrics(
        rasterized, clip_space_vertices, triangles).barycentrics.value
    outputs["barycentrics"] = _restore_batch_dims(
        rasterized.foreground_mask * barycentrics, input_batch_shape)

    for key, attribute in attributes.items():
      attribute = tf.convert_to_tensor(value=attribute)
      attribute = _merge_batch_dims(attribute, last_axis=-2)
      masked_attribute = interpolate.interpolate_vertex_attribute(
          attribute, rasterized)
      masked_attribute = _restore_batch_dims(masked_attribute.value,
                                             input_batch_shape)
      outputs[key] = masked_attribute

    return outputs


# API contains all public functions and classes.
__all__ = export_api.get_functions_and_classes()
|
Python
| 0.000002
|
@@ -5296,28 +5296,26 @@
es)%0A
-barycentrics
+rasterized
= baryc
@@ -5409,16 +5409,46 @@
iangles)
+%0A barycentrics = rasterized
.barycen
|
ef0d0fa26bfd22c281c54bc348877afd0a7ee9d7
|
Use regex to match user metrics
|
tests/integration/blueprints/metrics/test_metrics.py
|
tests/integration/blueprints/metrics/test_metrics.py
|
"""
:Copyright: 2006-2020 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
import pytest


# To be overridden by test parametrization
@pytest.fixture
def config_overrides():
    return {}


@pytest.fixture
def client(admin_app, config_overrides, make_admin_app):
    app = make_admin_app(**config_overrides)
    with app.app_context():
        yield app.test_client()


@pytest.mark.parametrize('config_overrides', [{'METRICS_ENABLED': True}])
def test_metrics(client):
    response = client.get('/metrics')

    assert response.status_code == 200
    assert response.content_type == 'text/plain; version=0.0.4; charset=utf-8'
    assert response.mimetype == 'text/plain'
    assert response.get_data(as_text=True) == (
        'users_active_count 0\n'
        'users_uninitialized_count 0\n'
        'users_suspended_count 0\n'
        'users_deleted_count 0\n'
        'users_total_count 0\n'
    )


@pytest.mark.parametrize('config_overrides', [{'METRICS_ENABLED': False}])
def test_disabled_metrics(client):
    response = client.get('/metrics')

    assert response.status_code == 404
|
Python
| 0.000006
|
@@ -94,16 +94,27 @@
s.%0A%22%22%22%0A%0A
+import re%0A%0A
import p
@@ -706,54 +706,90 @@
in'%0A
+%0A
-assert response.get_data(as_text=True) ==
+# Not a full match as there can be other metrics, too.%0A regex = re.compile
(%0A
@@ -810,25 +810,28 @@
ctive_count
-0
+%5C%5Cd+
%5Cn'%0A
@@ -849,33 +849,36 @@
itialized_count
-0
+%5C%5Cd+
%5Cn'%0A 'use
@@ -888,33 +888,36 @@
suspended_count
-0
+%5C%5Cd+
%5Cn'%0A 'use
@@ -929,25 +929,28 @@
leted_count
-0
+%5C%5Cd+
%5Cn'%0A
@@ -968,17 +968,20 @@
l_count
-0
+%5C%5Cd+
%5Cn'%0A
@@ -982,16 +982,85 @@
'%0A )%0A
+ assert regex.search(response.get_data(as_text=True)) is not None%0A
%0A%0A@pytes
|
8f70cdec4534651d7479645b4675a23f8d0c7005
|
add home.html ;
|
src/greenpointtreesstore/greenpointtreesstore/urls.py
|
src/greenpointtreesstore/greenpointtreesstore/urls.py
|
from __future__ import unicode_literals
from django.conf.urls import patterns, include, url
from django.conf.urls.i18n import i18n_patterns
from django.contrib import admin
from mezzanine.core.views import direct_to_template
from mezzanine.conf import settings
admin.autodiscover()
# Add the urlpatterns for any custom Django applications here.
# You can also change the ``home`` view to add your own functionality
# to the project's homepage.

urlpatterns = i18n_patterns("",
    # Change the admin prefix here to use an alternate URL for the
    # admin interface, which would be marginally more secure.
    ("^admin/", include(admin.site.urls)),
)

if settings.USE_MODELTRANSLATION:
    urlpatterns += patterns('',
        url('^i18n/$', 'django.views.i18n.set_language', name='set_language'),
    )

urlpatterns += patterns('',

    # We don't want to presume how your homepage works, so here are a
    # few patterns you can use to set it up.

    # HOMEPAGE AS STATIC TEMPLATE
    # ---------------------------
    # This pattern simply loads the index.html template. It isn't
    # commented out like the others, so it's the default. You only need
    # one homepage pattern, so if you use a different one, comment this
    # one out.
    url("^$", direct_to_template, {"template": "index.html"}, name="home"),

    # HOMEPAGE AS AN EDITABLE PAGE IN THE PAGE TREE
    # ---------------------------------------------
    # This pattern gives us a normal ``Page`` object, so that your
    # homepage can be managed via the page tree in the admin. If you
    # use this pattern, you'll need to create a page in the page tree,
    # and specify its URL (in the Meta Data section) as "/", which
    # is the value used below in the ``{"slug": "/"}`` part.
    # Also note that the normal rule of adding a custom
    # template per page with the template name using the page's slug
    # doesn't apply here, since we can't have a template called
    # "/.html" - so for this case, the template "pages/index.html"
    # should be used if you want to customize the homepage's template.
    # url("^$", "mezzanine.pages.views.page", {"slug": "/"}, name="home"),

    # HOMEPAGE FOR A BLOG-ONLY SITE
    # -----------------------------
    # This pattern points the homepage to the blog post listing page,
    # and is useful for sites that are primarily blogs. If you use this
    # pattern, you'll also need to set BLOG_SLUG = "" in your
    # ``settings.py`` module, and delete the blog page object from the
    # page tree in the admin if it was installed.
    # url("^$", "mezzanine.blog.views.blog_post_list", name="home"),

    # MEZZANINE'S URLS
    # ----------------
    # ADD YOUR OWN URLPATTERNS *ABOVE* THE LINE BELOW.
    # ``mezzanine.urls`` INCLUDES A *CATCH ALL* PATTERN
    # FOR PAGES, SO URLPATTERNS ADDED BELOW ``mezzanine.urls``
    # WILL NEVER BE MATCHED!

    # If you'd like more granular control over the patterns in
    # ``mezzanine.urls``, go right ahead and take the parts you want
    # from it, and use them directly below instead of using
    # ``mezzanine.urls``.
    ("^", include("mezzanine.urls")),

    # MOUNTING MEZZANINE UNDER A PREFIX
    # ---------------------------------
    # You can also mount all of Mezzanine's urlpatterns under a
    # URL prefix if desired. When doing this, you need to define the
    # ``SITE_PREFIX`` setting, which will contain the prefix. Eg:
    # SITE_PREFIX = "my/site/prefix"
    # For convenience, and to avoid repeating the prefix, use the
    # commented out pattern below (commenting out the one above of course)
    # which will make use of the ``SITE_PREFIX`` setting. Make sure to
    # add the import ``from django.conf import settings`` to the top
    # of this file as well.
    # Note that for any of the various homepage patterns above, you'll
    # need to use the ``SITE_PREFIX`` setting as well.

    # ("^%s/" % settings.SITE_PREFIX, include("mezzanine.urls"))
)

# Adds ``STATIC_URL`` to the context of error pages, so that error
# pages can use JS, CSS and images.
handler404 = "mezzanine.core.views.page_not_found"
handler500 = "mezzanine.core.views.server_error"
|
Python
| 0
|
@@ -1307,32 +1307,122 @@
%7D, name=%22home%22),
+%0A url(%22%5Ehome.html$%22, direct_to_template, %7B%22template%22: %22index.html%22%7D, name=%22home.html%22),
%0A%0A # HOMEPAGE
|
e71bbe6dff52d7977332ee37ffa9df5505173a0c
|
Use sets like they should be, and finish up changes for the night
|
test_utils/management/commands/relational_dumpdata.py
|
test_utils/management/commands/relational_dumpdata.py
|
from django.core.management.base import BaseCommand, CommandError
from django.core import serializers
from optparse import make_option
from django.db.models.fields.related import ForeignKey, ManyToManyField
from django.db.models import get_app, get_apps, get_models


def _relational_dumpdata(app, collected):
    objects = []
    for mod in get_models(app):
        objects.extend(mod._default_manager.all())
    #Got models, now get their relationships.
    #Thanks to http://www.djangosnippets.org/snippets/918/
    related = []
    collected.add(s for s in set([(x.__class__, x.pk) for x in objects])) #Just used to track already gotten models
    for obj in objects:
        for f in obj._meta.fields :
            if isinstance(f, ForeignKey):
                new = getattr(obj, f.name) # instantiate object
                if new and not (new.__class__, new.pk) in collected:
                    collected.add((new.__class__, new.pk))
                    related.append(new)
        for f in obj._meta.many_to_many:
            if isinstance(f, ManyToManyField):
                for new in getattr(obj, f.name).all():
                    if new and not (new.__class__, new.pk) in collected:
                        collected.add((new.__class__, new.pk))
                        related.append(new)
    if related != []:
        objects.extend(related)
    return (objects, collected)


class Command(BaseCommand):
    option_list = BaseCommand.option_list + (
        make_option('--format', default='json', dest='format',
            help='Specifies the output serialization format for fixtures.'),
        make_option('--indent', default=None, dest='indent', type='int',
            help='Specifies the indent level to use when pretty-printing output'),
        make_option('-e', '--exclude', dest='exclude',action='append', default=[],
            help='App to exclude (use multiple --exclude to exclude multiple apps).'),
    )
    help = 'Output the contents of the database as a fixture of the given format.'
    args = '[appname ...]'

    def handle(self, *app_labels, **options):
        format = options.get('format','json')
        indent = options.get('indent',None)
        exclude = options.get('exclude',[])
        show_traceback = options.get('traceback', False)

        excluded_apps = [get_app(app_label) for app_label in exclude]

        if len(app_labels) == 0:
            app_list = [app for app in get_apps() if app not in excluded_apps]
        else:
            app_list = [get_app(app_label) for app_label in app_labels]

        # Check that the serialization format exists; this is a shortcut to
        # avoid collating all the objects and _then_ failing.
        if format not in serializers.get_public_serializer_formats():
            raise CommandError("Unknown serialization format: %s" % format)

        try:
            serializers.get_serializer(format)
        except KeyError:
            raise CommandError("Unknown serialization format: %s" % format)

        objects = []
        collected = set()
        for app in app_list:
            objects, collected = _relational_dumpdata(app, collected)
        #****End New stuff
        try:
            return serializers.serialize(format, objects, indent=indent)
        except Exception, e:
            if show_traceback:
                raise
            raise CommandError("Unable to serialize database: %s" % e)
|
Python
| 0
|
@@ -547,23 +547,8 @@
add(
-s for s in set(
%5B(x.
@@ -582,17 +582,16 @@
bjects%5D)
-)
#Just
@@ -2679,155 +2679,8 @@
ng.%0A
- if format not in serializers.get_public_serializer_formats():%0A raise CommandError(%22Unknown serialization format: %25s%22 %25 format)%0A%0A
@@ -2912,16 +2912,41 @@
pp_list:
+ #Yey for ghetto recusion
%0A
|
cc1b63e76a88fd589bfe3fce2f6cbe5becf995bc
|
use no_backprop_mode
|
tests/links_tests/model_tests/vgg_tests/test_vgg16.py
|
tests/links_tests/model_tests/vgg_tests/test_vgg16.py
|
import unittest
import numpy as np
from chainer.initializers import Zero
from chainer import testing
from chainer.testing import attr
from chainer import Variable
from chainercv.links import VGG16


@testing.parameterize(
    {'pick': 'prob', 'shapes': (1, 200), 'n_class': 200},
    {'pick': 'pool5', 'shapes': (1, 512, 7, 7), 'n_class': None},
    {'pick': ['conv5_3', 'conv4_2'],
     'shapes': ((1, 512, 14, 14), (1, 512, 28, 28)), 'n_class': None},
)
class TestVGG16Call(unittest.TestCase):

    def setUp(self):
        self.link = VGG16(
            n_class=self.n_class, pretrained_model=None,
            initialW=Zero())
        self.link.pick = self.pick

    def check_call(self):
        xp = self.link.xp

        x1 = Variable(xp.asarray(np.random.uniform(
            -1, 1, (1, 3, 224, 224)).astype(np.float32)))
        features = self.link(x1)

        if isinstance(features, tuple):
            for activation, shape in zip(features, self.shapes):
                self.assertEqual(activation.shape, shape)
        else:
            self.assertEqual(features.shape, self.shapes)
            self.assertEqual(features.dtype, np.float32)

    @attr.slow
    def test_call_cpu(self):
        self.check_call()

    @attr.gpu
    @attr.slow
    def test_call_gpu(self):
        self.link.to_gpu()
        self.check_call()


testing.run_module(__name__, __file__)
|
Python
| 0.000005
|
@@ -30,16 +30,31 @@
as np%0A%0A
+import chainer%0A
from cha
@@ -841,16 +841,61 @@
at32)))%0A
+ with chainer.no_backprop_mode():%0A
|
6fdb703edc929c72d871b7830959fb1e32c86c48
|
Remove compositing check from rasterize_and_record_micro test.
|
tools/perf/measurements/rasterize_and_record_micro.py
|
tools/perf/measurements/rasterize_and_record_micro.py
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import time
from telemetry.core.util import TimeoutException
from telemetry.page import page_measurement
from telemetry.page import page_test


class RasterizeAndRecordMicro(page_measurement.PageMeasurement):
  def __init__(self):
    super(RasterizeAndRecordMicro, self).__init__('', True)
    self._compositing_features_enabled = False
    self._chrome_branch_number = None

  @classmethod
  def AddCommandLineArgs(cls, parser):
    parser.add_option('--start-wait-time', type='float',
                      default=2,
                      help='Wait time before the benchmark is started '
                           '(must be long enought to load all content)')
    parser.add_option('--rasterize-repeat', type='int',
                      default=100,
                      help='Repeat each raster this many times. Increase '
                           'this value to reduce variance.')
    parser.add_option('--record-repeat', type='int',
                      default=100,
                      help='Repeat each record this many times. Increase '
                           'this value to reduce variance.')
    parser.add_option('--timeout', type='int',
                      default=120,
                      help='The length of time to wait for the micro '
                           'benchmark to finish, expressed in seconds.')
    parser.add_option('--report-detailed-results',
                      action='store_true',
                      help='Whether to report additional detailed results.')

  def CustomizeBrowserOptions(self, options):
    options.AppendExtraBrowserArgs([
        '--enable-impl-side-painting',
        '--force-compositing-mode',
        '--enable-threaded-compositing',
        '--enable-gpu-benchmarking'
    ])

  def DidStartBrowser(self, browser):
    # TODO(vmpstr): Remove this temporary workaround when reference build has
    # been updated to branch 1713 or later.
    backend = browser._browser_backend # pylint: disable=W0212
    self._chrome_branch_number = getattr(backend, 'chrome_branch_number', None)
    if (not self._chrome_branch_number or
        (sys.platform != 'android' and self._chrome_branch_number < 1713)):
      raise page_test.TestNotSupportedOnPlatformFailure(
          'rasterize_and_record_micro requires Chrome branch 1713 '
          'or later. Skipping measurement.')

    # Check if the we actually have threaded forced compositing enabled.
    system_info = browser.GetSystemInfo()
    if (system_info.gpu.feature_status
        and system_info.gpu.feature_status.get(
            'compositing', None) == 'enabled_force_threaded'):
      self._compositing_features_enabled = True

  def MeasurePage(self, page, tab, results):
    if not self._compositing_features_enabled:
      raise page_test.TestNotSupportedOnPlatformFailure(
          'Compositing feature status unknown or not '+
          'forced and threaded. Skipping measurement.')

    try:
      tab.WaitForJavaScriptExpression("document.readyState == 'complete'", 10)
    except TimeoutException:
      pass
    time.sleep(self.options.start_wait_time)

    record_repeat = self.options.record_repeat
    rasterize_repeat = self.options.rasterize_repeat
    # Enqueue benchmark
    tab.ExecuteJavaScript("""
        window.benchmark_results = {};
        window.benchmark_results.done = false;
        window.benchmark_results.scheduled =
            chrome.gpuBenchmarking.runMicroBenchmark(
                "rasterize_and_record_benchmark",
                function(value) {
                  window.benchmark_results.done = true;
                  window.benchmark_results.results = value;
                }, {
                  "record_repeat_count": """ + str(record_repeat) + """,
                  "rasterize_repeat_count": """ + str(rasterize_repeat) + """
                });
    """)

    scheduled = tab.EvaluateJavaScript('window.benchmark_results.scheduled')
    if (not scheduled):
      raise page_measurement.MeasurementFailure(
          'Failed to schedule rasterize_and_record_micro')

    tab.WaitForJavaScriptExpression(
        'window.benchmark_results.done', self.options.timeout)

    data = tab.EvaluateJavaScript('window.benchmark_results.results')

    pixels_recorded = data['pixels_recorded']
    record_time = data['record_time_ms']
    pixels_rasterized = data['pixels_rasterized']
    rasterize_time = data['rasterize_time_ms']

    results.Add('pixels_recorded', 'pixels', pixels_recorded)
    results.Add('record_time', 'ms', record_time)
    results.Add('pixels_rasterized', 'pixels', pixels_rasterized)
    results.Add('rasterize_time', 'ms', rasterize_time)

    # TODO(skyostil): Remove this temporary workaround when reference build has
    # been updated to branch 1931 or later.
    if ((self._chrome_branch_number and self._chrome_branch_number >= 1931) or
        sys.platform == 'android'):
      record_time_sk_null_canvas = data['record_time_sk_null_canvas_ms']
      record_time_painting_disabled = data['record_time_painting_disabled_ms']
      results.Add('record_time_sk_null_canvas', 'ms',
                  record_time_sk_null_canvas)
      results.Add('record_time_painting_disabled', 'ms',
                  record_time_painting_disabled)

    if self.options.report_detailed_results:
      pixels_rasterized_with_non_solid_color = \
          data['pixels_rasterized_with_non_solid_color']
      pixels_rasterized_as_opaque = \
          data['pixels_rasterized_as_opaque']
      total_layers = data['total_layers']
      total_picture_layers = data['total_picture_layers']
      total_picture_layers_with_no_content = \
          data['total_picture_layers_with_no_content']
      total_picture_layers_off_screen = \
          data['total_picture_layers_off_screen']
      results.Add('pixels_rasterized_with_non_solid_color', 'pixels',
                  pixels_rasterized_with_non_solid_color)
      results.Add('pixels_rasterized_as_opaque', 'pixels',
                  pixels_rasterized_as_opaque)
      results.Add('total_layers', 'count', total_layers)
      results.Add('total_picture_layers', 'count', total_picture_layers)
      results.Add('total_picture_layers_with_no_content', 'count',
                  total_picture_layers_with_no_content)
      results.Add('total_picture_layers_off_screen', 'count',
                  total_picture_layers_off_screen)
|
Python
| 0.000004
|
@@ -462,55 +462,8 @@
ue)%0A
- self._compositing_features_enabled = False%0A
@@ -2456,581 +2456,50 @@
%0A%0A
- # Check if the we actually have threaded forced compositing enabled.%0A system_info = browser.GetSystemInfo()%0A if (system_info.gpu.feature_status%0A and system_info.gpu.feature_status.get(%0A 'compositing', None) == 'enabled_force_threaded'):%0A self._compositing_features_enabled = True%0A%0A def MeasurePage(self, page, tab, results):%0A if not self._compositing_features_enabled:%0A raise page_test.TestNotSupportedOnPlatformFailure(%0A 'Compositing feature status unknown or not '+%0A 'forced and threaded. Skipping measurement.')%0A
+def MeasurePage(self, page, tab, results):
%0A
|
b635d3bb0a0de01539d66dda4555b306c59082ee
|
fix version number
|
constant2/__init__.py
|
constant2/__init__.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-

try:
    from ._constant2 import Constant
except: # pragma: no cover
    pass

__version__ = "0.0.9"
__short_description__ = "provide extensive way of managing your constant variable."
__license__ = "MIT"
__author__ = "Sanhe Hu"
__author_email__ = "husanhe@gmail.com"
__maintainer__ = "Sanhe Hu"
__maintainer_email__ = "husanhe@gmail.com"
__github_username__ = "MacHu-GWU"
|
Python
| 0.000014
|
@@ -143,9 +143,10 @@
0.0.
-9
+10
%22%0A__
|
28b7b5f43d5206609f789a94c614d6811ae87cef
|
Fix call to resolve nick protection
|
txircd/modules/extra/services/account_nick_protect.py
|
txircd/modules/extra/services/account_nick_protect.py
|
from twisted.internet import reactor
from twisted.plugin import IPlugin
from txircd.config import ConfigValidationError
from txircd.module_interface import IModuleData, ModuleData
from txircd.utils import now
from zope.interface import implements
from weakref import WeakKeyDictionary
from datetime import timedelta


class AccountNickProtect(ModuleData):
    implements(IPlugin, IModuleData)

    name = "AccountNickProtect"
    blockedNickChangeUsers = WeakKeyDictionary()

    def actions(self):
        return [ ("welcome", 1, self.checkNickOnConnect),
            ("changenick", 1, self.checkNickOnNickChange),
            ("quit", 1, self.cancelTimerOnQuit),
            ("commandpermission-NICK", 10, self.checkCanChangeNick) ]

    def verifyConfig(self, config):
        if "account_nick_protect_seconds" in config:
            if not isinstance(config["account_nick_protect_seconds"], int) or config["account_nick_protect_seconds"] < 1:
                raise ConfigValidationError("account_nick_protect_seconds", "invalid number")
        if "account_nick_recover_seconds" in config:
            if not isinstance(config["account_nick_recover_seconds"], int) or config["account_nick_recover_seconds"] < 1:
                raise ConfigValidationError("account_nick_recover_seconds", "invalid number")

    def checkNickOnConnect(self, user):
        if not self.userSignedIntoNickAccount(user):
            self.applyNickProtection(user)

    def checkNickOnNickChange(self, user, oldNick, fromServer):
        self.cancelOldProtectTimer(user)
        if not self.userSignedIntoNickAccount(user):
            self.applyNickProtection(user)

    def cancelTimerOnQuit(self, user, reason, fromServer):
        self.cancelOldProtectTimer(user)

    def checkCanChangeNick(self, user, data):
        if user not in self.blockedNickChangeUsers:
            return None
        if self.blockedNickChangeUsers[user] > now():
            del self.blockedNickChangeUsers[user]
            return None
        user.sendMessage("NOTICE", "You can't change nicknames yet.")
        return False

    def applyNickProtection(self, user):
        if user.uuid[:3] != self.ircd.serverID:
            return
        protectDelay = self.ircd.config.get("account_nick_protect_seconds", 30)
        user.sendMessage("NOTICE", "The nickname you're using is owned by an account to which you are not identified. Please identify to that account or change your nick in the next \x02{}\x02 seconds.".format(protectDelay))
        user.cache["accountNickProtectTimer"] = reactor.callLater(protectDelay, self.resolveNickProtection, user.nick)

    def resolveNickProtection(self, user, nick):
        if user.nick != nick:
            return
        if self.userSignedIntoNickAccount(user):
            return
        user.changeNick(user.uuid)
        recoverSeconds = self.ircd.config.get("account_nick_recover_seconds", 10)
        if recoverSeconds > 0:
            recoveryTime = timedelta(seconds = recoverSeconds)
            self.blockedNickChangeUsers[user] = now() + recoveryTime

    def cancelOldProtectTimer(self, user):
        if "accountNickProtectTimer" not in user.cache:
            return
        if user.cache["accountNickProtectTimer"].active:
            user.cache["accountNickProtectTimer"].cancel()
        del user.cache["accountNickProtectTimer"]

    def userSignedIntoNickAccount(self, user):
        accountName = self.ircd.runActionUntilValue("accountfromnick", user.nick)
        if accountName is None:
            return True # Nick applies to all accounts and no-account users
        userAccount = user.metadataValue("account")
        if userAccount == accountName:
            return True
        return False


accountNickProtect = AccountNickProtect()
|
Python
| 0
|
@@ -2370,16 +2370,22 @@
tection,
+ user,
user.ni
|
770c4fd0b282ee355d2ea3e662786113dd6b4e74
|
add 1.4.2 (#26472)
|
var/spack/repos/builtin/packages/py-nipype/package.py
|
var/spack/repos/builtin/packages/py-nipype/package.py
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *


class PyNipype(PythonPackage):
    """Neuroimaging in Python: Pipelines and Interfaces."""

    homepage = "https://nipy.org/nipype"
    pypi = "nipype/nipype-1.6.0.tar.gz"

    version('1.6.1', sha256='8428cfc633d8e3b8c5650e241e9eedcf637b7969bcd40f3423334d4c6b0992b5')
    version('1.6.0', sha256='bc56ce63f74c9a9a23c6edeaf77631377e8ad2bea928c898cc89527a47f101cf')

    depends_on('python@3.6:', type=('build', 'run'))
    depends_on('py-setuptools', type='build')
    depends_on('py-click@6.6.0:', type=('build', 'run'))
    depends_on('py-networkx@2.0:', type=('build', 'run'))
    depends_on('py-nibabel@2.1.0:', type=('build', 'run'))
    depends_on('py-numpy@1.13:', type=('build', 'run'), when='^python@:3.6')
    depends_on('py-numpy@1.15.3:', type=('build', 'run'), when='^python@3.7:')
    depends_on('py-packaging', type=('build', 'run'))
    depends_on('py-prov@1.5.2:', type=('build', 'run'))
    depends_on('py-pydot@1.2.3:', type=('build', 'run'))
    depends_on('py-python-dateutil@2.2:', type=('build', 'run'))
    depends_on('py-rdflib@5.0.0:', type=('build', 'run'))
    depends_on('py-scipy@0.14:', type=('build', 'run'))
    depends_on('py-simplejson@3.8.0:', type=('build', 'run'))
    depends_on('py-traits@4.6:4,5.1:', type=('build', 'run'))
    depends_on('py-filelock@3.0.0:', type=('build', 'run'))
    depends_on('py-etelemetry@0.2.0:', type=('build', 'run'))
|
Python
| 0.000001
|
@@ -587,35 +587,198 @@
f')%0A
-%0A depends_on('python@3.6
+ version('1.4.2', sha256='069dcbb0217f13af6ee5a7f1e58424b9061290a3e10d7027d73bf44e26f820db')%0A%0A depends_on('python@3.6:', when='@1.5:', type=('build', 'run'))%0A depends_on('python@3.5
:',
@@ -875,18 +875,16 @@
lick@6.6
-.0
:', type
@@ -931,18 +931,30 @@
tworkx@2
-.0
+:', when='@1.6
:', type
@@ -996,20 +996,19 @@
py-n
-ibabel@2.1.0
+etworkx@1.9
:',
@@ -1054,17 +1054,18 @@
py-n
-umpy@1
+ibabel@2
.1
-3
:',
@@ -1077,32 +1077,67 @@
('build', 'run')
+)%0A depends_on('py-numpy@1.15.3:'
, when='%5Epython@
@@ -1140,13 +1140,36 @@
hon@
-:
3.
-6'
+7:', type=('build', 'run')
)%0A
@@ -1198,36 +1198,94 @@
@1.1
-5.3:', type=('build', 'run')
+3:', when='@1.5: %5Epython@:3.6', type=('build', 'run'))%0A depends_on('py-numpy@1.12:'
, wh
@@ -1296,21 +1296,44 @@
%5Epython@
+:
3.
-7:'
+6', type=('build', 'run')
)%0A de
@@ -1485,32 +1485,100 @@
build', 'run'))%0A
+ depends_on('py-pydotplus', when='@:1.5', type=('build', 'run'))%0A
depends_on('
@@ -1653,20 +1653,30 @@
rdflib@5
-.0.0
+:', when='@1.5
:', type
@@ -1783,18 +1783,16 @@
json@3.8
-.0
:', type
@@ -1905,12 +1905,8 @@
ck@3
-.0.0
:',
@@ -1961,18 +1961,30 @@
etry@0.2
-.0
+:', when='@1.5
:', type
@@ -1982,28 +1982,141 @@
5:', type=('build', 'run'))%0A
+ depends_on('py-etelemetry', type=('build', 'run'))%0A%0A depends_on('py-sphinxcontrib-napoleon', type='test')%0A
|
63d4d37c9194aacd783e911452a34ca78a477041
|
add latest version 1.2.0 (#23528)
|
var/spack/repos/builtin/packages/py-vermin/package.py
|
var/spack/repos/builtin/packages/py-vermin/package.py
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)


class PyVermin(PythonPackage):
    """Concurrently detect the minimum Python versions needed to run code."""

    homepage = "https://github.com/netromdk/vermin"
    url = "https://github.com/netromdk/vermin/archive/v1.1.1.tar.gz"

    maintainers = ['netromdk']

    version('1.1.1', sha256='d13b2281ba16c9d5b0913646483771789552230a9ed625e2cd92c5a112e4ae80')
    version('1.1.0', sha256='62d9f1b6694f50c22343cead2ddb6e2b007d24243fb583f61ceed7540fbe660b')
    version('1.0.3', sha256='1503be05b55cacde1278a1fe55304d8ee889ddef8ba16e120ac6686259bec95c')
    version('1.0.2', sha256='e999d5f5455e1116b366cd1dcc6fecd254c7ae3606549a61bc044216f9bb5b55')
    version('1.0.1', sha256='c06183ba653b9d5f6687a6686da8565fb127fab035f9127a5acb172b7c445079')
    version('1.0.0', sha256='e598e9afcbe3fa6f3f3aa894da81ccb3954ec9c0783865ecead891ac6aa57207')
    version('0.10.5', sha256='00601356e8e10688c52248ce0acc55d5b45417b462d5aa6887a6b073f0d33e0b')
    version('0.10.4', sha256='bd765b84679fb3756b26f462d2aab4af3183fb65862520afc1517f6b39dea8bf')
    version('0.10.0', sha256='3458a4d084bba5c95fd7208888aaf0e324a07ee092786ee4e5529f539ab4951f')

    depends_on('python@2.7:', type=('build', 'run'))
    depends_on('py-setuptools', type=('build', 'run'))

    @run_after('build')
    @on_package_attributes(run_tests=True)
    def build_test(self):
        make('test')
|
Python
| 0
|
@@ -416,19 +416,19 @@
hive/v1.
-1.1
+2.0
.tar.gz%22
@@ -461,16 +461,112 @@
omdk'%5D%0A%0A
+ version('1.2.0', sha256='a3ab6dc6608b859f301b9a77d5cc0d03335aae10c49d47a91b82be5be48c4f1f')%0A
vers
|
c73d237d087792a00b6c0aceaf56674e398ea8e0
|
version bump (#8884)
|
var/spack/repos/builtin/packages/simplemoc/package.py
|
var/spack/repos/builtin/packages/simplemoc/package.py
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *


class Simplemoc(MakefilePackage):
    """The purpose of this mini-app is to demonstrate the performance
       characterterics and viability of the Method of Characteristics (MOC)
       for 3D neutron transport calculations in the context of full scale
       light water reactor simulation."""

    homepage = "https://github.com/ANL-CESAR/SimpleMOC/"
    url = "https://github.com/ANL-CESAR/SimpleMOC/archive/master.tar.gz"

    version('1.0', 'd8827221a4ae76e9766a32e16d143e60')

    tags = ['proxy-app']

    variant('mpi', default=True, description='Build with MPI support')

    depends_on('mpi', when='+mpi')

    build_directory = 'src'

    @property
    def build_targets(self):
        targets = []

        cflags = '-std=gnu99'
        ldflags = '-lm'

        if self.compiler.name == 'gcc' or self.compiler.name == 'intel':
            cflags += ' ' + self.compiler.openmp_flag
        if '+mpi' in self.spec:
            targets.append('CC={0}'.format(self.spec['mpi'].mpicc))

        targets.append('CFLAGS={0}'.format(cflags))
        targets.append('LDFLAGS={0}'.format(ldflags))

        return targets

    def install(self, spec, prefix):
        mkdir(prefix.bin)
        install('src/SimpleMOC', prefix.bin)
|
Python
| 0
|
@@ -1658,14 +1658,10 @@
ive/
-master
+v4
.tar
@@ -1683,47 +1683,84 @@
on('
-1.0', 'd8827221a4ae76e9766a32e16d143e60
+4', sha256='a39906014fdb234c43bf26e1919bdc8a13097788812e0b353a492b8e568816a6
')%0A%0A
|
28c9c0349e0f86fbdee8a02b46386e42dbe702a2
|
fix conflict
|
oct_turrets/turret.py
|
oct_turrets/turret.py
|
import time
import json
from oct_turrets.base import BaseTurret
from oct_turrets.canon import Canon


class Turret(BaseTurret):
    """This class represent the classic turret for oct
    """

    def init_commands(self):
        """Initialize the basics commandes for the turret
        """
        self.commands['start'] = self.run
        self.commands['status_request'] = self.send_status

    def send_status(self, msg=None):
        """Reply to the master by sending the current status
        """
        if not self.already_responded:
            print("responding to master")
            reply = self.build_status_message()
            self.result_collector.send_json(reply)
            self.already_responded = True

    def start(self):
        """Start the turret and wait for the master to run the test
        """
        print("starting turret")
        self.status = "Ready"
        while self.start_loop:
            payload = self.master_publisher.recv_string()
            payload = json.loads(payload)
            self.exec_command(payload)

    def run(self, msg=None):
        """The main run method
        """
        print("Starting tests")
        self.start_time = time.time()
        self.start_loop = False
        self.status = 'running'
        self.send_status()

        if 'rampup' in self.config:
            rampup = float(self.config['rampup']) / float(self.config['canons'])
        else:
            rampup = 0
        last_insert = 0
        print(rampup)

        if rampup > 0 and rampup < 1:
            timeout = rampup * 1000
        else:
            timeout = 1000

        try:
            while self.run_loop:
                if len(self.canons) < self.config['canons'] and time.time() - last_insert >= rampup:
                    canon = Canon(self.start_time, self.script_module, self.uuid)
                    canon.daemon = True
                    self.canons.append(canon)
                    canon.start()
                    last_insert = time.time()
                print(len(self.canons))

                socks = dict(self.poller.poll(timeout))

                if self.master_publisher in socks:
                    data = self.master_publisher.recv_string()
                    data = json.loads(data)
                    if 'command' in data and data['command'] == 'stop': # not managed, must break the loop
                        print("Exiting loop, premature stop")
                        self.run_loop = False
                        break

                if self.local_result in socks:
                    results = self.local_result.recv_json()
                    results['turret_name'] = self.config['name']
                    self.result_collector.send_json(results)

            for i in self.canons:
                i.run_loop = False
            for i in self.canons:
                i.join()

        except (Exception, RuntimeError, KeyboardInterrupt) as e:
            self.status = "Aborted"
            print(e)
            self.send_status()
            # data = self.build_status_message()
            # self.result_collector.send_json(data)
            # self.start_loop = True
            # self.already_responded = False

    def stop(self, msg=None):
        """The main stop method
        """
        pass
|
Python
| 0.031708
|
@@ -16,16 +16,33 @@
ort json
+%0Aimport traceback
%0A%0Afrom o
@@ -1489,30 +1489,8 @@
= 0
-%0A print(rampup)
%0A%0A
@@ -2001,52 +2001,8 @@
me()
-%0A print(len(self.canons))
%0A%0A
@@ -2945,16 +2945,16 @@
rint(e)%0A
-
@@ -2976,16 +2976,50 @@
tatus()%0A
+ traceback.print_exc()%0A
|
b962bf374ac57c6084fb6724b9abad21719cb612
|
Change dump filename extensions --> txt
|
iotendpoints/endpoints/views.py
|
iotendpoints/endpoints/views.py
|
import os
import pytz
import base64
# from django.shortcuts import render
from django.conf import settings
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from django.utils import timezone
from endpoints.models import Request
from django.contrib.auth import authenticate

META_KEYS = ['QUERY_STRING', 'REMOTE_ADDR', 'REMOTE_HOST', 'REMOTE_USER',
             'REQUEST_METHOD', 'SERVER_NAME', 'SERVER_PORT']


def index(request):
    return HttpResponse("Hello, world. This is IoT endpoint.")


def _dump_request_endpoint(request):
    """
    Dump a HttpRequest to files in a directory.
    """
    now = timezone.now().astimezone(pytz.utc)
    r = Request(method=request.method)
    r.path = os.path.join(now.strftime('%Y-%m-%d'), now.strftime('%Y%m%dT%H%M%S.%fZ'))
    fpath = os.path.join(settings.MEDIA_ROOT, r.path)
    os.makedirs(fpath, exist_ok=True)
    fname = os.path.join(fpath, 'request.raw')
    with open(fname, 'wb') as destination:
        destination.write(request.body)
    res = []
    res.append('Request Method: {}'.format(request.method))
    res.append('--- GET parameters ---')
    for key, val in request.GET.items():
        res.append('{}={}'.format(key, val))
    res.append('--- POST parameters ---')
    for key, val in request.POST.items():
        res.append('{}={}'.format(key, val))
    res.append('--- META parameters ---')
    for key, val in request.META.items():
        if key.startswith('HTTP_') or key.startswith('CONTENT_') or key in META_KEYS:
            res.append('{}={}'.format(key, val))
    res.append('--- FILES ---')
    fnr = 0
    for key, val in request.FILES.items():
        res.append('{}. {}={}'.format(fnr, key, val))
        fnr += 1
        f = request.FILES[key]
        res.append('content_type={}'.format(f.content_type))
        res.append('size={}B'.format(f.size))
        fname = os.path.join(fpath, '{}'.format(val))
        res.append('path={}'.format(fname))
        with open(fname, 'wb+') as destination:
            for chunk in f.chunks():
                destination.write(chunk)
    r.filecount = fnr
    r.save()
    fname = os.path.join(fpath, 'request.txt')
    with open(fname, 'wt+') as destination:
        destination.write('\n'.join(res))
    return res


@csrf_exempt
def obscure_dump_request_endpoint(request):
    """
    Dump a HttpRequest to files in a directory.
    """
    res = _dump_request_endpoint(request)
    print('\n'.join(res)) # to console or stdout/stderr
    return HttpResponse("OK, I dumped HTTP request data to a file.")


def _basicauth(request):
    # Check for valid basic auth header
    if 'HTTP_AUTHORIZATION' in request.META:
        auth = request.META['HTTP_AUTHORIZATION'].split()
        if len(auth) == 2:
            if auth[0].lower() == "basic":
                a = auth[1].encode('utf8')
                s = base64.b64decode(a)
                uname, passwd = s.decode('utf8').split(':')
                user = authenticate(username=uname, password=passwd)
                return uname, passwd, user
    return None, None, None


@csrf_exempt
def basicauth_dump_request_endpoint(request):
    """
    Dump a HttpRequest to files in a directory.
    """
    uname, passwd, user = _basicauth(request)
    print(uname, passwd, user)
    if user is None:
        # Either they did not provide an authorization header or
        # something in the authorization attempt failed. Send a 401
        # back to them to ask them to authenticate.
        response = HttpResponse('<h1>401 Unauthorized</h1> You need a valid user account '
                                '(username and password) to access this page.')
        response.status_code = 401
        BASIC_AUTH_REALM = 'test'
        response['WWW-Authenticate'] = 'Basic realm="{}"'.format(BASIC_AUTH_REALM)
        return response
    else:
        res = _dump_request_endpoint(request)
        print('\n'.join(res))
        return HttpResponse("OK, I dumped HTTP request data to a file.")
|
Python
| 0
|
@@ -441,16 +441,31 @@
ER_PORT'
+, 'REQUEST_URI'
%5D%0A%0A%0Adef
@@ -954,12 +954,17 @@
uest
-.raw
+_body.txt
')%0A
@@ -1118,16 +1118,88 @@
method))
+%0A res.append('Request full path: %7B%7D'.format(request.get_full_path()))
%0A%0A re
@@ -2266,16 +2266,24 @@
'request
+_headers
.txt')%0A
|
c0627c6d8d11a9b9597b8fecd10b562d46a71521
|
Send fio results to fio.sc.couchbase.com
|
perfrunner/tests/fio.py
|
perfrunner/tests/fio.py
|
from collections import defaultdict
from logger import logger
from perfrunner.helpers.misc import pretty_dict
from perfrunner.helpers.remote import RemoteHelper
from perfrunner.tests import PerfTest


class FIOTest(PerfTest):

    def __init__(self, cluster_spec, test_config, verbose):
        self.cluster_spec = cluster_spec
        self.test_config = test_config

        self.remote = RemoteHelper(cluster_spec, test_config, verbose)

    @staticmethod
    def _parse(results):
        """Terse output parsing is based on the following guide:

        https://github.com/axboe/fio/blob/master/HOWTO
        """
        stats = defaultdict(int)
        for host, output in results.items():
            for job in output.split():
                stats[host] += int(job.split(';')[7])  # reads
                stats[host] += int(job.split(';')[48])  # writes
        return stats

    def run(self):
        stats = self.remote.fio(self.test_config.fio['config'])
        logger.info('IOPS: {}'.format(pretty_dict(self._parse(stats))))
|
Python
| 0
|
@@ -30,16 +30,32 @@
ltdict%0A%0A
+import requests%0A
from log
@@ -238,16 +238,185 @@
Test):%0A%0A
+ TRACKER = 'fio.sc.couchbase.com'%0A%0A TEMPLATE = %7B%0A 'group': '%7B%7D, random mixed reads and writes, IOPS',%0A 'metric': None,%0A 'value': None,%0A %7D%0A%0A
def
@@ -1066,16 +1066,526 @@
stats%0A%0A
+ def _post(self, data):%0A data = pretty_dict(data)%0A logger.info('Posting: %7B%7D'.format(data))%0A requests.post('http://%7B%7D/api/v1/benchmarks'.format(self.TRACKER),%0A data=data)%0A%0A def _report_kpi(self, stats):%0A for host, iops in stats.items():%0A data = self.TEMPLATE.copy()%0A data%5B'group'%5D = data%5B'group'%5D.format(self.cluster_spec.name.title())%0A data%5B'metric'%5D = host%0A data%5B'value'%5D = iops%0A%0A self._post(data)%0A%0A
def
@@ -1671,49 +1671,24 @@
-logger.info('IOPS: %7B%7D'.format(pretty_dict
+self._report_kpi
(sel
@@ -1703,11 +1703,9 @@
(stats))
-))
%0A
|
fee1b1f55567f5ef7ac4e8d78f531cf9780c9400
|
Fix typo. (#1509)
|
storage/cloud-client/notification_polling.py
|
storage/cloud-client/notification_polling.py
|
#!/usr/bin/env python
# Copyright 2017 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This application demonstrates how to poll for GCS notifications from a
Cloud Pub/Sub subscription, parse the incoming message, and acknowledge the
successful processing of the message.
This application will work with any subscription configured for pull rather
than push notifications. If you do not already have notifications configured,
you may consult the docs at
https://cloud.google.com/storage/docs/reporting-changes or follow the steps
below:
1. First, follow the common setup steps for these snippets, specically
configuring auth and installing dependencies. See the README's "Setup"
section.
2. Activate the Google Cloud Pub/Sub API, if you have not already done so.
https://console.cloud.google.com/flows/enableapi?apiid=pubsub
3. Create a Google Cloud Storage bucket:
$ gsutil mb gs://testbucket
4. Create a Cloud Pub/Sub topic and publish bucket notifications there:
$ gsutil notification create -f json -t testtopic gs://testbucket
5. Create a subscription for your new topic:
$ gcloud beta pubsub subscriptions create testsubscription --topic=testtopic
6. Run this program:
$ python notification_polling.py my-project-id testsubscription
7. While the program is running, upload and delete some files in the testbucket
bucket (you could use the console or gsutil) and watch as changes scroll by
in the app.
"""
import argparse
import json
import time
from google.cloud import pubsub_v1
def summarize(message):
# [START parse_message]
data = message.data.decode('utf-8')
attributes = message.attributes
event_type = attributes['eventType']
bucket_id = attributes['bucketId']
object_id = attributes['objectId']
generation = attributes['objectGeneration']
description = (
'\tEvent type: {event_type}\n'
'\tBucket ID: {bucket_id}\n'
'\tObject ID: {object_id}\n'
'\tGeneration: {generation}\n').format(
event_type=event_type,
bucket_id=bucket_id,
object_id=object_id,
generation=generation)
if 'overwroteGeneration' in attributes:
description += '\tOverwrote generation: %s\n' % (
attributes['overwroteGeneration'])
if 'overwrittenByGeneration' in attributes:
description += '\tOverwritten by generation: %s\n' % (
attributes['ovewrittenByGeneration'])
payload_format = attributes['payloadFormat']
if payload_format == 'JSON_API_V1':
object_metadata = json.loads(data)
size = object_metadata['size']
content_type = object_metadata['contentType']
metageneration = object_metadata['metageneration']
description += (
'\tContent type: {content_type}\n'
'\tSize: {object_size}\n'
'\tMetageneration: {metageneration}\n').format(
content_type=content_type,
object_size=size,
metageneration=metageneration)
return description
# [END parse_message]
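# A minimal sketch (not part of the original sample) of exercising
# summarize() without a live subscription: it only reads `.data` and
# `.attributes`, so a tiny stand-in object is enough. FakeMessage is a
# hypothetical test double, not part of the google-cloud-pubsub API.
def _try_summarize():
    class FakeMessage(object):
        def __init__(self, data, attributes):
            self.data = data              # bytes payload, as Pub/Sub delivers it
            self.attributes = attributes  # dict of notification attributes

    msg = FakeMessage(
        data=json.dumps({'size': 11, 'contentType': 'text/plain',
                         'metageneration': 1}).encode('utf-8'),
        attributes={'eventType': 'OBJECT_FINALIZE',
                    'bucketId': 'testbucket',
                    'objectId': 'hello.txt',
                    'objectGeneration': '1',
                    'payloadFormat': 'JSON_API_V1'})
    print(summarize(msg))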
def poll_notifications(project, subscription_name):
"""Polls a Cloud Pub/Sub subscription for new GCS events for display."""
# [BEGIN poll_notifications]
subscriber = pubsub_v1.SubscriberClient()
subscription_path = subscriber.subscription_path(
project, subscription_name)
def callback(message):
print('Received message:\n{}'.format(summarize(message)))
message.ack()
subscriber.subscribe(subscription_path, callback=callback)
# The subscriber is non-blocking, so we must keep the main thread from
# exiting to allow it to process messages in the background.
print('Listening for messages on {}'.format(subscription_path))
while True:
time.sleep(60)
# [END poll_notifications]
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
'project',
help='The ID of the project that owns the subscription')
parser.add_argument('subscription',
help='The ID of the Pub/Sub subscription')
args = parser.parse_args()
poll_notifications(args.project, args.subscription)
|
Python
| 0.997978
|
@@ -2954,16 +2954,17 @@
tes%5B'ove
+r
writtenB
|
69038348a0e029d2b06c2753a0dec9b2552ed820
|
Add license header to __init__.py
|
openquake/__init__.py
|
openquake/__init__.py
|
"""
OpenGEM is an open-source platform for the calculation of hazard, risk,
and socio-economic impact. It is a project of the Global Earthquake Model,
and may be extended by other organizations to address additional classes
of peril.
For more information, please see the website at http://www.globalquakemodel.org
This software may be downloaded at http://github.com/gem/openquake
The continuous integration server is at http://openquake.globalquakemodel.org
Up-to-date sphinx documentation is at http://openquake.globalquakemodel.org/docs
This software is licensed under the LGPL license, for more details
please see the LICENSE file.
Copyright (c) 2010, GEM Foundation.
"""
|
Python
| 0.000018
|
@@ -673,12 +673,685 @@
ation.%0A%0A
+ This program is free software: you can redistribute it and/or modify%0A it under the terms of the GNU Lesser General Public License as published by%0A the Free Software Foundation, either version 3 of the License, or%0A (at your option) any later version.%0A%0A This program is distributed in the hope that it will be useful,%0A but WITHOUT ANY WARRANTY; without even the implied warranty of%0A MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the%0A GNU Lesser General Public License for more details.%0A%0A You should have received a copy of the GNU Lesser General Public License%0A along with this program. If not, see %3Chttp://www.gnu.org/licenses/%3E.%0A
%22%22%22%0A
|
9815f575f882ac54f4633a2899513dc492bd47e3
|
Update optimizeHyper.py
|
oppa/optimizeHyper.py
|
oppa/optimizeHyper.py
|
import numpy as np
import math
#from macs.learnMACSparam import run as learnMACparam
from BayesianOptimization.bayes_opt.bayesian_optimization import BayesianOptimization
#function for testing Bayesian optimization.
def test_function(y):
return (y-50)**2
def optimized_function(function, error, *param):
"""
    This function wraps a black-box function for Bayesian optimization.
    We write vectors with capital letters, e.g. X = { x1, x2, ... , xn }
    and A = { a1, a2, ... , an }. In our setting, X is an input parameter
    vector for a peak-calling algorithm and y = f(X) is the error rate for
    that input, which lets us treat such functions abstractly.
:param function:
        the function to wrap,
        e.g. in MACS, f could be macs.learnMACSparam.run()
    :param error:
        the resulting error rate for an input parameter vector X = {p1,p2,p3},
        e.g. in MACS, X could be { q value, mfold }
    :param param:
        the input parameter(s) that will be passed to a peak-detection
        algorithm, denoted X = { p1, p2, p3 }
:return:
"""
return None
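# A hedged sketch of what such a wrapper could look like, assuming `function`
# maps keyword parameters to an error rate. BayesianOptimization maximizes its
# objective, so the error is negated; this is illustrative, not the author's
# implementation.
def _example_wrap(function):
    def objective(**params):
        # evaluate the black-box peak caller and negate its error rate,
        # because the optimizer maximizes
        return -function(**params)
    return objective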
def run(function, Param_bound, init_point):
"""
    Run Bayesian optimization on a function. The function is simply the process
    that feeds the file and parameters to a peak-detection algorithm and returns an error.
X : parameters
Y : errors
f : X -> Y
in MACS case, for example, run(args) in learnMACSparam.py can be optimized-function.
or you can wrap that function. ( f : learnMACSparam.run() . . . etc )
    Also, because we use the "BayesianOptimization" module, you should keep
    referring to that module whenever you update, modify, or read this code.
:param function:
function will be optimized.
:param Param_bound:
        the boundary of each parameter to be learned; it must be a
        python tuple : (min, max)
:return:
"""
optimizer = BayesianOptimization(function, Param_bound, init_point)
"""
    In the BayesianOptimization class's fields:
        keys : error value for each parameter ( List : [] )
dim : number of parameters ( int : 0-n )
bounds : boundary of parameters to List ( List : [] )
X : Numpy array place holders ( Numpy Array )
Y : Numpy array place holders ( Numpy Array )
gp : Class Object of GaussianProcessRegressor ,
set the Kernel function and others ( Class GaussianProcessRegressor )
util : choose your utility function ( function )
        res : output (result) dictionary ; its fields are
self.res['max'] = {'max_val': None, 'max_params': None}
self.res['all'] = {'values': [], 'params': []}
and etc...
    You can run the optimization with the maximize() method, and
    initialize the data frame ( the tabular data structure ) with the initialize_df method.
"""
optimizer.init(init_point)
optimizer.maximize(Param_bound, acq = 'ei')
optimizer.points_to_csv("result")
#code for test you just run this script
# number of random generate sample.
init_point = 3
Param_bound = {'y' : (0, 100.0)}
run(test_function, Param_bound, init_point)
|
Python
| 0.000001
|
@@ -1904,16 +1904,137 @@
n,max)%0A%0A
+ :param init_point:%0A this parameter decide number of sample which randomly generated for first state.%0A %0A
:ret
@@ -2038,16 +2038,21 @@
return:%0A
+ %0A
%22%22%22%0A
@@ -3359,28 +3359,29 @@
on, Param_bound, init_point)
+%0A
|
bab1ad914ab9273aa8ab905edef2578b5c760f31
|
add django installed apps init opps core
|
opps/core/__init__.py
|
opps/core/__init__.py
|
# -*- coding: utf-8 -*-
from django.utils.translation import ugettext_lazy as _
trans_app_label = _('Opps')
|
Python
| 0
|
@@ -77,35 +77,304 @@
s _%0A
-%0A%0A%0Atrans_app_label = _('Opps')
+from django.conf import settings%0A%0A%0A%0Atrans_app_label = _('Opps')%0A%0Asettings.INSTALLED_APPS += ('opps.article',%0A 'opps.image',%0A 'opps.channel',%0A 'opps.source',%0A 'redactor',%0A 'tagging',)%0A%0Asettings.REDACTOR_OPTIONS = %7B'lang': 'en'%7D%0Asettings.REDACTOR_UPLOAD = 'uploads/'
%0A
|
347bb827151e8efe1e59683da128215027253245
|
Version bump.
|
orchestra/__init__.py
|
orchestra/__init__.py
|
# The current Orchestra version.
__version__ = '0.1.41'
default_app_config = 'orchestra.apps.OrchestraAppConfig'
|
Python
| 0
|
@@ -46,17 +46,17 @@
= '0.1.4
-1
+2
'%0A%0Adefau
|
fdea91164a145474d0ce093420aeace592cc57a6
|
Update RESOURCE_TYPE_MAP and datacite format subjects
|
osf/metadata/utils.py
|
osf/metadata/utils.py
|
from website import settings
SUBJECT_SCHEME = 'bepress Digital Commons Three-Tiered Taxonomy'
RESOURCE_TYPE_MAP = {
'Audio/Video': 'Audiovisual',
'Dataset': 'Dataset',
'Image': 'Image',
'Model': 'Model',
'Software': 'Software',
'Book': 'Text',
'Funding Submission': 'Text',
'Journal Article': 'Text',
'Lesson': 'Text',
'Poster': 'Text',
'Preprint': 'Text',
'Presentation': 'Text',
'Research Tool': 'Text',
'Thesis': 'Text',
'Other': 'Text',
'(unas)': 'Other'
}
def datacite_format_contributors(contributors):
"""
contributors_list: list of OSFUsers to format
returns: formatted json for datacite
"""
creators = []
for contributor in contributors:
name_identifiers = [
{
'nameIdentifier': contributor.absolute_url,
'nameIdentifierScheme': 'OSF',
'schemeURI': settings.DOMAIN
}
]
if contributor.external_identity.get('ORCID'):
verified = contributor.external_identity['ORCID'].values()[0] == 'VERIFIED'
if verified:
name_identifiers.append({
'nameIdentifier': contributor.external_identity['ORCID'].keys()[0],
'nameIdentifierScheme': 'ORCID',
'schemeURI': 'http://orcid.org/'
})
creators.append({
'creatorName': {
'creatorName': contributor.fullname,
'familyName': contributor.family_name,
'givenName': contributor.given_name
},
'nameIdentifiers': name_identifiers
})
return creators
def datacite_format_subjects(subjects):
return [
{
'subject': subject,
'subjectScheme': SUBJECT_SCHEME
}
for subject in subjects
]
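# For illustration (assuming plain string subjects), the mapping above turns
#   datacite_format_subjects(['Engineering'])
# into
#   [{'subject': 'Engineering', 'subjectScheme': SUBJECT_SCHEME}]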
def datacite_format_identifier(target):
identifier = target.get_identifier('doi')
if identifier:
return {
'identifier': identifier.value,
'identifierType': 'DOI'
}
def datacite_format_rights(license):
return {
'rights': license.name,
'rightsURI': license.url
}
|
Python
| 0
|
@@ -503,16 +503,17 @@
,%0A '(
+:
unas)':
@@ -1784,16 +1784,82 @@
subject
+.bepress_subject.text if subject.bepress_subject else subject.text
,%0A
|
ff60b8e2cb89e1c4d6c75af79d0199d142c6e2b4
|
fix TypeError: memoryview: a bytes-like object is required, not 'str'
|
ouimeaux/subscribe.py
|
ouimeaux/subscribe.py
|
from collections import defaultdict
import logging
from xml.etree import cElementTree
from functools import partial
import gevent
from gevent.pywsgi import WSGIServer
from ouimeaux.utils import get_ip_address, requests_request
from ouimeaux.device.insight import Insight
from ouimeaux.device.maker import Maker
from ouimeaux.signals import subscription
from random import randint
log = logging.getLogger(__name__)
NS = "{urn:schemas-upnp-org:event-1-0}"
SUCCESS = '<html><body><h1>200 OK</h1></body></html>'
class SubscriptionRegistry(object):
def __init__(self):
self._devices = {}
self._callbacks = defaultdict(list)
self.port = randint(8300, 8990)
def register(self, device):
if not device:
log.error("Received an invalid device: %r", device)
return
log.info("Subscribing to basic events from %r", device)
# Provide a function to register a callback when the device changes
# state
device.register_listener = partial(self.on, device, 'BinaryState')
self._devices[device.host] = device
self._resubscribe(device.basicevent.eventSubURL)
def _resubscribe(self, url, sid=None):
headers = {'TIMEOUT': 'Second-%d' % 1800}
if sid is not None:
headers['SID'] = sid
else:
host = get_ip_address()
headers.update({
"CALLBACK": '<http://%s:%d>'%(host, self.port),
"NT": "upnp:event"
})
response = requests_request(method="SUBSCRIBE", url=url,
headers=headers)
if response.status_code == 412 and sid:
# Invalid subscription ID. Send an UNSUBSCRIBE for safety and
# start over.
requests_request(method='UNSUBSCRIBE', url=url,
headers={'SID': sid})
return self._resubscribe(url)
timeout = int(response.headers.get('timeout', '1801').replace(
'Second-', ''))
sid = response.headers.get('sid', sid)
gevent.spawn_later(int(timeout * 0.75), self._resubscribe, url, sid)
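        # Renewal arithmetic, for clarity: with the 1800-second TIMEOUT
        # requested above, gevent reschedules _resubscribe after
        # 1800 * 0.75 = 1350 seconds, leaving a 450-second safety margin.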
def _handle(self, environ, start_response):
device = self._devices.get(environ['REMOTE_ADDR'])
if device is not None:
data = environ['wsgi.input'].read()
# trim garbage from end, if any
data = data.split("\n\n")[0]
doc = cElementTree.fromstring(data)
for propnode in doc.findall('./{0}property'.format(NS)):
for property_ in propnode.getchildren():
text = property_.text
if isinstance(device, Insight) and property_.tag=='BinaryState':
text = text.split('|')[0]
subscription.send(device, type=property_.tag, value=text)
self._event(device, property_.tag, text)
start_response('200 OK', [
('Content-Type', 'text/html'),
('Content-Length', str(len(SUCCESS))),
('Connection', 'close')
])
yield SUCCESS
def _event(self, device, type_, value):
for t, callback in self._callbacks.get(device, ()):
if t == type_:
callback(value)
def on(self, device, type, callback):
self._callbacks[device].append((type, callback))
@property
def server(self):
"""
UDP server to listen for responses.
"""
server = getattr(self, "_server", None)
if server is None:
server = WSGIServer(('', self.port), self._handle, log=None)
self._server = server
return server
|
Python
| 0.000294
|
@@ -508,16 +508,50 @@
/html%3E'%0A
+SUCCESS_BINARY = SUCCESS.encode()%0A
%0A%0Aclass
@@ -3136,16 +3136,23 @@
SUCCESS
+_BINARY
%0A%0A de
|
f5275dba7285a97b05ac3abb756897ba75f119c5
|
remove execute permission
|
p038_conut_and_say.py
|
p038_conut_and_say.py
|
#!/usr/bin/python
# -*- utf-8 -*-
class Solution:
def __init__(self, init='1'):
self._list = [init]
# @return a string
def countAndSay(self, n):
while len(self._list) < n:
self._list.append(self.say(self._list[-1]))
return self._list[n-1]
@staticmethod
def say(string):
ret = []
save = None
count = 1
for c in string:
if save is None:
save = c
elif c == save:
count += 1
else:
ret.append('%d%s' % (count, save))
save = c
count = 1
ret.append('%d%s' % (count, save))
return ''.join(ret)
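    # Worked example: say('1211') reads as "one 1, one 2, two 1s",
    # so it returns '111221'; iterating say() from '1' yields the
    # count-and-say sequence 1, 11, 21, 1211, 111221, ...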
if __name__ == '__main__':
solution = Solution()
print(solution.countAndSay(10))
print(solution._list)
|
Python
| 0.000004
| |
f06c19d0fc686a915d002124893e35209836eba4
|
Make TestCase.func_code() private.
|
subversion/tests/cmdline/svntest/testcase.py
|
subversion/tests/cmdline/svntest/testcase.py
|
#!/usr/bin/env python
#
# testcase.py: Control of test case execution.
#
# Subversion is a tool for revision control.
# See http://subversion.tigris.org for more information.
#
# ====================================================================
# Copyright (c) 2000-2004 CollabNet. All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://subversion.tigris.org/license-1.html.
# If newer versions of this license are posted there, you may use a
# newer version instead, at your option.
#
######################################################################
import os, sys, string
import traceback # for print_exc()
import svntest
__all__ = ['TestCase', 'XFail', 'Skip']
class SVNTestStatusCodeError(Exception):
'Test driver returned a status code.'
pass
class _Predicate:
"""A general-purpose predicate that encapsulates a test case (function),
a condition for its execution and a set of display properties for test
lists and test log output."""
def __init__(self, func):
if isinstance(func, _Predicate):
# Whee, this is better than blessing objects in Perl!
# For the unenlightened: What we're doing here is adopting the
# identity *and class* of 'func'
self.__dict__ = func.__dict__
self.__class__ = func.__class__
else:
self.func = func
self.cond = 0
self.text = ['PASS: ', 'FAIL: ', 'SKIP: ', '']
assert type(self.func) is type(lambda x: 0)
def list_mode(self):
return self.text[3]
def skip_text(self):
return self.text[2]
def run_text(self, result=0):
return self.text[result]
def convert_result(self, result):
return result
class TestCase:
"""Encapsulate a single test case (predicate), including logic for
  running the test and test list output."""
def __init__(self, func, index):
self.pred = _Predicate(func)
self.index = index
def _check_name(self):
name = self.pred.func.__doc__
if not name:
raise Exception(self.pred.func.__name__ + ' lacks required doc string')
if len(name) > 50:
print 'WARNING: Test doc string exceeds 50 characters'
if name[-1] == '.':
print 'WARNING: Test doc string ends in a period (.)'
if not string.lower(name[0]) == name[0]:
print 'WARNING: Test doc string is capitalized'
def need_sandbox(self):
return self.func_code().co_argcount != 0
def get_sandbox_name(self):
filename = self.func_code().co_filename
return os.path.splitext(os.path.basename(filename))[0]
def func_code(self):
return self.pred.func.func_code
def list(self):
print " %2d %-5s %s" % (self.index,
self.pred.list_mode(),
self.pred.func.__doc__)
self._check_name()
def _print_name(self):
print os.path.basename(sys.argv[0]), str(self.index) + ":", \
self.pred.func.__doc__
self._check_name()
def run(self, args):
"""Run self.pred on ARGS, return the result. The return value is
- 0 if the test was successful
- 1 if it errored in a way that indicates test failure
    - 2 if the test was skipped
"""
result = 0
if self.pred.cond:
print self.pred.skip_text(),
else:
try:
rc = apply(self.pred.func, args)
if rc is not None:
raise SVNTestStatusCodeError
except SVNTestStatusCodeError, ex:
print "STYLE ERROR in",
self._print_name()
print ex.__doc__
sys.exit(255)
except svntest.Skip, ex:
result = 2
except svntest.Failure, ex:
result = 1
# We captured Failure and its subclasses. We don't want to print
# anything for plain old Failure since that just indicates test
# failure, rather than relevant information. However, if there
# *is* information in the exception's arguments, then print it.
if ex.__class__ != svntest.Failure or ex.args:
ex_args = str(ex)
if ex_args:
print 'EXCEPTION: %s: %s' % (ex.__class__.__name__, ex_args)
else:
print 'EXCEPTION:', ex.__class__.__name__
except KeyboardInterrupt:
print 'Interrupted'
sys.exit(0)
except SystemExit, ex:
print 'EXCEPTION: SystemExit(%d), skipping cleanup' % ex.code
print ex.code and 'FAIL: ' or 'PASS: ',
self._print_name()
raise
except:
result = 1
print 'UNEXPECTED EXCEPTION:'
traceback.print_exc(file=sys.stdout)
print self.pred.run_text(result),
result = self.pred.convert_result(result)
self._print_name()
sys.stdout.flush()
return result
class XFail(_Predicate):
"A test that is expected to fail."
def __init__(self, func):
_Predicate.__init__(self, func)
self.text[0] = 'XPASS:'
self.text[1] = 'XFAIL:'
if self.text[3] == '':
self.text[3] = 'XFAIL'
def convert_result(self, result):
    # Conditions are reversed here: a failure is expected, therefore it
    # isn't an error; an unexpected pass is an error.
return not result
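    # e.g. a test that failed as expected (result == 1) converts to a
    # falsy value (not an error), while an unexpected pass (result == 0)
    # converts to a truthy value and is reported as an error.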
class Skip(_Predicate):
"A test that will be skipped when a condition is true."
def __init__(self, func, cond):
_Predicate.__init__(self, func)
self.cond = cond
if self.cond:
self.text[3] = 'SKIP'
### End of file.
|
Python
| 0.000001
|
@@ -2458,24 +2458,25 @@
return self.
+_
func_code().
@@ -2539,24 +2539,25 @@
name = self.
+_
func_code().
@@ -2634,16 +2634,17 @@
%0A%0A def
+_
func_cod
|
c94552bfa70fdb57eb12f724cd6faa6ae9c0d89c
|
Allow passing None for username in v2.Password
|
keystoneclient/auth/identity/v2.py
|
keystoneclient/auth/identity/v2.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import logging
from oslo.config import cfg
import six
from keystoneclient import access
from keystoneclient.auth.identity import base
from keystoneclient import exceptions
from keystoneclient import utils
_logger = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class Auth(base.BaseIdentityPlugin):
@classmethod
def get_options(cls):
options = super(Auth, cls).get_options()
options.extend([
cfg.StrOpt('tenant-id', help='Tenant ID'),
cfg.StrOpt('tenant-name', help='Tenant Name'),
cfg.StrOpt('trust-id', help='Trust ID'),
])
return options
@utils.positional()
def __init__(self, auth_url,
trust_id=None,
tenant_id=None,
tenant_name=None,
reauthenticate=True):
"""Construct an Identity V2 Authentication Plugin.
:param string auth_url: Identity service endpoint for authorization.
:param string trust_id: Trust ID for trust scoping.
:param string tenant_id: Tenant ID for project scoping.
:param string tenant_name: Tenant name for project scoping.
:param bool reauthenticate: Allow fetching a new token if the current
one is going to expire.
(optional) default True
"""
super(Auth, self).__init__(auth_url=auth_url,
reauthenticate=reauthenticate)
self.trust_id = trust_id
self.tenant_id = tenant_id
self.tenant_name = tenant_name
def get_auth_ref(self, session, **kwargs):
headers = {'Accept': 'application/json'}
url = self.auth_url.rstrip('/') + '/tokens'
params = {'auth': self.get_auth_data(headers)}
if self.tenant_id:
params['auth']['tenantId'] = self.tenant_id
elif self.tenant_name:
params['auth']['tenantName'] = self.tenant_name
if self.trust_id:
params['auth']['trust_id'] = self.trust_id
_logger.debug('Making authentication request to %s', url)
resp = session.post(url, json=params, headers=headers,
authenticated=False, log=False)
try:
resp_data = resp.json()['access']
except (KeyError, ValueError):
raise exceptions.InvalidResponse(response=resp)
return access.AccessInfoV2(**resp_data)
@abc.abstractmethod
def get_auth_data(self, headers=None):
"""Return the authentication section of an auth plugin.
:param dict headers: The headers that will be sent with the auth
request if a plugin needs to add to them.
:return dict: A dict of authentication data for the auth type.
"""
class Password(Auth):
@utils.positional(4)
def __init__(self, auth_url, username=None, password=None, user_id=None,
**kwargs):
"""A plugin for authenticating with a username and password.
A username or user_id must be provided.
:param string auth_url: Identity service endpoint for authorization.
:param string username: Username for authentication.
:param string password: Password for authentication.
:param string user_id: User ID for authentication.
:raises TypeError: if a user_id or username is not provided.
"""
super(Password, self).__init__(auth_url, **kwargs)
if not (user_id or username):
msg = 'You need to specify either a username or user_id'
raise TypeError(msg)
self.user_id = user_id
self.username = username
self.password = password
def get_auth_data(self, headers=None):
auth = {'password': self.password}
if self.username:
auth['username'] = self.username
elif self.user_id:
auth['userId'] = self.user_id
return {'passwordCredentials': auth}
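    # For illustration: with username='alice' and password='s3cret' the
    # returned request fragment would be
    #   {'passwordCredentials': {'password': 's3cret', 'username': 'alice'}}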
@classmethod
def get_options(cls):
options = super(Password, cls).get_options()
options.extend([
cfg.StrOpt('user-name',
dest='username',
deprecated_name='username',
help='Username to login with'),
            cfg.StrOpt('user-id', help='User ID to login with'),
cfg.StrOpt('password', secret=True, help='Password to use'),
])
return options
class Token(Auth):
def __init__(self, auth_url, token, **kwargs):
"""A plugin for authenticating with an existing token.
:param string auth_url: Identity service endpoint for authorization.
:param string token: Existing token for authentication.
"""
super(Token, self).__init__(auth_url, **kwargs)
self.token = token
def get_auth_data(self, headers=None):
if headers is not None:
headers['X-Auth-Token'] = self.token
return {'token': {'id': self.token}}
@classmethod
def get_options(cls):
options = super(Token, cls).get_options()
options.extend([
cfg.StrOpt('token', secret=True, help='Token'),
])
return options
|
Python
| 0.000006
|
@@ -3372,16 +3372,41 @@
%22%22%22%0A%0A%0A
+_NOT_PASSED = object()%0A%0A%0A
class Pa
@@ -3487,20 +3487,27 @@
sername=
-None
+_NOT_PASSED
, passwo
@@ -3518,22 +3518,8 @@
one,
- user_id=None,
%0A
@@ -3531,16 +3531,37 @@
+ user_id=_NOT_PASSED,
**kwarg
@@ -4098,33 +4098,58 @@
if
-not (user_id or username)
+username is _NOT_PASSED and user_id is _NOT_PASSED
:%0A
@@ -4215,16 +4215,16 @@
ser_id'%0A
-
@@ -4249,16 +4249,143 @@
r(msg)%0A%0A
+ if username is _NOT_PASSED:%0A username = None%0A if user_id is _NOT_PASSED:%0A user_id = None%0A%0A
|
de29012d0bf48cf970ad37c62d7db960161f14c0
|
Remove unused stat import
|
core/dovecot/start.py
|
core/dovecot/start.py
|
#!/usr/bin/python3
import os
import stat
import glob
import multiprocessing
import logging as log
import sys
from podop import run_server
from socrate import system, conf
log.basicConfig(stream=sys.stderr, level=os.environ.get("LOG_LEVEL", "WARNING"))
def start_podop():
os.setuid(8)
url = "http://" + os.environ["ADMIN_ADDRESS"] + "/internal/dovecot/§"
run_server(0, "dovecot", "/tmp/podop.socket", [
("quota", "url", url ),
("auth", "url", url),
("sieve", "url", url),
])
# Actual startup script
os.environ["FRONT_ADDRESS"] = system.get_host_address_from_environment("FRONT", "front")
os.environ["REDIS_ADDRESS"] = system.get_host_address_from_environment("REDIS", "redis")
os.environ["ADMIN_ADDRESS"] = system.get_host_address_from_environment("ADMIN", "admin")
os.environ["ANTISPAM_ADDRESS"] = system.get_host_address_from_environment("ANTISPAM", "antispam:11334")
if os.environ["WEBMAIL"] != "none":
os.environ["WEBMAIL_ADDRESS"] = system.get_host_address_from_environment("WEBMAIL", "webmail")
for dovecot_file in glob.glob("/conf/*.conf"):
conf.jinja(dovecot_file, os.environ, os.path.join("/etc/dovecot", os.path.basename(dovecot_file)))
os.makedirs("/conf/bin", exist_ok=True)
for script_file in glob.glob("/conf/*.script"):
out_file = os.path.join("/conf/bin/", os.path.basename(script_file).replace('.script',''))
conf.jinja(script_file, os.environ, out_file)
os.chmod(out_file, 0o555)
# Run Podop, then postfix
multiprocessing.Process(target=start_podop).start()
os.system("chown mail:mail /mail")
os.system("chown -R mail:mail /var/lib/dovecot /conf")
os.execv("/usr/sbin/dovecot", ["dovecot", "-c", "/etc/dovecot/dovecot.conf", "-F"])
|
Python
| 0
|
@@ -27,20 +27,8 @@
os%0A
-import stat%0A
impo
|
b61c907a49e20da6148828aa512c86b58c0312c1
|
use the real index info in the sql pillow
|
corehq/pillows/sms.py
|
corehq/pillows/sms.py
|
from corehq.apps.change_feed import topics
from corehq.apps.change_feed.consumer.feed import KafkaChangeFeed
from corehq.elastic import get_es_new
from corehq.apps.sms.models import SMSLog
from corehq.pillows.mappings.sms_mapping import SMS_MAPPING, SMS_INDEX, SMS_META, SMS_TYPE
from dimagi.utils.decorators.memoized import memoized
from pillowtop.checkpoints.manager import PillowCheckpoint, PillowCheckpointEventHandler
from pillowtop.es_utils import ElasticsearchIndexInfo
from pillowtop.listener import AliasedElasticPillow
from pillowtop.pillow.interface import ConstructedPillow
from pillowtop.processors.elastic import ElasticProcessor
SMS_PILLOW_CHECKPOINT_ID = 'sql-sms-to-es'
SMS_PILLOW_KAFKA_CONSUMER_GROUP_ID = 'sql-sms-to-es'
ES_SMS_INDEX = SMS_INDEX
class SMSPillow(AliasedElasticPillow):
"""
Simple/Common Case properties Indexer
"""
document_class = SMSLog # while this index includes all users,
# I assume we don't care about querying on properties specfic to WebUsers
couch_filter = "sms/all_logs"
es_timeout = 60
es_alias = "smslogs"
es_type = SMS_TYPE
es_meta = SMS_META
es_index = ES_SMS_INDEX
default_mapping = SMS_MAPPING
@classmethod
@memoized
def calc_meta(cls):
#todo: actually do this correctly
"""
override of the meta calculator since we're separating out all the types,
        so we just do a hash of the "prototype" instead to determine the md5
"""
return cls.calc_mapping_hash({"es_meta": cls.es_meta, "mapping": cls.default_mapping})
def change_transport(self, doc_dict):
# SMS changes don't go to couch anymore. Let the SqlSMSPillow process
# changes from now on.
# Also, we explicitly need this to be a no-op because we're going to
# delete all sms from couch and don't want them to be deleted from
# elasticsearch.
return
def get_sql_sms_pillow(pillow_id):
checkpoint = PillowCheckpoint(SMS_PILLOW_CHECKPOINT_ID)
processor = ElasticProcessor(
elasticsearch=get_es_new(),
index_info=ElasticsearchIndexInfo(index=ES_SMS_INDEX, type=ES_SMS_TYPE),
doc_prep_fn=lambda x: x
)
return ConstructedPillow(
name=pillow_id,
checkpoint=checkpoint,
change_feed=KafkaChangeFeed(topics=[topics.SMS], group_id=SMS_PILLOW_KAFKA_CONSUMER_GROUP_ID),
processor=processor,
change_processed_event_handler=PillowCheckpointEventHandler(
checkpoint=checkpoint, checkpoint_frequency=100,
),
)
|
Python
| 0
|
@@ -2135,68 +2135,22 @@
nfo=
-ElasticsearchIndexInfo(index=ES_SMS_INDEX, type=ES_SMS_TYPE)
+SMS_INDEX_INFO
,%0A
|
2487a33c2abc62de579ac615cfd8b58216522b65
|
add cfgfiles as a generic application attribute
|
coshsh/application.py
|
coshsh/application.py
|
#!/usr/bin/env python
#-*- encoding: utf-8 -*-
#
# Copyright 2010-2012 Gerhard Lausser.
# This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
import os
import imp
import inspect
import logging
from util import compare_attr, is_attr
from item import Item
from templaterule import TemplateRule
logger = logging.getLogger('coshsh')
class ApplicationNotImplemented(Exception):
pass
class Application(Item):
id = 1 #0 is reserved for host (primary node for parents)
my_type = 'application'
app_template = "app.tpl"
class_factory = []
lower_columns = ['name', 'type', 'component', 'version', 'patchlevel']
def __init__(self, params):
#print "Application init", self.__class__, self.__class__.__name__, len(self.__class__.class_factory)
if self.__class__.__name__ == "Application":
for c in self.__class__.lower_columns:
try:
params[c] = params[c].lower()
except Exception:
if c in params:
params[c] = None
newcls = self.__class__.get_class(params)
if newcls:
self.__class__ = newcls
self.contact_groups = []
super(Application, self).__init__(params)
self.__init__(params)
self.fingerprint = lambda s=self:s.__class__.fingerprint(params)
else:
logger.debug("this will be Generic: %s" % params)
self.__class__ = GenericApplication
self.contact_groups = []
super(Application, self).__init__(params)
self.__init__(params)
#raise ApplicationNotImplemented
self.fingerprint = lambda s=self:s.__class__.fingerprint(params)
else:
pass
@classmethod
def fingerprint(self, params={}):
return "%s+%s+%s" % (params["host_name"], params["name"], params["type"])
def _i_init__(self, params={}):
super(Application, self).__init__(params)
self.contact_groups = []
def create_servicegroups(self):
pass
def create_contacts(self):
pass
def create_templates(self):
pass
@classmethod
def init_classes(cls, classpath):
for p in [p for p in reversed(classpath) if os.path.exists(p) and os.path.isdir(p)]:
for module, path in [(item, p) for item in os.listdir(p) if item[-3:] == ".py" and (item.startswith('app_') or item.startswith('os_'))]:
try:
path = os.path.abspath(path)
fp, filename, data = imp.find_module(module.replace('.py', ''), [path])
toplevel = imp.load_module('', fp, '', ('py', 'r', imp.PY_SOURCE))
for cl in inspect.getmembers(toplevel, inspect.isfunction):
if cl[0] == "__mi_ident__":
cls.class_factory.append([path, module, cl[1]])
except Exception, e:
print e
finally:
if fp:
fp.close()
#print ".............fill %s / %s woth %s" % (cls, cls.__name__, cls.class_factory)
@classmethod
def get_class(cls, params={}):
#print "getclass from cache", cls, cls.__name__, cls.class_factory
for path, module, class_func in cls.class_factory:
try:
#print "get_class trys", path, module, class_func
newcls = class_func(params)
#print "get_class says", newcls
if newcls:
return newcls
except Exception:
pass
logger.debug("found no matching class for this monitoring item %s" % params)
return None
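    # A hedged sketch of the plugin contract implied above: init_classes()
    # registers any module-level function literally named __mi_ident__ found
    # in app_*.py / os_*.py files on the classpath. A minimal hypothetical
    # plugin module might look like:
    #
    #   # app_example.py
    #   def __mi_ident__(params):
    #       if params.get('type') == 'example':
    #           return ExampleApplication  # some Application subclass
    #       return None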
class GenericApplication(Application):
template_rules = [
TemplateRule(needsattr=None,
template="app_generic_default",
unique_attr=['type', 'name'], unique_config="app_%s_%s_default"),
]
def x__new__(cls, params={}):
return object.__new__(cls)
def __init__(self, params={}):
self.name = params["name"]
super(GenericApplication, self).__init__(params)
def render(self, template_cache, jinja2):
# Maybe we find some processes, ports, filesystems in the
# monitoring_details so we can output generic services
if (hasattr(self, "processes") and self.processes) or (hasattr(self, "filesystems") and self.filesystems) or (hasattr(self, "ports") and self.ports):
super(GenericApplication, self).render(template_cache, jinja2)
else:
return ()
|
Python
| 0
|
@@ -1489,17 +1489,17 @@
r.debug(
-%22
+'
this wil
@@ -1514,13 +1514,12 @@
eric
-:
%25s
-%22
+'
%25 p
@@ -4590,16 +4590,108 @@
tr(self,
+ %22cfgfiles%22) and self.cfgfiles) or (hasattr(self, %22files%22) and self.files) or (hasattr(self,
%22ports%22
|
e8663deb33bcf2ea4d6a43436016d3459103f337
|
Clarify usage of the 'port' parameter
|
couchbase/__init__.py
|
couchbase/__init__.py
|
#
# Copyright 2013, Couchbase, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from couchbase.connection import Connection
from couchbase.user_constants import *
import couchbase._libcouchbase as _LCB
def set_json_converters(encode, decode):
"""
Modify the default JSON conversion functions. This affects all
:class:`~couchbase.connection.Connection` instances.
    These functions will be called instead of the default ones (``json.dumps``
and ``json.loads``) to encode and decode JSON (when :const:`FMT_JSON` is
used).
:param callable encode: Callable to invoke when encoding an object to JSON.
This should have the same prototype as ``json.dumps``, with the
exception that it is only ever passed a single argument.
    :param callable decode: Callable to invoke when decoding JSON to an object.
This should have the same prototype and behavior
as ``json.loads`` with the exception that it is only ever
passed a single argument.
:return: A tuple of ``(old encoder, old decoder)``
No exceptions are raised, and it is the responsibility of the caller to
ensure that the provided functions operate correctly, otherwise exceptions
may be thrown randomly when encoding and decoding values
"""
ret = _LCB._modify_helpers(json_encode=encode, json_decode=decode)
return (ret['json_encode'], ret['json_decode'])
def set_pickle_converters(encode, decode):
"""
Modify the default Pickle conversion functions. This affects all
:class:`~couchbase.connection.Connection` instances.
These functions will be called instead of the default ones
(``pickle.dumps`` and ``pickle.loads``) to encode and decode values to and
from the Pickle format (when :const:`FMT_PICKLE` is used).
:param callable encode: Callable to invoke when encoding an object to
Pickle. This should have the same prototype as ``pickle.dumps`` with
the exception that it is only ever called with a single argument
:param callable decode: Callable to invoke when decoding a Pickle encoded
object to a Python object. Should have the same prototype as
``pickle.loads`` with the exception that it is only ever passed a
single argument
:return: A tuple of ``(old encoder, old decoder)``
No exceptions are raised and it is the responsibility of the caller to
ensure that the provided functions operate correctly.
"""
ret = _LCB._modify_helpers(pickle_encode=encode, pickle_decode=decode)
return (ret['pickle_encode'], ret['pickle_decode'])
class Couchbase:
"""The base class for interacting with Couchbase"""
@staticmethod
def connect(bucket=None,
host='localhost',
port=8091,
username=None,
password=None,
quiet=False,
conncache=None,
unlock_gil=True,
timeout=2.5,
transcoder=None,
lockmode=LOCKMODE_EXC,
**kwargs):
"""Connect to a bucket.
If `username` is not given but `password` is specified,
it will automatically set to the bucket name, as it is
expected that you try to connect to a SASL
protected bucket, where the username is equal to the bucket
name.
:param host: the hostname or IP address of the node.
This can be a list or tuple of multiple nodes; the nodes can either
be simple strings, or (host, port) tuples (in which case the `port`
parameter from the method arguments is ignored).
:type host: string or list
:param number port: port of the management API
:param string username: the user name to connect to the cluster.
It's the username of the management API.
                               The username can be omitted for
                               protected buckets; the bucket name will
                               be used instead.
:param string password: the password of the user or bucket
:param string bucket: the bucket name
:param boolean quiet: the flag controlling whether to raise an
exception when the client executes operations on non-existent
keys. If it is `False` it will raise
:exc:`couchbase.exceptions.NotFoundError` exceptions. When set
to `True` the operations will return `None` silently.
:param string conncache: If set, this will refer to a path on the
filesystem where cached "bootstrap" information may be stored. This
          path may be shared among multiple instances of the Couchbase client.
Using this option may reduce overhead when using many short-lived
instances of the client.
:param boolean unlock_gil: If set (which is the default), the
connection object will release the python GIL when possible, allowing
other (Python) threads to function in the background. This should be
set to true if you are using threads in your application (and is the
default), as otherwise all threads will be blocked while couchbase
functions execute.
          You may turn this off for some performance boost if you are certain
          your application is not using threads
:param float timeout:
Set the timeout in seconds. If an operation takes longer than this
many seconds, the method will return with an error. You may set this
higher if you have slow network conditions.
:param transcoder:
Set the transcoder object to use. This should conform to the
interface in the documentation (it need not actually be a subclass).
This can be either a class type to instantiate, or an initialized
instance.
:type transcoder: :class:`couchbase.transcoder.Transcoder`
:param lockmode:
The *lockmode* for threaded access. See :ref:`multiple_threads`
for more information.
:raise: :exc:`couchbase.exceptions.BucketNotFoundError` if there
is no such bucket to connect to
:exc:`couchbase.exceptions.ConnectError` if the socket
wasn't accessible (doesn't accept connections or doesn't
respond in time)
:exc:`couchbase.exceptions.ArgumentError`
if the bucket wasn't specified
:return: instance of :class:`couchbase.connection.Connection`
Initialize connection using default options::
from couchbase import Couchbase
cb = Couchbase.connect(bucket='mybucket')
Connect to protected bucket::
cb = Couchbase.connect(password='secret', bucket='protected')
Connect to a different server on the default port 8091::
cb = Couchbase.connect(host='example.com', username='admin',
password='secret', bucket='mybucket')
"""
return Connection(host=host,
port=port,
username=username,
password=password,
bucket=bucket,
conncache=conncache,
unlock_gil=unlock_gil,
timeout=timeout,
transcoder=transcoder,
quiet=quiet,
**kwargs)
|
Python
| 0.000081
|
@@ -4233,16 +4233,572 @@
ment API
+.%0A%0A .. note::%0A%0A The value specified here is the same port used to access%0A The couchbase REST UI (typically %608091%60). If you have selcted%0A an alternate port for your bucket, do *not* put it here. The%0A configuration information obtained via the REST interface will%0A automatically instruct the client (one %60%60connect()%60%60 is called)%0A about which bucket port to connect to. Note that bucket ports%0A are typically %60%60112xx%60%60 - don't use these for the %60port%60%0A parameter.
%0A%0A
|
328f1b5a8997432e8216657f45c7b34d94cfb2f4
|
Create Mark the Human by default ;)
|
create_default_dbs.py
|
create_default_dbs.py
|
#!/usr/bin/python
import games_mgr as gm_m
import openings_book as ol_m
import human_player as h_m
import players_mgr as pm_m
import ai_genome as aig_m
from defines import *
import sys
import os
def dot():
sys.stdout.write('.')
sys.stdout.flush()
if __name__ == "__main__":
print "Creating Human Players"
pm = pm_m.PlayersMgr()
for name in ["BC", "Bruce", "Jespah", "Arwen", "Sascha"]:
h = h_m.HumanPlayer(name)
dot()
pm.save(h)
genome = aig_m.AIGenome("")
players = [
{ "p_name": "Deep Thunk", "use_openings_book": True, "max_depth": 10,
"mmpdl": 9, "vision": 100, "capture_score_base": 300 },
{ "p_name": "Pentachov", "use_openings_book": True, "max_depth": 8,
"mmpdl": 9, "vision": 100, "capture_score_base": 300 },
{ "p_name": "Killer", "use_openings_book": True, "max_depth": 6,
"mmpdl": 9, "vision": 100, "capture_score_base": 300 },
{ "p_name": "Sonja", "use_openings_book": True, "max_depth": 4,
"mmpdl": 9, "vision": 100, "capture_score_base": 300 },
{ "p_name": "Renaldo", "use_openings_book": True, "max_depth": 2,
"mmpdl": 12, "vision": 100, "capture_score_base": 300 },
{ "p_name": "Stephanie", "use_openings_book": True, "max_depth": 6,
"mmpdl": 9, "vision": 90, "capture_score_base": 300 },
{ "p_name": "Professor", "use_openings_book": False, "max_depth": 10,
"mmpdl": 9, "vision": 100, "capture_score_base": 300 },
{ "p_name": "Andrea", "use_openings_book": False, "max_depth": 4,
"mmpdl": 9, "vision": 95, "capture_score_base": 300 },
{ "p_name": "Tamazin", "use_openings_book": False, "max_depth": 4,
"mmpdl": 9, "vision": 90, "capture_score_base": 200 },
{ "p_name": "Wei", "use_openings_book": False, "max_depth": 4,
"mmpdl": 9, "vision": 85, "capture_score_base": 300 },
{ "p_name": "Gretel", "use_openings_book": False, "max_depth": 4,
"mmpdl": 9, "vision": 80, "capture_score_base": 400 },
{ "p_name": "JJ", "use_openings_book": False, "max_depth": 6,
"mmpdl": 4, "vision": 75, "capture_score_base": 300 },
{ "p_name": "Sam", "use_openings_book": False, "max_depth": 4,
"mmpdl": 9, "vision": 70, "capture_score_base": 300 },
{ "p_name": "Scott", "use_openings_book": False, "max_depth": 2,
"mmpdl": 6, "vision": 65, "capture_score_base": 300 },
{ "p_name": "Tony", "use_openings_book": False, "max_depth": 1,
"mmpdl": 6, "vision": 60, "capture_score_base": 400 },
]
for p in players:
genome.__dict__.update(p)
dot()
p = pm.find_by_name(genome.p_name, "Computer")
if p:
genome.p_key = p.p_key
else:
genome.p_key = pm.next_id()
pm.save(genome.clone())
|
Python
| 0
|
@@ -373,16 +373,24 @@
%22Bruce%22,
+ %22Mark%22,
%22Jespah
|
0fb6b22137cc183e2438d8155665724a577f51b2
|
Remove unused imports in soc.views.sitemap modules.
|
app/soc/views/sitemap/sidebar.py
|
app/soc/views/sitemap/sidebar.py
|
#!/usr/bin/python2.5
#
# Copyright 2008 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module contains sidebar related functions.
"""
__authors__ = [
'"Sverre Rabbelier" <sverre@rabbelier.nl>',
]
import operator
from soc.views import out_of_band
from soc.views.helper import access
import soc.cache.sidebar
SIDEBAR = []
SIDEBAR_ACCESS_ARGS = ['SIDEBAR_CALLING']
SIDEBAR_ACCESS_KWARGS = {'SIDEBAR_CALLING': True}
def addMenu(callback):
"""Adds a callback to the menu builder.
The callback should return a list of menu's when called.
"""
global SIDEBAR
SIDEBAR.append(callback)
@soc.cache.sidebar.cache
def getSidebar(id, user):
"""Constructs a sidebar for the current user.
"""
sidebar = []
for callback in SIDEBAR:
menus = callback(id, user)
for menu in (menus if menus else []):
sidebar.append(menu)
return sorted(sidebar, key=lambda x: x.get('group'))
def getSidebarItems(params):
"""Retrieves a list of sidebar entries for this view.
Params usage:
The params dictionary is provided to the menu_text's format.
sidebar: The sidebar value is returned directly if non-False
sidebar_defaults: The sidebar_defaults are used to construct the
sidebar items for this View. It is expected to be a tuple of
      three items: the item's url, its menu_text, and its
access_type, see getSidebarMenus on how access_type is used.
sidebar_additional: The sidebar_additional values are appended
to the list of items verbatim, and should be in the format
expected by getSidebarMenus.
Args:
params: a dict with params for this View.
"""
# Return the found result
if params['sidebar']:
default = params['sidebar']
result = default[:]
for item in params['sidebar_additional']:
result.append(item)
return result
  # Construct defaults manually
defaults = params['sidebar_defaults']
result = []
for item in params['sidebar_additional']:
result.append(item)
for url, menu_text, access_type in defaults:
url = url % params['url_name'].lower()
item = (url, menu_text % params, access_type)
result.append(item)
return result
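  # For illustration (hypothetical values): with url_name='document', a
  # defaults entry ('/%s/list', 'List Documents', 'checkIsUser') expands to
  # ('/document/list', 'List Documents', 'checkIsUser').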
def getSidebarMenu(id, user, items, params):
"""Returns an dictionary with one sidebar entry.
  Items is expected to be a tuple with a url, a menu_text, and an
access_type. The access_type is then passed to checkAccess, if it
raises out_of_band.Error, the item will not be added.
Args:
items: see above
params: a dict with params for this View
Params usage:
The params dictionary is passed as argument to getSidebarItems,
see the docstring of getSidebarItems on how it uses it.
rights: The rights dictionary is used to check if the user has
the required rights to see a sidebar item.
See checkAccess for more details on how the rights dictionary
is used to check access rights.
sidebar_heading: The sidebar_heading value is used to set the
heading variable in the result.
name: The name value is used if sidebar_heading is not present.
Returns:
    A dictionary is returned with its 'heading' value set as explained above.
    Its 'items' value is constructed by calling _getSidebarItems. It consists
    of dictionaries with a url and a title field.
"""
rights = params['rights']
submenus = []
args = SIDEBAR_ACCESS_ARGS
kwargs = SIDEBAR_ACCESS_KWARGS
# reset and pre-fill the Checker's cache
rights.setCurrentUser(id, user)
for url, menu_text, access_type in items:
try:
rights.checkAccess(access_type, kwargs)
submenus.append({'url': url, 'title': menu_text})
except out_of_band.Error:
pass
return submenus
def getSidebarMenus(id, user, params=None):
"""Constructs the default sidebar menu for a View.
Calls getSidebarItems to retrieve the items that should be in the
menu. Then passes the result to getSidebarMenu. See the respective
docstrings for an explanation on what they do.
Args:
params: a dict with params for this View
"""
items = getSidebarItems(params)
submenus = getSidebarMenu(id, user, items, params)
if not submenus:
return
menu = {}
if 'sidebar_heading' not in params:
params['sidebar_heading'] = params['name']
menu['heading'] = params['sidebar_heading']
menu['items'] = submenus
menu['group'] = params['sidebar_grouping']
menus = [menu]
return menus
|
Python
| 0
|
@@ -724,25 +724,8 @@
%5D%0A%0A%0A
-import operator%0A%0A
from
@@ -757,44 +757,8 @@
band
-%0Afrom soc.views.helper import access
%0A%0Aim
@@ -1020,17 +1020,16 @@
.%0A %22%22%22%0A
-%0A
global
@@ -3816,16 +3816,18 @@
= %5B%5D%0A%0A
+ #
args =
|
c37ad5d87f5b9d30168495547c744d778f75c7ec
|
set status to completed if file already present
|
custom/icds/models.py
|
custom/icds/models.py
|
from __future__ import absolute_import
from __future__ import unicode_literals
import uuid
from django.core.exceptions import ValidationError
from django.db import models
from django.utils.functional import cached_property
from django.utils.translation import ugettext_lazy as _
from django.db.models.signals import pre_delete
from corehq.apps.app_manager.dbaccessors import get_build_doc_by_version
from custom.icds.const import (
FILE_TYPE_CHOICE_ZIP,
FILE_TYPE_CHOICE_DOC,
DISPLAY_CHOICE_LIST,
DISPLAY_CHOICE_FOOTER,
)
from custom.icds.utils.hosted_ccz import HostedCCZUtility
from custom.icds.validators import (
HostedCCZLinkIdentifierValidator,
)
from custom.nic_compliance.utils import hash_password
class HostedCCZLink(models.Model):
identifier = models.CharField(null=False, unique=True, max_length=255, db_index=True,
validators=[HostedCCZLinkIdentifierValidator])
username = models.CharField(null=False, max_length=255)
password = models.CharField(null=False, max_length=255)
domain = models.CharField(null=False, max_length=255)
page_title = models.CharField(blank=True, max_length=255)
def to_json(self):
from custom.icds.serializers import HostedCCZLinkSerializer
return HostedCCZLinkSerializer(self).data
def save(self, *args, **kwargs):
if not self.pk:
self.password = hash_password(self.password)
self.full_clean()
super(HostedCCZLink, self).save(*args, **kwargs)
class HostedCCZSupportingFile(models.Model):
FILE_TYPE_CHOICES = (
(FILE_TYPE_CHOICE_ZIP, 'zip'),
(FILE_TYPE_CHOICE_DOC, 'document'),
)
DISPLAY_CHOICES = (
(DISPLAY_CHOICE_LIST, 'list'),
(DISPLAY_CHOICE_FOOTER, 'footer'),
)
domain = models.CharField(null=False, max_length=255, db_index=True)
blob_id = models.CharField(null=False, max_length=255, db_index=True)
file_name = models.CharField(max_length=255, blank=False)
file_type = models.IntegerField(choices=FILE_TYPE_CHOICES)
display = models.IntegerField(choices=DISPLAY_CHOICES)
class Meta:
unique_together = ('domain', 'blob_id')
@cached_property
def utility(self):
return HostedCCZUtility(self)
def delete_file(self):
# if no other domain is using this file/doc, delete the file from blobdb
if not (HostedCCZSupportingFile.objects.filter(blob_id=self.blob_id)
.exclude(domain=self.domain).exists()):
self.utility.remove_file_from_blobdb()
def delete(self, *args, **kwargs):
self.delete_file()
super(HostedCCZSupportingFile, self).delete(*args, **kwargs)
@classmethod
def create(cls, domain, file_name, file_type, display, file_obj):
supporting_file = cls(
file_name=file_name, file_type=file_type, display=display,
domain=domain, blob_id=uuid.uuid4().hex
)
supporting_file.full_clean()
supporting_file.save()
supporting_file.utility.store_file_in_blobdb(file_obj, file_name)
return supporting_file.utility.file_exists()
class HostedCCZ(models.Model):
PENDING = 'pending'
BUILDING = 'building'
FAILED = 'failed'
COMPLETED = 'completed'
STATUSES = [PENDING, BUILDING, FAILED, COMPLETED]
link = models.ForeignKey(HostedCCZLink, on_delete=models.CASCADE)
app_id = models.CharField(max_length=255, null=False)
version = models.IntegerField(null=False)
profile_id = models.CharField(max_length=255, blank=True)
file_name = models.CharField(max_length=255, blank=True)
note = models.TextField(blank=True)
status = models.CharField(max_length=255, null=False, blank=False, default='pending',
choices=((PENDING, _('Pending')),
(BUILDING, _('Building')),
(FAILED, _('Failed')),
(COMPLETED, _('Completed')),
))
class Meta:
unique_together = ('link', 'app_id', 'version', 'profile_id')
@cached_property
def utility(self):
return HostedCCZUtility(self)
@cached_property
def domain(self):
return self.link.domain
def to_json(self, app_names):
from custom.icds.serializers import HostedCCZSerializer
return HostedCCZSerializer(self, context={'app_names': app_names}).data
@cached_property
def blob_id(self):
assert self.app_id
assert self.version
return "%s%s%s" % (self.app_id, self.version, self.profile_id)
@cached_property
def build_doc(self):
if self.link_id and self.app_id and self.version:
return get_build_doc_by_version(self.domain, self.app_id, self.version)
@cached_property
def build_profile(self):
if self.profile_id and self.build_doc:
return self.build_doc['build_profiles'].get(self.profile_id)
def clean(self):
if not self.build_doc:
raise ValidationError({
'version': _("Build not found for app {} and version {}.").format(
self.app_id, self.version
)
})
if not self.build_doc['is_released']:
raise ValidationError({
'version': _("Version not released. Please mark it as released.")})
if not self.file_name:
self.file_name = "%s-v%s" % (self.build_doc['name'], self.version)
super(HostedCCZ, self).clean()
def save(self, *args, **kwargs):
from custom.icds.tasks.hosted_ccz import setup_ccz_file_for_hosting
self.full_clean()
email = kwargs.pop('email') if 'email' in kwargs else None
super(HostedCCZ, self).save(*args, **kwargs)
if not self.utility.file_exists():
setup_ccz_file_for_hosting.delay(self.pk, user_email=email)
def delete_ccz(self):
# if no other link is using this app+version+profile, delete the file from blobdb
if not (HostedCCZ.objects.filter(app_id=self.app_id, version=self.version, profile_id=self.profile_id)
.exclude(link=self.link).exists()):
self.utility.remove_file_from_blobdb()
def delete(self, *args, **kwargs):
self.delete_ccz()
super(HostedCCZ, self).delete(*args, **kwargs)
def update_status(self, new_status):
assert new_status in self.STATUSES
HostedCCZ.objects.filter(id=self.pk).update(status=new_status)
def delete_ccz_for_link(sender, instance, **kwargs):
for hosted_ccz in HostedCCZ.objects.filter(link=instance):
hosted_ccz.delete_ccz()
pre_delete.connect(delete_ccz_for_link, sender=HostedCCZLink)
|
Python
| 0
|
@@ -5788,16 +5788,130 @@
se None%0A
+ file_exists = self.utility.file_exists()%0A if file_exists:%0A self.status = self.COMPLETED%0A
@@ -5970,29 +5970,16 @@
if not
-self.utility.
file_exi
@@ -5981,18 +5981,16 @@
e_exists
-()
:%0A
|
87858c9a53244758d92ce1e97ee47465cdd8b9f1
|
Mark TestSwiftDifferentClangFlags as XFAIL
|
packages/Python/lldbsuite/test/lang/swift/different_clang_flags/TestSwiftDifferentClangFlags.py
|
packages/Python/lldbsuite/test/lang/swift/different_clang_flags/TestSwiftDifferentClangFlags.py
|
# TestSwiftDifferentClangFlags.py
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See http://swift.org/LICENSE.txt for license information
# See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ------------------------------------------------------------------------------
"""
Test that we use the right compiler flags when debugging
"""
import commands
import lldb
from lldbsuite.test.lldbtest import *
import lldbsuite.test.decorators as decorators
import lldbsuite.test.lldbutil as lldbutil
import os
import os.path
import unittest2
def execute_command (command):
# print '%% %s' % (command)
(exit_status, output) = commands.getstatusoutput (command)
# if output:
# print output
# print 'status = %u' % (exit_status)
return exit_status
class TestSwiftDifferentClangFlags(TestBase):
mydir = TestBase.compute_mydir(__file__)
@decorators.skipUnlessDarwin
@decorators.swiftTest
@decorators.skipIf(debug_info=decorators.no_match("dsym"), bugnumber="This test requires a stripped binary and a dSYM")
def test_swift_different_clang_flags(self):
"""Test that we use the right compiler flags when debugging"""
self.buildAll()
self.do_test()
def setUp(self):
TestBase.setUp(self)
self.main_source = "main.swift"
self.main_source_spec = lldb.SBFileSpec (self.main_source)
self.modb_source = "modb.swift"
self.modb_source_spec = lldb.SBFileSpec (self.modb_source)
def buildAll(self):
execute_command("make everything")
def do_test(self):
"""Test that we use the right compiler flags when debugging"""
exe_name = "a.out"
exe = os.path.join(os.getcwd(), exe_name)
def cleanup():
execute_command("make cleanup")
self.addTearDownHook(cleanup)
# Create the target
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
# Set the breakpoints
main_breakpoint = target.BreakpointCreateBySourceRegex('break here', self.main_source_spec)
self.assertTrue(main_breakpoint.GetNumLocations() > 0, VALID_BREAKPOINT)
modb_breakpoint = target.BreakpointCreateBySourceRegex('break here', self.modb_source_spec)
self.assertTrue(modb_breakpoint.GetNumLocations() > 0, VALID_BREAKPOINT)
process = target.LaunchSimple(None, None, os.getcwd())
self.assertTrue(process, PROCESS_IS_VALID)
threads = lldbutil.get_threads_stopped_at_breakpoint (process, modb_breakpoint)
self.assertTrue(len(threads) == 1)
self.thread = threads[0]
self.frame = self.thread.frames[0]
self.assertTrue(self.frame, "Frame 0 is valid.")
var = self.frame.FindVariable("myThree")
three = var.GetChildMemberWithName("three")
lldbutil.check_variable(self,var,False,typename="modb.MyStruct")
lldbutil.check_variable(self,three,False,value="3")
process.Continue()
threads = lldbutil.get_threads_stopped_at_breakpoint (process, main_breakpoint)
self.assertTrue(len(threads) == 1)
self.thread = threads[0]
self.frame = self.thread.frames[0]
self.assertTrue(self.frame, "Frame 0 is valid.")
var = self.frame.FindVariable("a")
lldbutil.check_variable(self,var,False,value="2")
var = self.frame.FindVariable("b")
lldbutil.check_variable(self,var,False,value="3")
var = self.frame.EvaluateExpression("fA()")
lldbutil.check_variable(self,var,False,value="2")
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lldb.SBDebugger.Terminate)
unittest2.main()
|
Python
| 0
|
@@ -1227,16 +1227,87 @@
dSYM%22)%0A
+ @decorators.skipIf(oslist=%5B%22macosx%22%5D, bugnumber=%22rdar://26051347%22)%0A
def
|
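Decoded from the URL-escaped hunk above, the change stacks a second skip decorator onto the test; the arguments are copied verbatim from the hunk:
@decorators.skipIf(debug_info=decorators.no_match("dsym"), bugnumber="This test requires a stripped binary and a dSYM")
@decorators.skipIf(oslist=["macosx"], bugnumber="rdar://26051347")
def test_swift_different_clang_flags(self):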
d48a0cb42aabf0c42debfddfcf09a8c2f954b9ff
|
Create project when creating account.analytic.account for test.
|
purchase_line_with_delivery_service_info/tests/test_purchase_line_with_delivery_service_info.py
|
purchase_line_with_delivery_service_info/tests/test_purchase_line_with_delivery_service_info.py
|
# -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in root directory
##############################################################################
import openerp.tests.common as common
class TestPurchaseLineWithDeliveryServiceInfo(common.TransactionCase):
def setUp(self):
super(TestPurchaseLineWithDeliveryServiceInfo, self).setUp()
self.sale_model = self.env['sale.order']
self.procurement_model = self.env['procurement.order']
account_vals = {'name': 'account procurement service project',
'date_start': '2016-01-15',
'date': '2016-02-20'}
self.account = self.env['account.analytic.account'].create(
account_vals)
sale_vals = {
'partner_id': self.env.ref('base.res_partner_1').id,
'partner_shipping_id': self.env.ref('base.res_partner_1').id,
'partner_invoice_id': self.env.ref('base.res_partner_1').id,
'pricelist_id': self.env.ref('product.list0').id,
'carrier_id': self.env.ref('delivery.normal_delivery_carrier').id,
'project_id': self.account.id}
sale_line_vals = {
'product_id': self.env.ref('product.product_product_6').id,
'name': self.env.ref('product.product_product_6').name,
'product_uos_qty': 1,
'product_uom': self.env.ref('product.product_product_6').uom_id.id,
'price_unit': self.env.ref('product.product_product_6').list_price}
sale_vals['order_line'] = [(0, 0, sale_line_vals)]
self.sale_order = self.sale_model.create(sale_vals)
self.sale_order.delivery_set()
for line in self.sale_order.order_line:
if line.product_id.type == 'service':
line.product_id.write(
{'route_ids':
[(6, 0,
[self.env.ref('stock.route_warehouse0_mto').id,
self.env.ref('purchase.route_warehouse0_buy').id])],
'seller_ids':
[(6, 0, [self.env.ref('base.res_partner_14').id])]})
self.service_product = line.product_id
line.write({'delivery_standard_price': 578.00})
def test_confirm_sale_with_delivery_service(self):
self.sale_order.action_button_confirm()
cond = [('origin', '=', self.sale_order.name),
('product_id', '=', self.service_product.id)]
procurement = self.procurement_model.search(cond)
self.assertEqual(
len(procurement), 1,
"Procurement not generated for the service product type")
procurement.run()
cond = [('group_id', '=', procurement.group_id.id),
('product_id', '=', self.service_product.id),
('state', '=', 'confirmed')]
procurement2 = self.procurement_model.search(cond)
self.assertEqual(
len(procurement2), 1,
"Procurement2 not generated for the service product type")
procurement2.run()
self.assertTrue(
bool(procurement2.purchase_id),
"Purchase no generated for procurement Service")
for line in procurement2.purchase_id.order_line:
if line.product_id.type == 'service':
self.assertEqual(
line.price_unit,
procurement2.sale_line_id.delivery_standard_price,
"Erroneous price on purchase order line")
|
Python
| 0
|
@@ -827,24 +827,231 @@
count_vals)%0A
+ project_vals = %7B'name': 'project procurement service project',%0A 'analytic_account_id': self.account.id%7D%0A self.project = self.env%5B'project.project'%5D.create(project_vals)%0A
sale
|
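Decoded from the URL-escaped hunk above, the fix creates a project.project record tied to the analytic account before the sale order is built (field names as they appear in the hunk):
project_vals = {'name': 'project procurement service project',
                'analytic_account_id': self.account.id}
self.project = self.env['project.project'].create(project_vals)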
6593fe983b40a5d4c467cb2ea0847e10e5faae1c
|
Update Beck_Pang_First_Python_practice_2.7.py
|
Coding_practice/Beck_Pang_First_Python_practice_2.7.py
|
Coding_practice/Beck_Pang_First_Python_practice_2.7.py
|
"""
Beck Pang 25/07/2014
First practice project for our summer robotics team
"""
import random
def name_to_number (name):
# This helper function converts the string name into a number between 0 and 4
# pre: take a name in String as a parameter
# post: return the represented number as an integer
if (name == "rock"):
return 0
elif (name == "Spock"):
return 1
elif (name == "paper"):
return 2
elif (name == "lizard"):
return 3
elif (name ==hel "scissors"):
return 4
else:
print ("This name is not included in this game.\n")
def number_to_name (number):
# pre: take a number in integer as a parameter
# post: return a name in String
if (number == 0):
return "rock"
elif (number == 1):
return "Spock"
elif (number == 2):
return "paper"
elif (number == 3):
return "lizard"
elif (number == 4):
return "scissors"
else:
return "no word found"
def rpsls (player_choice):
# This function implements the main game logic
# pre: take a player's choice in String as a parameter
# post: print the player and computer's choices in the console
# and show the result
print ("\n")
player_number = name_to_number(player_choice)
comp_number = random.randrange(5)
comp_name = number_to_name(comp_number)
print ("Player chooses " + player_choice + "\n")
print ("Computer chooses " + comp_name + "\n")
difference = (comp_number - player_number) % 5
if (difference == 0):
print ("Player and computer tie!")
elif (difference >= 2):
print ("Player wins!")
else:
print ("Computer wins!")
""" There is no main function in this game
Please play this game in the console.
"""
|
Python
| 0.000001
|
@@ -493,19 +493,16 @@
(name ==
-hel
%22scisso
|
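The win test above relies on modular arithmetic over the 0-4 cycle. A minimal sketch of the conventional rule, assuming the rock=0, Spock=1, paper=2, lizard=3, scissors=4 ordering used by the script, where each choice beats the two choices one and two steps below it in the cycle:

def player_wins(player_number, comp_number):
    # the player wins exactly when (player - comp) % 5 is 1 or 2,
    # i.e. when (comp - player) % 5 is 3 or 4
    return (player_number - comp_number) % 5 in (1, 2)

Under that convention the script's `difference >= 2` branch also counts a difference of 2 (for example rock vs. paper, which paper should win) as a player win, so the threshold appears to be off by one.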
dec1ec2504cdd2d642d7efcf97ebab2bf263a29d
|
Add ignore list to detect changes of k8s auth config
|
ansible/modules/hashivault/hashivault_k8s_auth_config.py
|
ansible/modules/hashivault/hashivault_k8s_auth_config.py
|
#!/usr/bin/env python
from ansible.module_utils.hashivault import hashivault_argspec
from ansible.module_utils.hashivault import hashivault_auth_client
from ansible.module_utils.hashivault import hashivault_init
from ansible.module_utils.hashivault import hashiwrapper
from ansible.module_utils.hashivault import get_keys_updated
from hvac.exceptions import InvalidPath
ANSIBLE_METADATA = {'status': ['stableinterface'], 'supported_by': 'community', 'version': '1.1'}
DOCUMENTATION = '''
---
module: hashivault_k8s_auth_config
version_added: "4.3.0"
short_description: Hashicorp Vault k8s auth config
description:
- Module to configure a k8s auth mount
options:
mount_point:
description:
- name of the secret engine mount.
default: kubernetes
kubernetes_host:
description:
- host must be a host string, a host:port pair, or a URL to the base of the Kubernetes API server
token_reviewer_jwt:
description:
- a service account JWT used to access the TokenReview API to validate other JWTs during login
kubernetes_ca_cert:
description:
- PEM encoded CA cert for use by the TLS client used to talk with the Kubernetes API
pem_keys:
description:
- Optional list of PEM-formatted public keys or certificates used to verify the signatures of Kubernetes
service account JWTs. If a certificate is given, its public key will be extracted.
issuer:
description:
- Optional JWT issuer. If no issuer is specified, then this plugin will use kubernetes.io/serviceaccount as
the default issuer (Available in hvac 0.10.2).
extends_documentation_fragment: hashivault
'''
EXAMPLES = '''
---
- hosts: localhost
tasks:
- hashivault_k8s_auth_config:
kubernetes_host: https://192.168.99.100:8443
kubernetes_ca_cert: "-----BEGIN CERTIFICATE-----\n.....\n-----END CERTIFICATE-----"
'''
def main():
argspec = hashivault_argspec()
argspec['mount_point'] = dict(required=False, type='str', default='kubernetes')
argspec['kubernetes_host'] = dict(required=False, type='str', default=None)
argspec['token_reviewer_jwt'] = dict(required=False, type='str', default=None)
argspec['kubernetes_ca_cert'] = dict(required=False, type='str', default=None)
argspec['pem_keys'] = dict(required=False, type='list', default=None)
argspec['issuer'] = dict(required=False, type='str', default=None)
required_together = [['kubernetes_host', 'kubernetes_ca_cert']]
module = hashivault_init(argspec, supports_check_mode=True, required_together=required_together)
result = hashivault_k8s_auth_config(module)
if result.get('failed'):
module.fail_json(**result)
else:
module.exit_json(**result)
@hashiwrapper
def hashivault_k8s_auth_config(module):
params = module.params
client = hashivault_auth_client(params)
mount_point = params.get('mount_point').strip('/')
desired_state = dict()
desired_state['kubernetes_host'] = params.get('kubernetes_host')
desired_state['token_reviewer_jwt'] = params.get('token_reviewer_jwt')
desired_state['kubernetes_ca_cert'] = params.get('kubernetes_ca_cert')
desired_state['pem_keys'] = params.get('pem_keys')
if params.get('issuer'):
desired_state['issuer'] = params.get('issuer')
desired_state['mount_point'] = mount_point
keys_updated = desired_state.keys()
try:
current_state = client.auth.kubernetes.read_config(mount_point=mount_point)
keys_updated = get_keys_updated(desired_state, current_state)
if not keys_updated:
return {'changed': False}
except InvalidPath:
pass
if not module.check_mode:
client.auth.kubernetes.configure(**desired_state)
return {'changed': True, 'keys_updated': keys_updated}
if __name__ == '__main__':
main()
|
Python
| 0
|
@@ -3433,16 +3433,114 @@
_point%0A%0A
+ ignore_list = %5B%0A 'mount_point',%0A 'token_reviewer_jwt',%0A 'pem_keys'%0A %5D%0A
keys
@@ -3728,24 +3728,37 @@
urrent_state
+, ignore_list
)%0A if
|
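The hunk passes the new ignore_list through to get_keys_updated. The real helper lives in ansible.module_utils.hashivault; a minimal sketch of what such a comparison might look like, with the signature inferred purely from the call site:

def get_keys_updated(desired_state, current_state, ignore_list=None):
    # report keys whose desired value differs from the current config,
    # skipping keys that Vault never echoes back (so they always "differ")
    ignore_list = ignore_list or []
    return [key for key, value in desired_state.items()
            if key not in ignore_list and current_state.get(key) != value]

Ignoring 'token_reviewer_jwt' in particular makes sense because Vault does not return the JWT on read, so comparing it would flag a change on every run.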
7ad0a248ab019c1c99080da5860ecba56f5a7654
|
switch to beautiful soup for extraction
|
ifind/common/position_content_extractor.py
|
ifind/common/position_content_extractor.py
|
#!/usr/bin/env python
# -*- coding: latin-1 -*-
__author__ = 'rose'
from BeautifulSoup import BeautifulSoup
from copy import deepcopy
class PositionContentExtractor(object):
def __init__(self, div_ids=None):
self.div_ids = div_ids
self.html = ''
self.html_soup = None
self.text = ''
def set_div_ids(self, ids):
self.div_ids = ids
self.process_html_page(self.html)
def process_html_page(self, html):
""" reads in the html, parses it, and removes the set of specified div ids, assigning the text to self.text
:param html: expects a valid html document
:return: None
"""
self.html = html
self.html_soup = BeautifulSoup(html)
self.text = self._remove_div_content()
def get_subtext(self, num_words=0, percentage=None):
"""
takes the first num_words from self.text and returns them as a string
:param num_words: number of words to keep (0 is treated as all)
:param percentage: optional percentage of the words to use instead of num_words
:return: the truncated text
"""
words = self.text.split()
subtext = ' '
if(percentage):
num_words = round(self._calc_percentage(percentage,len(words)))
if(num_words):
if num_words == 0:#return all text if 0 assumes 0 means wants all
return self.text
if len(words) > num_words:
return subtext.join(words[0:num_words])
# for term in :
# print term
# subtext += ' '.join(term)
else:
return self.text
def _remove_div_content(self):
"""
returns a string with the content of the html, with the content of
divs in div_ids removed
:param div_ids: a list of the ids of the div to be removed
:return: a string with the divs content removed
"""
result = ''
for div_id in self.div_ids:
self.html_soup.find("div", {"id": div_id})
return result
def _calc_percentage(self, percentage, total_words):
if total_words == 0:
return 0
else:
return 100 * float(percentage)/float(total_words)
|
Python
| 0
|
@@ -1370,121 +1370,8 @@
s%5D)%0A
- # for term in :%0A # print term%0A # subtext += ' '.join(term)%0A
@@ -1672,83 +1672,319 @@
urn:
- a string with the divs content removed%0A %22%22%22%0A result = ''
+None%0A %22%22%22%0A result = ''%0A #for all div ids find the elements in the beautiful soup tree and extract%0A #the corresponding div%0A #this would update self.html_soup which we want to keep whole html in%0A #so perform on a deep copy%0A soup_copy = deepcopy(self.html_soup)
%0A
@@ -2028,30 +2028,32 @@
-s
el
-f.html_soup
+em = soup_copy
.find(%22d
@@ -2085,22 +2085,154 @@
+
-return result%0A
+elem.extract()%0A #set the text of the class to be the result of removing the text from the divs%0A self.text = soup_copy.get_text()
%0A%0A%0A
|
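The key design choice in the hunk: extract() mutates the parse tree, so the removal runs on a deep copy and self.html_soup keeps the whole document. A standalone sketch of the same pattern, shown with the modern bs4 import (the file itself uses the legacy BeautifulSoup 3 package):

from copy import deepcopy
from bs4 import BeautifulSoup

html = '<div id="nav">menu</div><div id="body">keep me</div>'
soup = BeautifulSoup(html, 'html.parser')
soup_copy = deepcopy(soup)            # work on a copy; extract() is destructive
elem = soup_copy.find('div', {'id': 'nav'})
if elem is not None:                  # the id may be absent from the page
    elem.extract()
print(soup_copy.get_text())           # keep me
print(soup.get_text())                # menukeep me -- original untouched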
157a5b7350928eab13170da7e0c06636ae1e9975
|
Add option to load instrumentation key from env
|
applicationinsights/flask/ext.py
|
applicationinsights/flask/ext.py
|
from applicationinsights import TelemetryClient
from applicationinsights.channel import AsynchronousSender
from applicationinsights.channel import AsynchronousQueue
from applicationinsights.channel import TelemetryChannel
from applicationinsights.logging import LoggingHandler
from applicationinsights.requests import WSGIApplication
CONF_PREFIX = "APPINSIGHTS"
CONF_KEY = CONF_PREFIX + "_INSTRUMENTATIONKEY"
CONF_ENDPOINT_URI = CONF_PREFIX + "_ENDPOINT_URI"
CONF_DISABLE_REQUEST_LOGGING = CONF_PREFIX + "_DISABLE_REQUEST_LOGGING"
CONF_DISABLE_TRACE_LOGGING = CONF_PREFIX + "_DISABLE_TRACE_LOGGING"
CONF_DISABLE_EXCEPTION_LOGGING = CONF_PREFIX + "_DISABLE_EXCEPTION_LOGGING"
class AppInsights(object):
def __init__(self, app=None):
self._key = None
self._endpoint_uri = None
self._channel = None
self._requests_middleware = None
self._trace_log_handler = None
self._exception_telemetry_client = None
if app:
self.init_app(app)
def init_app(self, app):
self._key = app.config.get(CONF_KEY)
if not self._key:
return
self._endpoint_uri = app.config.get(CONF_ENDPOINT_URI)
if self._endpoint_uri:
sender = AsynchronousSender(self._endpoint_uri)
else:
sender = AsynchronousSender()
queue = AsynchronousQueue(sender)
self._channel = TelemetryChannel(None, queue)
self._init_request_logging(app)
self._init_trace_logging(app)
self._init_exception_logging(app)
def _init_request_logging(self, app):
enabled = not app.config.get(CONF_DISABLE_REQUEST_LOGGING, False)
if not enabled:
return
self._requests_middleware = WSGIApplication(
self._key, app.wsgi_app, telemetry_channel=self._channel)
app.wsgi_app = self._requests_middleware
def _init_trace_logging(self, app):
enabled = not app.config.get(CONF_DISABLE_TRACE_LOGGING, False)
if not enabled:
return
self._trace_log_handler = LoggingHandler(
self._key, telemetry_channel=self._channel)
app.logger.addHandler(self._trace_log_handler)
def _init_exception_logging(self, app):
enabled = not app.config.get(CONF_DISABLE_EXCEPTION_LOGGING, False)
if not enabled:
return
exception_telemetry_client = TelemetryClient(
self._key, telemetry_channel=self._channel)
@app.errorhandler(Exception)
def exception_handler(exception):
exception_telemetry_client.track_exception(
type=type(exception),
value=exception,
tb=exception.__traceback__)
raise exception
self._exception_telemetry_client = exception_telemetry_client
def flush(self):
if self._requests_middleware:
self._requests_middleware.flush()
if self._trace_log_handler:
self._trace_log_handler.flush()
if self._exception_telemetry_client:
self._exception_telemetry_client.flush()
|
Python
| 0
|
@@ -1,28 +1,51 @@
+from os import getenv%0A%0A
from applicationinsights imp
@@ -1094,16 +1094,36 @@
ONF_KEY)
+ or getenv(CONF_KEY)
%0A%0A
|
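The decoded change is a one-line fallback: the Flask config value wins, and the process environment is consulted only when the config key is unset. In isolation:

from os import getenv
from flask import Flask

CONF_KEY = 'APPINSIGHTS_INSTRUMENTATIONKEY'
app = Flask(__name__)
# config takes precedence; fall back to the environment variable
key = app.config.get(CONF_KEY) or getenv(CONF_KEY)

Note that `or` also treats an empty-string config value as unset, which is usually what you want for a credential-like key.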
c93128f7491495c48951194831bebb46e9db54d5
|
fix urljoin import (#247)
|
aqua/datadog_checks/aqua/aqua.py
|
aqua/datadog_checks/aqua/aqua.py
|
# (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import requests
import simplejson as json
from six.moves.urllib.parse import urlparse
from datadog_checks.checks import AgentCheck
SEVERITIES = {
'total': 'all',
'high': 'high',
'medium': 'medium',
'ok': 'ok',
'low': 'low'
}
class AquaCheck(AgentCheck):
"""
Collect metrics from Aqua.
"""
SERVICE_CHECK_NAME = 'aqua.can_connect'
def check(self, instance):
instance_tags = instance.get("tags", [])
self.validate_instance(instance)
try:
token = self.get_aqua_token(instance)
self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.OK, tags=instance_tags)
except Exception as ex:
self.log.error("Failed to get Aqua token, skipping check. Error: %s" % ex)
self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL, tags=instance_tags)
return
self._report_base_metrics(instance, token)
self._report_connected_enforcers(instance, token)
status_metrics = [
# (
# metric_name,
# route,
# statuses
# )
(
'aqua.audit.access',
'/api/v1/audit/access_totals?alert=-1&limit=100&time=hour&type=all',
{
'total': 'all',
'success': 'success',
'blocked': 'blocked',
'detect': 'detect',
'alert': 'alert'
}
),
(
'aqua.scan_queue',
'/api/v1/scanqueue/summary',
{
'total': 'all',
'failed': 'failed',
'in_progress': 'in_progress',
'finished': 'finished',
'pending': 'pending'
}
)
]
for metric_name, route, statuses in status_metrics:
self._report_status_metrics(instance, token, metric_name, route, statuses)
def validate_instance(self, instance):
"""
Validate that all required parameters are set in the instance.
"""
if any(map(lambda x: x not in instance, ['api_user', 'password', 'url'])):
raise Exception("Aqua instance missing one of api_user, password, or url")
def get_aqua_token(self, instance):
"""
Retrieve the Aqua token for next queries.
"""
headers = {'Content-Type': 'application/json', 'charset': 'UTF-8'}
data = {"id": instance['api_user'], "password": instance['password']}
res = requests.post(
instance['url'] + '/api/v1/login',
data=json.dumps(data),
headers=headers,
timeout=self.default_integration_http_timeout
)
res.raise_for_status()
return json.loads(res.text)['token']
def _perform_query(self, instance, route, token):
"""
Form queries and interact with the Aqua API.
"""
headers = {'Content-Type': 'application/json', 'charset': 'UTF-8', 'Authorization': 'Bearer ' + token}
res = requests.get(urlparse.urljoin(instance['url'], route), headers=headers, timeout=60)
res.raise_for_status()
return json.loads(res.text)
def _report_base_metrics(self, instance, token):
"""
Report metrics about images, vulnerabilities, running containers, and enforcer hosts
"""
try:
metrics = self._perform_query(instance, '/api/v1/dashboard', token)
except Exception as ex:
self.log.error("Failed to get base metrics. Some metrics will be missing. Error: %s" % ex)
return
# images
metric_name = 'aqua.images'
image_metrics = metrics['registry_counts']['images']
for sev in SEVERITIES:
self.gauge(metric_name, image_metrics[sev],
tags=instance.get('tags', []) + ['severity:%s' % SEVERITIES[sev]])
# vulnerabilities
metric_name = 'aqua.vulnerabilities'
vuln_metrics = metrics['registry_counts']['vulnerabilities']
for sev in SEVERITIES:
self.gauge(metric_name, vuln_metrics[sev],
tags=instance.get('tags', []) + ['severity:%s' % SEVERITIES[sev]])
# running containers
metric_name = 'aqua.running_containers'
container_metrics = metrics['running_containers']
self.gauge(metric_name, container_metrics['total'], tags=instance.get('tags', []) + ['status:all'])
self.gauge(metric_name, container_metrics['unregistered'],
tags=instance.get('tags', []) + ['status:unregistered'])
self.gauge(
metric_name,
container_metrics['total'] - container_metrics['unregistered'],
tags=instance.get('tags', []) + ['status:registered']
)
# disconnected enforcers
metric_name = 'aqua.enforcers'
enforcer_metrics = metrics['hosts']
self.gauge('aqua.enforcers', enforcer_metrics['disconnected_count'],
tags=instance.get('tags', []) + ['status:disconnected'])
def _report_status_metrics(self, instance, token, metric_name, route, statuses):
try:
metrics = self._perform_query(instance, route, token)
except Exception as ex:
self.log.error("Failed to get %s metrics. Error: %s" % (metric_name, ex))
return
for status in statuses:
self.gauge(metric_name, metrics[status], tags=instance.get('tags', []) + ['status:%s' % statuses[status]])
def _report_connected_enforcers(self, instance, token):
"""
Report metrics about enforcers
"""
try:
metrics = self._perform_query(instance, '/api/v1/hosts', token)
except Exception as ex:
self.log.error("Failed to get enforcer metrics. Error: %s" % ex)
return
self.gauge('aqua.enforcers', metrics['count'], tags=instance.get('tags', []) + ['status:all'])
|
Python
| 0
|
@@ -180,21 +180,20 @@
port url
-parse
+join
%0A%0Afrom d
@@ -3265,17 +3265,8 @@
get(
-urlparse.
urlj
@@ -3662,32 +3662,172 @@
-except Exception
+# io-related exceptions (all those coming from requests are included in that) are handled.%0A # All other exceptions are raised.%0A except IOError
as ex:%0A
@@ -5584,32 +5584,172 @@
-except Exception
+# io-related exceptions (all those coming from requests are included in that) are handled.%0A # All other exceptions are raised.%0A except IOError
as ex:%0A
@@ -6225,32 +6225,172 @@
-except Exception
+# io-related exceptions (all those coming from requests are included in that) are handled.%0A # All other exceptions are raised.%0A except IOError
as ex:%0A
|
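The root cause decoded: the file imported urlparse (a function) but called urlparse.urljoin as if it were a module, so the fix imports urljoin directly. A minimal sketch of the corrected call; six.moves resolves to urllib.parse on Python 3 and to urlparse on Python 2 (the URL below is a hypothetical instance):

from six.moves.urllib.parse import urljoin

base = 'http://aqua.example.com:8080'
print(urljoin(base, '/api/v1/dashboard'))
# -> http://aqua.example.com:8080/api/v1/dashboard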
8483de37aad2256266deb404ce4d9eaae31a8142
|
Remove backend
|
kaggle-classification/keras_trainer/rnn.py
|
kaggle-classification/keras_trainer/rnn.py
|
"""RNN"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.layers import Input, GRU, Dense, Embedding, Dropout, Bidirectional, TimeDistributed, Multiply, Flatten, Reshape, Dot
from keras.models import Model
from keras_trainer import base_model
from keras_trainer.custom_metrics import auc_roc
import keras.backend as K
class RNNModel(base_model.BaseModel):
""" RNN
hparams:
embedding_dim
vocab_size
train_embedding
"""
def __init__(self, embeddings_matrix, hparams):
self.embeddings_matrix = embeddings_matrix
self.hparams = hparams
def get_model(self):
sequence_length = self.hparams.sequence_length
I = Input(shape=(sequence_length,), dtype='float32')
E = Embedding(
self.hparams.vocab_size,
self.hparams.embedding_dim,
weights=[self.embeddings_matrix],
input_length=sequence_length,
trainable=self.hparams.train_embedding)(
I)
H = Bidirectional(GRU(128, return_sequences=True))(E)
A = TimeDistributed(Dense(3), input_shape=(sequence_length, 256))(H)
A = Flatten()(A)
A = Dense(sequence_length, activation='softmax')(A)
X = Dot((1, 1))([H, A])
X = Dense(128, activation='relu')(X)
X = Dropout(self.hparams.dropout_rate)(X)
Output = Dense(6, activation='sigmoid')(X)
model = Model(inputs=I, outputs=Output)
model.compile(
optimizer='rmsprop',
loss='binary_crossentropy',
metrics=['accuracy', auc_roc])
print(model.summary())
return model
|
Python
| 0.000001
|
@@ -363,34 +363,8 @@
roc%0A
-import keras.backend as K%0A
%0A%0Acl
|
fa022cf128f16e97aad4670cbb87b9178744e0d8
|
Add unittest for page size=1 & page chunk size=1
|
kboard/core/tests/test_utils_pagination.py
|
kboard/core/tests/test_utils_pagination.py
|
from django.core.paginator import Paginator
from django.test import TestCase
from core.utils import get_pages_nav_info
class TestUtilsPagiation(TestCase):
def get_pages_nav_info(PAGE_SIZE, NAV_PAGE_CHUNK_SIZE, TEST_LOAD_PAGE, OBJS_SIZE):
object_list = range(OBJS_SIZE)
paginator = Paginator(object_list, PAGE_SIZE)
page = paginator.page(TEST_LOAD_PAGE)
return get_pages_nav_info(page, nav_chunk_size=NAV_PAGE_CHUNK_SIZE)
def test_pages_nav_info(self):
page_nav_info = TestUtilsPagiation.get_pages_nav_info(PAGE_SIZE=5, NAV_PAGE_CHUNK_SIZE=5, TEST_LOAD_PAGE=10,
OBJS_SIZE=100)
check_elements = ('pre_nav_page', 'page_list', 'current_page_num', 'next_nav_page')
for check_element in check_elements:
self.assertIn(check_element, page_nav_info)
self.assertEqual(5, page_nav_info['pre_nav_page'])
self.assertEqual([6, 7, 8, 9, 10], page_nav_info['page_list'])
self.assertEqual(10, page_nav_info['current_page_num'])
self.assertEqual(11, page_nav_info['next_nav_page'])
def test_pre_and_next_nav_pages_are_not_exist_if_page_count_less_than_nav_page_chunck_size(self):
page_nav_info = TestUtilsPagiation.get_pages_nav_info(PAGE_SIZE=5, NAV_PAGE_CHUNK_SIZE=5, TEST_LOAD_PAGE=3,
OBJS_SIZE=17)
self.assertEqual(-1, page_nav_info['pre_nav_page'])
self.assertEqual(-1, page_nav_info['next_nav_page'])
def test_pre_nav_page_exist(self):
page_nav_info = TestUtilsPagiation.get_pages_nav_info(PAGE_SIZE=5, NAV_PAGE_CHUNK_SIZE=5, TEST_LOAD_PAGE=6,
OBJS_SIZE=31)
self.assertEqual(5, page_nav_info['pre_nav_page'])
def test_next_nav_page_exist(self):
page_nav_info = TestUtilsPagiation.get_pages_nav_info(PAGE_SIZE=5, NAV_PAGE_CHUNK_SIZE=5, TEST_LOAD_PAGE=1,
OBJS_SIZE=31)
self.assertEqual(6, page_nav_info['next_nav_page'])
|
Python
| 0
|
@@ -2115,28 +2115,992 @@
_nav_info%5B'next_nav_page'%5D)%0A
+%0A def test_page_size_1_case(self):%0A page_nav_info = TestUtilsPagiation.get_pages_nav_info(PAGE_SIZE=1, NAV_PAGE_CHUNK_SIZE=5, TEST_LOAD_PAGE=8,%0A OBJS_SIZE=50)%0A%0A self.assertEqual(5, page_nav_info%5B'pre_nav_page'%5D)%0A self.assertEqual(%5B6, 7, 8, 9, 10%5D, page_nav_info%5B'page_list'%5D)%0A self.assertEqual(8, page_nav_info%5B'current_page_num'%5D)%0A self.assertEqual(11, page_nav_info%5B'next_nav_page'%5D)%0A%0A def test_page_chunk_size_1_case(self):%0A page_nav_info = TestUtilsPagiation.get_pages_nav_info(PAGE_SIZE=5, NAV_PAGE_CHUNK_SIZE=1, TEST_LOAD_PAGE=8,%0A OBJS_SIZE=50)%0A%0A self.assertEqual(7, page_nav_info%5B'pre_nav_page'%5D)%0A self.assertEqual(%5B8, %5D, page_nav_info%5B'page_list'%5D)%0A self.assertEqual(8, page_nav_info%5B'current_page_num'%5D)%0A self.assertEqual(9, page_nav_info%5B'next_nav_page'%5D)%0A
|
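The helper under test wraps Django's Paginator, and the new page-size-1 case exercises the smallest possible pages. A minimal sketch of just the Django side of the fixture (get_pages_nav_info itself is the project's own helper):

from django.core.paginator import Paginator

paginator = Paginator(range(50), 1)      # OBJS_SIZE=50, PAGE_SIZE=1
page = paginator.page(8)                 # TEST_LOAD_PAGE=8
print(page.number, paginator.num_pages)  # 8 50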
b1f0ade0c8f6ef2282b42ff43f67bc933308f319
|
Fix example script to only create one L2 if --l2cache and -nX are given as parameters.
|
configs/example/se.py
|
configs/example/se.py
|
# Copyright (c) 2006-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Steve Reinhardt
# Simple test script
#
# "m5 test.py"
import m5
from m5.objects import *
import os, optparse, sys
m5.AddToPath('../common')
import Simulation
from Caches import *
# Get paths we might need. It's expected this file is in m5/configs/example.
config_path = os.path.dirname(os.path.abspath(__file__))
config_root = os.path.dirname(config_path)
m5_root = os.path.dirname(config_root)
parser = optparse.OptionParser()
# Benchmark options
parser.add_option("-c", "--cmd",
default=os.path.join(m5_root, "tests/test-progs/hello/bin/alpha/linux/hello"),
help="The binary to run in syscall emulation mode.")
parser.add_option("-o", "--options", default="",
help="The options to pass to the binary, use \" \" around the entire\
string.")
parser.add_option("-i", "--input", default="",
help="A file of input to give to the binary.")
execfile(os.path.join(config_root, "common", "Options.py"))
(options, args) = parser.parse_args()
if args:
print "Error: script doesn't take any positional arguments"
sys.exit(1)
process = LiveProcess()
process.executable = options.cmd
process.cmd = [options.cmd] + options.options.split()
if options.input != "":
process.input = options.input
if options.detailed:
#check for SMT workload
workloads = options.cmd.split(';')
if len(workloads) > 1:
process = []
smt_idx = 0
inputs = []
if options.input != "":
inputs = options.input.split(';')
for wrkld in workloads:
smt_process = LiveProcess()
smt_process.executable = wrkld
smt_process.cmd = wrkld + " " + options.options
if inputs and inputs[smt_idx]:
smt_process.input = inputs[smt_idx]
process += [smt_process, ]
smt_idx += 1
(CPUClass, test_mem_mode, FutureClass) = Simulation.setCPUClass(options)
CPUClass.clock = '2GHz'
np = options.num_cpus
system = System(cpu = [CPUClass(cpu_id=i) for i in xrange(np)],
physmem = PhysicalMemory(range=AddrRange("512MB")),
membus = Bus(), mem_mode = test_mem_mode)
system.physmem.port = system.membus.port
for i in xrange(np):
if options.caches:
system.cpu[i].addPrivateSplitL1Caches(L1Cache(size = '32kB'),
L1Cache(size = '64kB'))
if options.l2cache:
system.l2 = L2Cache(size='2MB')
system.tol2bus = Bus()
system.l2.cpu_side = system.tol2bus.port
system.l2.mem_side = system.membus.port
system.cpu[i].connectMemPorts(system.tol2bus)
else:
system.cpu[i].connectMemPorts(system.membus)
system.cpu[i].workload = process
if options.fastmem:
system.cpu[0].physmem_port = system.physmem.port
root = Root(system = system)
Simulation.run(options, root, system, FutureClass)
|
Python
| 0
|
@@ -3803,16 +3803,189 @@
s.port%0A%0A
+if options.l2cache:%0A system.l2 = L2Cache(size='2MB')%0A system.tol2bus = Bus()%0A system.l2.cpu_side = system.tol2bus.port%0A system.l2.mem_side = system.membus.port%0A%0A
for i in
@@ -4188,176 +4188,8 @@
he:%0A
- system.l2 = L2Cache(size='2MB')%0A system.tol2bus = Bus()%0A system.l2.cpu_side = system.tol2bus.port%0A system.l2.mem_side = system.membus.port%0A
|
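Decoded, the fix hoists the shared L2 construction out of the per-CPU loop, so `-nX --l2cache` builds the L2 and its bus once instead of reassigning them on every iteration; the loop is left with only the per-CPU wiring (L1 cache setup omitted here for brevity):

if options.l2cache:
    system.l2 = L2Cache(size='2MB')
    system.tol2bus = Bus()
    system.l2.cpu_side = system.tol2bus.port
    system.l2.mem_side = system.membus.port

for i in xrange(np):
    if options.l2cache:
        system.cpu[i].connectMemPorts(system.tol2bus)
    else:
        system.cpu[i].connectMemPorts(system.membus)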
1075b77abfbd04238a95c4b3e070c80fb141ab8b
|
Rename get_view() to get_view_method() for clarity.
|
incuna_test_utils/testcases/integration.py
|
incuna_test_utils/testcases/integration.py
|
from django.core.exceptions import ImproperlyConfigured
from django.shortcuts import render
from .request import BaseRequestTestCase
class BaseIntegrationTestCase(BaseRequestTestCase):
"""
A TestCase that operates similarly to a Selenium test.
Contains methods that access pages and render them to strings full of
HTML. Can be used to assert the contents of templates as well as doing
normal TestCase things.
Must be subclassed with the following attributes in order to work:
* user_factory
* view (class-based or function-based view)
"""
def get_view(self):
"""
Returns the class's attached view, as a method.
Checks self.view exists, and throws an ImproperlyConfigured exception
if it doesn't. Otherwise, it returns the view as a method.
"""
try:
view = self.view
except AttributeError:
message = "This test must have a 'view' attribute."
raise ImproperlyConfigured(message)
try:
return view.as_view()
except AttributeError:
return view
def access_view(self, *args, **kwargs):
"""
Helper method that accesses the test's view.
Accepts an optional 'request' kwarg. If this isn't supplied,
access_view creates a basic request on your behalf.
Returns an HTTPResponse object with the request (created or otherwise)
attached.
"""
request = kwargs.pop('request', None)
if request is None:
request = self.create_request()
view = self.get_view()
response = view(request, *args, **kwargs)
# Add the request to the response.
# This is a weird-looking but compact way of ensuring we have access to
# the request everywhere we need it, without doing clunky things like
# returning tuples all the time.
response.request = request
return response
def render_to_str(self, response, request=None):
"""
Render an HTTPResponse into a string that holds the HTML content.
Accepts an optional request parameter, and looks for a request attached
to the response if the optional parameter isn't specified.
"""
if request is None:
request = response.request
response = render(request, response.template_name, response.context_data)
return str(response.content)
def access_view_and_render_response(self, *args, **kwargs):
"""
Accesses the view and returns a string of HTML.
Combines access_view, an assertion on the returned status, and
render_to_str.
Accepts an optional 'request' kwarg holding an HTTPRequest, but will
create a simple one if the parameter isn't supplied, and
'expected_status', an expected status code for the response, which
defaults to 200. Other args and kwargs are passed on to the view
method.
"""
request = kwargs.pop('request', None)
expected_status = kwargs.pop('expected_status', 200)
response = self.access_view(request, *args, **kwargs)
# Assert that the response has the correct status code before we go
# any further. Throwing accurately descriptive failures when something
# goes wrong is better than trying to run assertions on the content
# of a HTML response for some random 404 page.
self.assertEqual(expected_status, response.status_code)
# Render the response and return it.
return self.render_to_str(response)
def assert_count(self, needle, haystack, count):
"""
Assert that 'needle' occurs exactly 'count' times in 'haystack'.
Used as a snazzier, stricter version of unittest.assertIn.
Outputs a verbose error message when it fails.
"""
actual_count = haystack.count(needle)
# Build a verbose error message in case we need it.
plural = '' if count == 1 else 's'
message = 'Expected {count} instance{plural} of {needle}, but found {actual_count}, in {haystack}'
message = message.format_map(locals())
# Make the assertion.
self.assertEqual(count, actual_count, message)
|
Python
| 0
|
@@ -586,24 +586,31 @@
def get_view
+_method
(self):%0A
@@ -1601,32 +1601,39 @@
()%0A%0A view
+_method
= self.get_view
@@ -1628,24 +1628,31 @@
elf.get_view
+_method
()%0A r
@@ -1661,24 +1661,31 @@
ponse = view
+_method
(request, *a
|
4bda6769c5e6a01e8a62b06ad310dc846fbc7cbf
|
fix an error handling bug I introduced
|
core/dbt/task/base.py
|
core/dbt/task/base.py
|
from abc import ABCMeta, abstractmethod
import os
import six
from dbt.config import RuntimeConfig, Project
from dbt.config.profile import read_profile, PROFILES_DIR
from dbt import flags
from dbt import tracking
from dbt.logger import GLOBAL_LOGGER as logger
import dbt.exceptions
class NoneConfig(object):
@classmethod
def from_args(cls, args):
return None
def read_profiles(profiles_dir=None):
"""This is only used for some error handling"""
if profiles_dir is None:
profiles_dir = PROFILES_DIR
raw_profiles = read_profile(profiles_dir)
if raw_profiles is None:
profiles = {}
else:
profiles = {k: v for (k, v) in raw_profiles.items() if k != 'config'}
return profiles
PROFILES_HELP_MESSAGE = """
For more information on configuring profiles, please consult the dbt docs:
https://docs.getdbt.com/docs/configure-your-profile
"""
@six.add_metaclass(ABCMeta)
class BaseTask(object):
ConfigType = NoneConfig
def __init__(self, args, config):
self.args = args
self.config = config
@classmethod
def from_args(cls, args):
try:
config = cls.ConfigType.from_args(args)
except DbtProjectError as exc:
logger.info("Encountered an error while reading the project:")
logger.info(to_string(exc))
tracking.track_invalid_invocation(
args=args,
result_type=exc.result_type)
raise dbt.exceptions.RuntimeException('Could not run dbt')
except DbtProfileError as exc:
logger.info("Encountered an error while reading profiles:")
logger.info(" ERROR {}".format(str(exc)))
all_profiles = read_profiles(args.profiles_dir).keys()
if len(all_profiles) > 0:
logger.info("Defined profiles:")
for profile in all_profiles:
logger.info(" - {}".format(profile))
else:
logger.info("There are no profiles defined in your "
"profiles.yml file")
logger.info(PROFILES_HELP_MESSAGE)
tracking.track_invalid_invocation(
args=args,
result_type=exc.result_type)
raise dbt.exceptions.RuntimeException('Could not run dbt')
return cls(args, config)
@abstractmethod
def run(self):
raise dbt.exceptions.NotImplementedException('Not Implemented')
def interpret_results(self, results):
return True
def get_nearest_project_dir():
root_path = os.path.abspath(os.sep)
cwd = os.getcwd()
while cwd != root_path:
project_file = os.path.join(cwd, "dbt_project.yml")
if os.path.exists(project_file):
return cwd
cwd = os.path.dirname(cwd)
return None
def move_to_nearest_project_dir():
nearest_project_dir = get_nearest_project_dir()
if nearest_project_dir is None:
raise dbt.exceptions.RuntimeException(
"fatal: Not a dbt project (or any of the parent directories). "
"Missing dbt_project.yml file"
)
os.chdir(nearest_project_dir)
class RequiresProjectTask(BaseTask):
@classmethod
def from_args(cls, args):
move_to_nearest_project_dir()
return super(RequiresProjectTask, cls).from_args(args)
class ConfiguredTask(RequiresProjectTask):
ConfigType = RuntimeConfig
class ProjectOnlyTask(RequiresProjectTask):
ConfigType = Project
|
Python
| 0.000002
|
@@ -1197,24 +1197,39 @@
except
+dbt.exceptions.
DbtProjectEr
@@ -1561,16 +1561,31 @@
except
+dbt.exceptions.
DbtProfi
|
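Why the bug only surfaced on the error path: Python resolves the names in an `except` clause lazily, when an exception is actually propagating, so the unqualified, unimported DbtProjectError passed import and happy-path runs and only exploded (as a NameError) once a real error occurred. A minimal generic demonstration:

def handle():
    try:
        raise ValueError('boom')
    except NoSuchName:   # looked up only now, so this file imports fine
        pass

try:
    handle()
except NameError as exc:
    print(exc)           # name 'NoSuchName' is not defined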
5a49379b349dc33f403cfdcfd6148dfa362512de
|
Raise NotImplementedError instead of NotImplemented
|
keystone/tests/unit/test_hacking_checks.py
|
keystone/tests/unit/test_hacking_checks.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import textwrap
import mock
import pep8
from keystone.tests.hacking import checks
from keystone.tests import unit
from keystone.tests.unit.ksfixtures import hacking as hacking_fixtures
class BaseStyleCheck(unit.BaseTestCase):
def setUp(self):
super(BaseStyleCheck, self).setUp()
self.code_ex = self.useFixture(self.get_fixture())
self.addCleanup(delattr, self, 'code_ex')
def get_checker(self):
"""Return the checker to be used for tests in this class."""
raise NotImplemented('subclasses must provide a real implementation')
def get_fixture(self):
return hacking_fixtures.HackingCode()
# We are patching pep8 so that only the check under test is actually
# installed.
@mock.patch('pep8._checks',
{'physical_line': {}, 'logical_line': {}, 'tree': {}})
def run_check(self, code):
pep8.register_check(self.get_checker())
lines = textwrap.dedent(code).strip().splitlines(True)
checker = pep8.Checker(lines=lines)
checker.check_all()
checker.report._deferred_print.sort()
return checker.report._deferred_print
def assert_has_errors(self, code, expected_errors=None):
actual_errors = [e[:3] for e in self.run_check(code)]
self.assertItemsEqual(expected_errors or [], actual_errors)
class TestCheckForMutableDefaultArgs(BaseStyleCheck):
def get_checker(self):
return checks.CheckForMutableDefaultArgs
def test(self):
code = self.code_ex.mutable_default_args['code']
errors = self.code_ex.mutable_default_args['expected_errors']
self.assert_has_errors(code, expected_errors=errors)
class TestBlockCommentsBeginWithASpace(BaseStyleCheck):
def get_checker(self):
return checks.block_comments_begin_with_a_space
def test(self):
code = self.code_ex.comments_begin_with_space['code']
errors = self.code_ex.comments_begin_with_space['expected_errors']
self.assert_has_errors(code, expected_errors=errors)
class TestAssertingNoneEquality(BaseStyleCheck):
def get_checker(self):
return checks.CheckForAssertingNoneEquality
def test(self):
code = self.code_ex.asserting_none_equality['code']
errors = self.code_ex.asserting_none_equality['expected_errors']
self.assert_has_errors(code, expected_errors=errors)
class BaseLoggingCheck(BaseStyleCheck):
def get_checker(self):
return checks.CheckForLoggingIssues
def get_fixture(self):
return hacking_fixtures.HackingLogging()
def assert_has_errors(self, code, expected_errors=None):
# pull out the parts of the error that we'll match against
actual_errors = (e[:3] for e in self.run_check(code))
# adjust line numbers to make the fixture data more readable.
import_lines = len(self.code_ex.shared_imports.split('\n')) - 1
actual_errors = [(e[0] - import_lines, e[1], e[2])
for e in actual_errors]
self.assertEqual(expected_errors or [], actual_errors)
class TestCheckForDebugLoggingIssues(BaseLoggingCheck):
def test_for_translations(self):
fixture = self.code_ex.assert_no_translations_for_debug_logging
code = self.code_ex.shared_imports + fixture['code']
errors = fixture['expected_errors']
self.assert_has_errors(code, expected_errors=errors)
class TestLoggingWithWarn(BaseLoggingCheck):
def test(self):
data = self.code_ex.assert_not_using_deprecated_warn
code = self.code_ex.shared_imports + data['code']
errors = data['expected_errors']
self.assert_has_errors(code, expected_errors=errors)
class TestCheckForNonDebugLoggingIssues(BaseLoggingCheck):
def test_for_translations(self):
for example in self.code_ex.examples:
code = self.code_ex.shared_imports + example['code']
errors = example['expected_errors']
self.assert_has_errors(code, expected_errors=errors)
class TestDictConstructorWithSequenceCopy(BaseStyleCheck):
def get_checker(self):
return checks.dict_constructor_with_sequence_copy
def test(self):
code = self.code_ex.dict_constructor['code']
errors = self.code_ex.dict_constructor['expected_errors']
self.assert_has_errors(code, expected_errors=errors)
|
Python
| 0.000005
|
@@ -1069,16 +1069,21 @@
lemented
+Error
('subcla
@@ -1100,16 +1100,53 @@
provide
+'%0A '
a real i
|
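The distinction behind the commit: NotImplemented is the singleton that binary operators return to request reflected dispatch, not an exception class, so NotImplemented('...') fails with TypeError: 'NotImplementedType' object is not callable instead of delivering the intended message. The corrected pattern:

class BaseStyleCheck(object):
    def get_checker(self):
        # NotImplementedError is the exception meant for abstract hooks
        raise NotImplementedError(
            'subclasses must provide a real implementation')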
b77fc9813908401c29fe9e8dc121a88a5299a4b7
|
fix parameter description
|
corefunc/atlasbase.py
|
corefunc/atlasbase.py
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 et:
import numpy as np
import os
import nibabel as nib
from ATT.algorithm import roimethod
from ATT.algorithm import tools
from ATT.iofunc import iofiles
class ImageCalculator(object):
def __init__(self):
pass
def merge4D(self, rawdatapath, outdatapath, outname):
"""
Merge 3D images together
--------------------------------------
Parameters:
rawdatapath: raw data path. Needs to be a list containing the path of each image
outdatapath: output path.
outname: output data name.
Return:
outdata: merged file
"""
if isinstance(rawdatapath, np.ndarray):
rawdatapath = rawdatapath.tolist()
header = nib.load(rawdatapath[0]).get_header()
datashape = nib.load(rawdatapath[0]).get_data().shape
nsubj = len(rawdatapath)
outdata = np.zeros((datashape[0], datashape[1], datashape[2], nsubj))
for i in range(nsubj):
if os.path.exists(rawdatapath[i]):
outdata[...,i] = nib.load(rawdatapath[i]).get_data()
else:
raise Exception('File does not exist: %s' % rawdatapath[i])
img = nib.Nifti1Image(outdata, None, header)
if outdatapath.split('/')[-1].endswith('.nii.gz'):
nib.save(img, outdatapath)
else:
# suffix = rawdatapath[0].split('/')[-1].split('.')[1:]
# outdatapath_new = os.path.join(outdatapath, '.'.join([outname] + suffix))
outdatapath_new = os.path.join(outdatapath, outname)
nib.save(img, outdatapath_new)
return outdata
class ExtractSignals(object):
def __init__(self, atlas, regions):
masksize = tools.get_masksize(atlas)
self.atlas = atlas
self.regions = regions
self.masksize = masksize
def getsignals(self, targ, method = 'mean'):
"""
Get measurement signals from target image by mask atlas.
-------------------------------------------
Parameters:
targ: target image
method: 'mean' or 'std'
roi signal extraction method
Return:
signals: extracted signals
"""
signals = tools.get_signals(targ, self.atlas, method)
self.signals = signals
return signals
def getcoordinate(self, targ, size = [2,2,2], method = 'peak'):
"""
Get peak coordinate signals from target image by mask atlas.
-----------------------------------------------------------
Parameters:
targ: target image
size: voxel size
method: 'peak' or 'center'
coordinate extraction method
"""
coordinate = tools.get_coordinate(targ, self.atlas, size, method)
self.coordinate = coordinate
return coordinate
def getdistance_array2point(self, targ, pointloc, size = [2,2,2], coordmeth = 'peak', distmeth = 'euclidean'):
"""
Get distance from each coordinate to a specific location
-------------------------------------------------------
Parameters:
targ: target image
pointloc: location of a specific voxel
size: voxel size
coordmeth: 'peak' or 'center'
coordinate extraction method
distmeth: distance method
"""
if not hasattr(self, 'coordinate'):
self.coordinate = tools.get_coordinate(targ, self.atlas, size, coordmeth)
dist_point = np.empty((self.coordinate.shape[0], self.coordinate.shape[1]))
pointloc = np.array(pointloc)
if pointloc.shape[0] == 1:
pointloc = np.tile(pointloc, [dist_point.shape[1],1])
for i in range(dist_point.shape[0]):
for j in range(dist_point.shape[1]):
if not isinstance(pointloc[j], np.ndarray):
raise Exception('pointloc should be 2 dimension array or list')
dist_point[i,j] = tools.calcdist(self.coordinate[i,j,:], pointloc[j], distmeth)
self.dist_point = dist_point
return dist_point
class MakeMasks(object):
def __init__(self, header = None, issave = False, savepath = '.'):
self.header = header
self.issave = issave
self.savepath = savepath
def makepm(self, atlas, meth = 'all', maskname = 'pm.nii.gz'):
"""
Make probabilistic maps
------------------------------
Parameters:
atlas: atlas mask
meth: 'all' or 'part'
maskname: output mask name, by default is 'pm.nii.gz'
Return:
pm
"""
pm = roimethod.make_pm(atlas, meth)
self.pm = pm
if self.issave is True:
iofactory = iofiles.IOFactory()
factory = iofactory.createfactory(self.savepath, maskname)
if maskname.endswith('gz') | maskname.endswith('nii'):
factory.save_nifti(pm, self.header)
return pm
def makempm(self, threshold, maskname = 'mpm.nii.gz'):
"""
Make maximum probabilistic maps
--------------------------------
Parameters:
threshold: mpm threshold
maskname: output mask name. By default is 'mpm.nii.gz'
"""
if self.pm is None:
raise Exception('please execute makepm first')
mpm = roimethod.make_mpm(self.pm, threshold)
self.mpm = mpm
if self.issave is True:
iofactory = iofiles.IOFactory()
factory = iofactory.createfactory(self.savepath, maskname)
if maskname.endswith('gz') | maskname.endswith('nii'):
factory.save_nifti(mpm, self.header)
return mpm
def makemask_sphere(self, voxloc, radius, atlasshape = [91,109,91], maskname = 'spheremask.nii.gz'):
"""
Make mask by means of roi sphere
-------------------------------------------------
Parameters:
voxloc: peak voxel locations of each region
Note that it's a list
radius: sphere radius, such as [3,3,3],etc.
atlasshape: atlas shape
maskname: Output mask name. By default is 'spheremask.nii.gz'
"""
spheremask = np.empty(atlasshape)
for i, e in enumerate(voxloc):
spheremask = roimethod.sphere_roi(spheremask, e, radius, i+1)
self.spheremask = spheremask
if self.issave is True:
iofactory = iofiles.IOFactory()
factory = iofactory.createfactory(self.savepath, maskname)
if maskname.endswith('gz') | maskname.endswith('nii'):
factory.save_nifti(spheremask, self.header)
return spheremask
def makemas_rgrowth(self):
pass
|
Python
| 0.000001
|
@@ -2215,16 +2215,25 @@
or 'std'
+ or 'max'
%0A
|
7fd0ed0897ffedf117698502cdefac0436ac4f2c
|
remove MotechTab import
|
corehq/tabs/config.py
|
corehq/tabs/config.py
|
from corehq.apps.styleguide.tabs import SGExampleTab, SimpleCrispyFormSGExample, \
ControlsDemoSGExample
from corehq.tabs.tabclasses import DashboardTab, ProjectReportsTab, ProjectInfoTab, SetupTab, \
ProjectDataTab, ApplicationsTab, CloudcareTab, MessagingTab, ProjectUsersTab, \
AdminTab, IndicatorAdminTab, SMSAdminTab, AccountingTab, ProjectSettingsTab, \
MySettingsTab, MotechTab
MENU_TABS = (
DashboardTab,
ProjectInfoTab,
ProjectReportsTab,
IndicatorAdminTab,
ProjectDataTab,
SetupTab,
ProjectUsersTab,
ApplicationsTab,
CloudcareTab,
MessagingTab,
MotechTab,
# invisible
ProjectSettingsTab,
MySettingsTab,
# Admin
AdminTab,
SMSAdminTab,
AccountingTab,
# Styleguide
SGExampleTab,
SimpleCrispyFormSGExample,
ControlsDemoSGExample,
)
|
Python
| 0
|
@@ -386,19 +386,8 @@
sTab
-, MotechTab
%0A%0AME
@@ -598,23 +598,8 @@
ab,%0A
- MotechTab,%0A
|
0185a30a340fae956c0e5b9d9f354e56e2e2178a
|
update the wsgi file
|
crate_project/wsgi.py
|
crate_project/wsgi.py
|
import newrelic.agent
newrelic.agent.initialize()
import pinax.env
from django.core.wsgi import get_wsgi_application
# setup the environment for Django and Pinax
pinax.env.setup_environ(__file__)
# set application for WSGI processing
application = get_wsgi_application()
|
Python
| 0
|
@@ -1,12 +1,117 @@
+import os%0Aimport sys%0A%0Asys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), %22apps%22)))%0A%0A
import newre
@@ -120,17 +120,16 @@
c.agent%0A
-%0A
newrelic
@@ -153,26 +153,8 @@
()%0A%0A
-import pinax.env%0A%0A
from
@@ -202,127 +202,8 @@
tion
-%0A%0A# setup the environment for Django and Pinax%0Apinax.env.setup_environ(__file__)%0A%0A# set application for WSGI processing
%0Aapp
|
61a70e74becdc30009e8a2776594ee9f861bd824
|
fix os x job when workspace is empty
|
create_jenkins_job.py
|
create_jenkins_job.py
|
#!/usr/bin/env python3
# Copyright 2015 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import sys
try:
import ros_buildfarm # noqa
except ImportError:
sys.exit("Could not import ros_buildfarm, please add to the PYTHONPATH.")
try:
import jenkinsapi # noqa
except ImportError:
sys.exit("Could not import jenkinsapi, please install it with pip or apt-get.")
from ros_buildfarm.jenkins import configure_job
from ros_buildfarm.jenkins import connect
from ros_buildfarm.templates import expand_template
try:
from ros_buildfarm.templates import template_prefix_path
except ImportError:
sys.exit("Could not import symbol from ros_buildfarm, please update ros_buildfarm.")
template_prefix_path[:] = \
[os.path.join(os.path.abspath(os.path.dirname(__file__)), 'job_templates')]
def main(argv=None):
if argv is None:
argv = sys.argv[1:]
parser = argparse.ArgumentParser(description="Creates the 'ros2_batch_ci_osx' job on Jenkins")
parser.add_argument(
'--jenkins-url', '-u', help="Url of the jenkins server to which the job should be added",
required=True)
parser.add_argument(
'--ci-scripts-repository', default='git@github.com:ros2/ros2.git',
help="repository from which ci scripts should be cloned"
)
parser.add_argument(
'--ci-scripts-default-branch', default='ci_scripts',
help="default branch of the ci scripts repository to get ci scripts from (this is a job parameter)"
)
parser.add_argument(
'--commit', action='store_true',
help='Actually modify the Jenkins jobs instead of only doing a dry run',
)
args = parser.parse_args(argv)
data = {
'ci_scripts_repository': args.ci_scripts_repository,
'ci_scripts_default_branch': args.ci_scripts_default_branch,
'time_trigger_spec': '',
'mailer_recipients': '',
}
jenkins = connect(args.jenkins_url)
os_configs = {
'linux': {
'label_expression': 'linux_slave_on_master',
'shell_type': 'Shell',
},
'osx': {
'label_expression': 'osx_slave_dosa',
'shell_type': 'Shell',
},
'windows': {
'label_expression': 'windows_slave_eatable',
'shell_type': 'BatchFile',
},
}
jenkins_kwargs = {}
if not args.commit:
jenkins_kwargs['dry_run'] = True
# configure os specific jobs
for os_name in sorted(os_configs.keys()):
# configure manual triggered job
job_name = 'ros2_batch_ci_' + os_name
job_data = dict(data)
job_data['os_name'] = os_name
job_data.update(os_configs[os_name])
job_config = expand_template('ros2_batch_ci_job.xml.template', job_data)
configure_job(jenkins, job_name, job_config, **jenkins_kwargs)
# configure packaging job
job_name = 'ros2_packaging_' + os_name
job_config = expand_template('ros2_packaging_job.xml.template', job_data)
configure_job(jenkins, job_name, job_config, **jenkins_kwargs)
# configure nightly triggered job
job_name = 'ros2_batch_ci_' + os_name + '_nightly'
job_data['time_trigger_spec'] = '0 10 * * *'
job_data['mailer_recipients'] = 'ros@osrfoundation.org'
job_config = expand_template('ros2_batch_ci_job.xml.template', job_data)
configure_job(jenkins, job_name, job_config, **jenkins_kwargs)
# configure the launch job
job_data = {'label_expression': 'master'}
job_config = expand_template('ros2_batch_ci_launcher_job.xml.template', job_data)
configure_job(jenkins, 'ros2_batch_ci_launcher', job_config, **jenkins_kwargs)
if __name__ == '__main__':
main()
|
Python
| 0.000013
|
@@ -2727,32 +2727,231 @@
type': 'Shell',%0A
+ # the current OS X slave can't handle git@github urls%0A 'ci_scripts_repository': args.ci_scripts_repository.replace(%0A 'git@github.com:', 'https://github.com/'),%0A
%7D,%0A
|
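The decoded per-OS override is a plain string rewrite from the SSH remote form to HTTPS:

ssh_url = 'git@github.com:ros2/ros2.git'
https_url = ssh_url.replace('git@github.com:', 'https://github.com/')
print(https_url)   # https://github.com/ros2/ros2.git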
a0fab69d12d64d4e5371fcb26a4ec70365a76fa6
|
Move task results database to data dir
|
cref/app/web/tasks.py
|
cref/app/web/tasks.py
|
from celery import Celery
from cref.app.terminal import run_cref
app = Celery(
'tasks',
backend='db+sqlite:///results.sqlite',
broker='amqp://guest@localhost//'
)
@app.task
def predict_structure(sequence, params={}):
return run_cref(sequence)
|
Python
| 0.000004
|
@@ -113,16 +113,21 @@
lite:///
+data/
results.
|
6e287393ad87ad09f94f845d372b5835ad4ebaba
|
Increase plot range
|
examples/alpha250-4/adc-bram/test.py
|
examples/alpha250-4/adc-bram/test.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import os
import time
from adc_bram import AdcBram
from koheron import connect
import matplotlib
matplotlib.use('TKAgg')
from matplotlib import pyplot as plt
from matplotlib.lines import Line2D
host = os.getenv('HOST', '192.168.1.50')
client = connect(host, 'adc-bram', restart=False)
driver = AdcBram(client)
print('ADC size = {}'.format(driver.adc_size))
driver.set_reference_clock(0) # External
time.sleep(5)
clk_200MHz = {'idx': 0, 'fs': 200E6}
clk_250MHz = {'idx': 1, 'fs': 250E6}
clock = clk_250MHz
driver.set_sampling_frequency(clock['idx'])
# driver.phase_shift(0)
t = np.arange(driver.adc_size) / clock['fs']
t_us = 1e6 * t
# Dynamic plot
fig = plt.figure()
ax = fig.add_subplot(111)
y = np.zeros(driver.adc_size)
line0 = Line2D([], [], color='blue', label='IN0')
line1 = Line2D([], [], color='green', label='IN1')
line2 = Line2D([], [], color='red', label='IN2')
line3 = Line2D([], [], color='cyan', label='IN3')
ax.add_line(line0)
ax.add_line(line1)
ax.add_line(line2)
ax.add_line(line3)
ax.set_xlabel('Time (us)')
ax.set_ylabel('ADC Raw data')
ax.set_xlim((t_us[0], t_us[-1]))
# ax.set_ylim((-2**15, 2**15))
ax.set_ylim((-300, 300))
ax.legend(loc='upper right')
fig.canvas.draw()
while True:
try:
driver.trigger_acquisition()
time.sleep(0.1)
driver.get_adc(0)
driver.get_adc(1)
line0.set_data(t_us, driver.adc0[0,:])
line1.set_data(t_us, driver.adc0[1,:])
line2.set_data(t_us, driver.adc1[0,:])
line3.set_data(t_us, driver.adc1[1,:])
fig.canvas.draw()
plt.pause(0.001)
# plt.pause(3600)
except KeyboardInterrupt:
break
|
Python
| 0.000001
|
@@ -1206,15 +1206,19 @@
((-3
-00, 300
+2768, 32768
))%0Aa
|
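The new limits restore the full signed 16-bit scale that the commented-out line already hints at:

n_bits = 16                       # assuming a 16-bit ADC, per the 2**15 hint
full_scale = 2 ** (n_bits - 1)    # 32768
ylim = (-full_scale, full_scale)  # replaces the zoomed-in (-300, 300)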
63b828983b38eb00e68683c19c51f444102a030d
|
support py3k in python file
|
plugin/vim_bootstrap_updater.py
|
plugin/vim_bootstrap_updater.py
|
import os
import urllib
import urllib2
def vimrc_path(editor):
return os.path.expanduser('~/.%src' % editor)
def _generate_vimrc(editor, langs):
params = [('langs', l.strip()) for l in langs]
params.append(('editor', editor))
data = urllib.urlencode(params)
resp = urllib2.urlopen("https://vim-bootstrap.appspot.com/generate.vim",
data)
return resp.read()
def get_available_langs():
resp = urllib2.urlopen("https://vim-bootstrap.appspot.com/langs")
return resp.read()
def update(vimrc, editor, langs):
content = _generate_vimrc(editor, langs)
vimrc = os.path.expanduser(vimrc)
with open(vimrc, 'w') as fh:
fh.write(str(content))
return content
|
Python
| 0
|
@@ -7,36 +7,140 @@
os%0A
-import urllib%0Aimport urllib2
+try:%0A import urllib2%0A import urllib%0Aexcept ImportError:%0A import urllib.request as urllib2%0A import urllib.parse as urllib
%0A%0A%0Ad
@@ -625,24 +625,40 @@
resp.read()
+.decode('utf-8')
%0A%0A%0Adef updat
|
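The decoded shim aliases Python 3's split urllib modules under their Python 2 names, so the rest of the file can keep calling urllib.urlencode and urllib2.urlopen unchanged:

try:                                 # Python 2
    import urllib2
    import urllib
except ImportError:                  # Python 3: alias the split modules
    import urllib.request as urllib2
    import urllib.parse as urllib

params = urllib.urlencode({'editor': 'vim'})   # works under both aliases
print(params)                                   # editor=vim

One loose end the sketch does not cover: on Python 3, urlopen expects POST data as bytes, so the module would also need to encode the urlencode output.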
cf67d625b60465e553b7aa88e78190ec38b213a8
|
Add back deleted test for external search
|
marvin/tests/test_tasks.py
|
marvin/tests/test_tasks.py
|
from marvin.models import Movie
from marvin.tests import TestCaseWithTempDB
from mock import Mock, patch
class OMDBFetchTest(TestCaseWithTempDB):
def setUp(self):
# We can't import the tasks module until create_app has been called,
# which is why we do it down here
from marvin import tasks
self.parse_runtime_to_seconds = tasks.parse_runtime_to_seconds
self.update_meta = tasks.update_meta_for_movie
def test_metadata_fetching(self):
# Tests find_duration, find_metascore and find_imdb_ratings
movie = Movie(
title='The Hobbit: The Desolation of Smaug',
external_id='imdb:tt1170358'
)
movie_id = self.addItems(movie)
attrs = {
'json.return_value': {
"Actors": "Martin Freeman, Ian McKellen, Richard Armitage, Benedict Cumberbatch",
"Director": "Peter Jackson",
"Genre": "Adventure, Drama, Fantasy",
"Plot": "The Dwarves, Bilbo and Gandalf have successfully escaped the Misty Mountains, and Bilbo " +
"has gained the One Ring. They all continue their journey to get their gold back from the " +
"Dragon, Smaug.",
"Poster": "http://ia.media-imdb.com/images/M/MV5BMjAxMjMzMzAxOV5BMl5BanBnXkFtZTcwNTU3NzU2OQ@@._V1_" +
"SX300.jpg",
"Rated": "N/A",
"Released": "13 Dec 2013",
"Metascore": "66",
"Response": "True",
"Runtime": "161 min",
"Title": "The Hobbit: The Desolation of Smaug",
"Type": "N/A",
"Writer": "Fran Walsh, Philippa Boyens",
"Year": "2013",
"imdbID": "tt1170358",
"imdbRating": "8.2",
"imdbVotes": "206,398"
},
'status_code': 200,
}
response = Mock(**attrs)
requests = Mock(**{'get.return_value': response})
with patch('marvin.tasks.requests', requests):
self.update_meta('imdb:tt1170358')
with self.app.test_request_context():
movie = Movie.query.get(movie_id)
self.assertEqual(movie.duration_in_s, 161*60)
self.assertEqual(movie.imdb_rating, 8.2)
self.assertEqual(movie.number_of_imdb_votes, 206398)
self.assertEqual(movie.metascore, 66)
self.assertEqual(movie.cover_img,
"http://ia.media-imdb.com/images/M/MV5BMjAxMjMzMzAxOV5BMl5BanBnXkFtZTcwNTU3NzU2OQ@@._V1_SX300.jpg")
def test_parse_runtime(self):
tests = [
('120 min', 7200),
('1 h 30 min', 5400),
('yeah, right', 0),
]
for runtime, expected in tests:
parsed = self.parse_runtime_to_seconds(runtime)
self.assertEqual(parsed, expected)
|
Python
| 0
|
@@ -441,16 +441,1718 @@
r_movie%0A
+ self.external_search = tasks.external_search%0A%0A%0A def test_query_omdb(self):%0A # Supply a mock of requests%0A attrs = %7B%0A 'json.return_value': %7B%0A u'Search': %5B%0A %7B%0A u'imdbID': u'tt0109179',%0A u'Year': u'1998',%0A u'Type': u'movie',%0A u'Title': u%22Ava's Magical Adventure%22%0A %7D,%0A %7B%0A u'imdbID': u'tt0798009',%0A u'Year': u'2004',%0A u'Type': u'episode',%0A u'Title': u'Ava Gardner: Another Touch of Venus'%0A %7D,%0A %7B%0A u'imdbID': u'tt1548011',%0A u'Year': u'2008',%0A u'Type': u'movie',%0A u'Title': u'Stalking Ava Gardner'%0A %7D,%0A %7B%0A u'imdbID': u'tt0363371',%0A u'Year': u'1975',%0A u'Type': u'series',%0A u'Title': u'Signora Ava'%0A %7D%0A %5D%0A %7D,%0A 'status_code': 200,%0A %7D%0A response = Mock(**attrs)%0A requests = Mock(**%7B'get.return_value': response%7D)%0A%0A # pylint: disable=multiple-statements%0A patch_requests = patch('marvin.tasks.requests', requests)%0A with patch_requests, self.app.test_request_context():%0A self.external_search('ava')%0A with self.app.test_request_context():%0A # we expect the 'series'-type to be ignored%0A self.assertEqual(len(Movie.query.all()), 3)%0A
%0A%0A de
|
5851c7524e66cfe3ee7e59542224d097d6e01f9e
|
Remove 's' in path.
|
mojo/public/tools/download_archiecture_independent_frameworks.py
|
mojo/public/tools/download_archiecture_independent_frameworks.py
|
#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import os
import sys
CURRENT_PATH = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.join(CURRENT_PATH, "pylib"))
import gs
PREBUILT_FILE_PATH = os.path.join(CURRENT_PATH, "prebuilt", "frameworks")
FILES_TO_DOWNLOAD = [
"apptest.dartzip",
]
def download(tools_directory, version_file):
stamp_path = os.path.join(PREBUILT_FILE_PATH, "VERSION")
version_path = os.path.join(CURRENT_PATH, version_file)
with open(version_path) as version_file:
version = version_file.read().strip()
try:
with open(stamp_path) as stamp_file:
current_version = stamp_file.read().strip()
if current_version == version:
return 0 # Already have the right version.
except IOError:
pass # If the stamp file does not exist we need to download new binaries.
for file_name in FILES_TO_DOWNLOAD:
download_file(file_name, version, tools_directory)
with open(stamp_path, 'w') as stamp_file:
stamp_file.write(version)
return 0
def download_file(basename, version, tools_directory):
find_depot_tools_path = os.path.join(CURRENT_PATH, tools_directory)
sys.path.insert(0, find_depot_tools_path)
# pylint: disable=F0401
import find_depot_tools
depot_tools_path = find_depot_tools.add_depot_tools_to_path()
gs_path = "gs://mojo/files/" + version + "/" + basename
output_file = os.path.join(PREBUILT_FILE_PATH, basename)
gs.download_from_public_bucket(gs_path, output_file,
depot_tools_path)
def main():
parser = argparse.ArgumentParser(description="Downloads bundled framework "
"binaries from Google Cloud Storage.")
parser.add_argument("--tools-directory",
dest="tools_directory",
metavar="<tools-directory>",
type=str,
required=True,
help="Path to the directory containing "
"find_depot_tools.py, specified as a relative path "
"from the location of this file.")
parser.add_argument("--version-file",
dest="version_file",
metavar="<version-file>",
type=str,
default="../VERSION",
help="Path to the file containing the version of the "
"shell to be fetched, specified as a relative path "
"from the location of this file (default: "
"%(default)s).")
args = parser.parse_args()
return download(args.tools_directory, args.version_file)
if __name__ == "__main__":
sys.exit(main())
|
Python
| 0.000005
|
@@ -1491,17 +1491,16 @@
ojo/file
-s
/%22 + ver
|
cd92705f02242b7f17bea75398cdb0eda8479254
|
add example to feature extraction documentation
|
laserchicken/feature_extractor/__init__.py
|
laserchicken/feature_extractor/__init__.py
|
"""Feature extractor module."""
import importlib
import re
import numpy as np
from laserchicken import keys, utils
from .eigenvals_feature_extractor import EigenValueFeatureExtractor
from .entropy_feature_extractor import EntropyFeatureExtractor
from .sigma_z_feature_extractor import SigmaZFeatureExtractor
from .height_statistics_feature_extractor import HeightStatisticsFeatureExtractor
def _feature_map(module_name=__name__):
"""Construct a mapping from feature names to feature extractor classes."""
module = importlib.import_module(module_name)
return {
feature_name: extractor
for name, extractor in vars(module).items() if re.match('^[A-Z][a-zA-Z0-9_]*FeatureExtractor$', name)
for feature_name in extractor.provides()
}
FEATURES = _feature_map()
def compute_features(env_point_cloud, neighborhoods, target_point_cloud, feature_names, volume, overwrite=False,
**kwargs):
"""
Compute features for each target and store result as attributes in target point cloud
:param env_point_cloud: environment point cloud
:param neighborhoods: list of neighborhoods which are themselves lists of indices referring to the environment
:param target_point_cloud: point cloud of targets
:param feature_names: list of features that are to be calculated
:param volume: object describing the volume that contains the neighborhood points
:param overwrite: if true, even features that are already in the targets point cloud will be calculated and stored
:param kwargs: keyword arguments for the individual feature extractors
:return: None, results are stored in attributes of the target point cloud
"""
_verify_feature_names(feature_names)
ordered_features = _make_feature_list(feature_names)
for feature in ordered_features:
if (not overwrite) and (feature in target_point_cloud[keys.point]):
continue # Skip feature calc if it is already there and we do not overwrite
extractor = FEATURES[feature]()
_add_or_update_feature(env_point_cloud, neighborhoods, target_point_cloud, extractor, volume, overwrite, kwargs)
utils.add_metadata(target_point_cloud, type(extractor).__module__, extractor.get_params())
def _verify_feature_names(feature_names):
unknown_features = [f for f in feature_names if f not in FEATURES]
if any(unknown_features):
raise ValueError('Unknown features selected: {}. Available features are: {}'
.format(', '.join(unknown_features), ', '.join(FEATURES.keys())))
def _add_or_update_feature(env_point_cloud, neighborhoods, target_point_cloud, extractor, volume, overwrite, kwargs):
n_targets = len(target_point_cloud[keys.point]["x"]["data"])
for k in kwargs:
setattr(extractor, k, kwargs[k])
provided_features = extractor.provides()
n_features = len(provided_features)
feature_values = [np.empty(n_targets, dtype=np.float64) for i in range(n_features)]
for target_index in range(n_targets):
point_values = extractor.extract(env_point_cloud, neighborhoods[target_index], target_point_cloud,
target_index, volume)
if n_features > 1:
for i in range(n_features):
feature_values[i][target_index] = point_values[i]
else:
feature_values[0][target_index] = point_values
for i in range(n_features):
feature = provided_features[i]
if overwrite or (feature not in target_point_cloud[keys.point]):
target_point_cloud[keys.point][feature] = {"type": np.float64, "data": feature_values[i]}
def _make_feature_list(feature_names):
feature_list = reversed(_make_feature_list_helper(feature_names))
seen = set()
return [f for f in feature_list if not (f in seen or seen.add(f))]
def _make_feature_list_helper(feature_names):
feature_list = feature_names
for feature_name in feature_names:
extractor = FEATURES[feature_name]()
dependencies = extractor.requires()
feature_list.extend(dependencies)
feature_list.extend(_make_feature_list_helper(dependencies))
return feature_list
|
Python
| 0
|
@@ -1010,16 +1010,22 @@
sult as
+point
attribut
@@ -1050,16 +1050,483 @@
cloud%0A%0A
+ Example:%0A %3E%3E%3E point_cloud = read_ply.read('data1.ply')%0A %3E%3E%3E target_point_cloud = read_ply.read('data2.ply')%0A %3E%3E%3E volume = volume_specification.InfiniteCylinder(4)%0A %3E%3E%3E neighborhoods = compute_neighborhoods(point_cloud, target_point_cloud, volume)%0A %3E%3E%3E compute_features(point_cloud, neighborhoods, target_point_cloud, %5B'eigenv_1', 'kurto_z'%5D, volume)%0A%0A Results of the example above are stored in the target point cloud as extra point attributes%0A%0A
:par
|
ff16e993beca5ff0aa490bb140a46e64d026a6c9
|
Fix task banner with 'actionable' callback when using templates in name (#38165)
|
lib/ansible/plugins/callback/actionable.py
|
lib/ansible/plugins/callback/actionable.py
|
# (c) 2015, Andrew Gaffney <andrew@agaffney.org>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
callback: actionable
type: stdout
short_description: shows only items that need attention
description:
- Use this callback when you don't care about OK or Skipped results.
- This callback suppresses any status that is not Failed or Changed.
version_added: "2.1"
extends_documentation_fragment:
- default_callback
requirements:
- set as stdout callback in configuration
'''
from ansible.plugins.callback.default import CallbackModule as CallbackModule_default
class CallbackModule(CallbackModule_default):
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'stdout'
CALLBACK_NAME = 'actionable'
def __init__(self):
self.super_ref = super(CallbackModule, self)
self.super_ref.__init__()
self.last_task = None
self.shown_title = False
def v2_playbook_on_handler_task_start(self, task):
self.super_ref.v2_playbook_on_handler_task_start(task)
self.shown_title = True
def v2_playbook_on_task_start(self, task, is_conditional):
self.last_task = task
self.shown_title = False
def display_task_banner(self):
if not self.shown_title:
self.super_ref.v2_playbook_on_task_start(self.last_task, None)
self.shown_title = True
def v2_runner_on_failed(self, result, ignore_errors=False):
self.display_task_banner()
self.super_ref.v2_runner_on_failed(result, ignore_errors)
def v2_runner_on_ok(self, result):
if result._result.get('changed', False):
self.display_task_banner()
self.super_ref.v2_runner_on_ok(result)
def v2_runner_on_unreachable(self, result):
self.display_task_banner()
self.super_ref.v2_runner_on_unreachable(result)
def v2_runner_on_skipped(self, result):
pass
def v2_playbook_on_include(self, included_file):
pass
def v2_runner_item_on_ok(self, result):
if result._result.get('changed', False):
self.display_task_banner()
self.super_ref.v2_runner_item_on_ok(result)
def v2_runner_item_on_skipped(self, result):
pass
def v2_runner_item_on_failed(self, result):
self.display_task_banner()
self.super_ref.v2_runner_item_on_failed(result)
|
Python
| 0
|
@@ -713,16 +713,51 @@
on%0A'''%0A%0A
+from ansible import constants as C%0A
from ans
@@ -834,16 +834,16 @@
default%0A
-
%0A%0Aclass
@@ -1114,16 +1114,53 @@
= None%0A
+ self.last_task_banner = None%0A
@@ -1429,16 +1429,76 @@
= task%0A
+ self.last_task_banner = self._get_task_banner(task)%0A
@@ -1666,16 +1666,16 @@
, None)%0A
-
@@ -1695,32 +1695,1256 @@
n_title = True%0A%0A
+ def _print_task_banner(self, task):%0A self._display.banner(self.last_task_banner)%0A self._print_task_path(self.last_task)%0A self._last_task_banner = self.last_task._uuid%0A%0A def _print_task_path(self, task):%0A if self._display.verbosity %3E= 2:%0A path = task.get_path()%0A if path:%0A self._display.display(u%22task path: %25s%22 %25 path, color=C.COLOR_DEBUG)%0A%0A def _get_task_banner(self, task):%0A # args can be specified as no_log in several places: in the task or in%0A # the argument spec. We can check whether the task is no_log but the%0A # argument spec can't be because that is only run on the target%0A # machine and we haven't run it thereyet at this time.%0A #%0A # So we give people a config option to affect display of the args so%0A # that they can secure this if they feel that their stdout is insecure%0A # (shoulder surfing, logging stdout straight to a file, etc).%0A args = ''%0A if not task.no_log and C.DISPLAY_ARGS_TO_STDOUT:%0A args = u', '.join(u'%25s=%25s' %25 a for a in task.args.items())%0A args = u' %25s' %25 args%0A%0A return u%22TASK %5B%25s%25s%5D%22 %25 (task.get_name().strip(), args)%0A%0A
def v2_runne
|
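Note on the diff above: the fix renders the task banner once, at task start while its templated name can still be resolved, then prints the cached string only when something actionable happens. A stripped-down sketch of that deferred-banner pattern (names here are illustrative, not Ansible's API):

class DeferredBanner(object):
    """Cache a banner at task start; print it at most once, on demand."""
    def __init__(self):
        self.last_banner = None
        self.shown = False

    def on_task_start(self, name, args=''):
        # Render now, while the templating context is still valid.
        self.last_banner = "TASK [%s%s]" % (name, args)
        self.shown = False

    def emit_if_needed(self):
        if not self.shown and self.last_banner is not None:
            print(self.last_banner)
            self.shown = True

banner = DeferredBanner()
banner.on_task_start("install {{ pkg }}")
banner.emit_if_needed()  # prints the banner once
banner.emit_if_needed()  # no duplicate output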
ce70c151a8cbc70526e125f829a1fafdf390e9a7
|
Make scalars is_active short circuit if apt (#621)
|
tensorboard/plugins/scalar/scalars_plugin.py
|
tensorboard/plugins/scalar/scalars_plugin.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The TensorBoard Scalars plugin.
See `http_api.md` in this directory for specifications of the routes for
this plugin.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import six
from six import StringIO
from werkzeug import wrappers
import tensorflow as tf
from tensorboard import plugin_util
from tensorboard.backend import http_util
from tensorboard.plugins import base_plugin
from tensorboard.plugins.scalar import metadata
class OutputFormat(object):
"""An enum used to list the valid output formats for API calls."""
JSON = 'json'
CSV = 'csv'
class ScalarsPlugin(base_plugin.TBPlugin):
"""Scalars Plugin for TensorBoard."""
plugin_name = metadata.PLUGIN_NAME
def __init__(self, context):
"""Instantiates ScalarsPlugin via TensorBoard core.
Args:
context: A base_plugin.TBContext instance.
"""
self._multiplexer = context.multiplexer
def get_plugin_apps(self):
return {
'/scalars': self.scalars_route,
'/tags': self.tags_route,
}
def is_active(self):
"""The scalars plugin is active iff any run has at least one scalar tag."""
return bool(self._multiplexer) and any(self.index_impl().values())
def index_impl(self):
"""Return {runName: {tagName: {displayName: ..., description: ...}}}."""
runs = self._multiplexer.Runs()
result = {run: {} for run in runs}
mapping = self._multiplexer.PluginRunToTagToContent(metadata.PLUGIN_NAME)
for (run, tag_to_content) in six.iteritems(mapping):
for (tag, content) in six.iteritems(tag_to_content):
content = metadata.parse_plugin_metadata(content)
summary_metadata = self._multiplexer.SummaryMetadata(run, tag)
result[run][tag] = {'displayName': summary_metadata.display_name,
'description': plugin_util.markdown_to_safe_html(
summary_metadata.summary_description)}
return result
def scalars_impl(self, tag, run, output_format):
"""Result of the form `(body, mime_type)`."""
tensor_events = self._multiplexer.Tensors(run, tag)
values = [[tensor_event.wall_time,
tensor_event.step,
tf.make_ndarray(tensor_event.tensor_proto).item()]
for tensor_event in tensor_events]
if output_format == OutputFormat.CSV:
string_io = StringIO()
writer = csv.writer(string_io)
writer.writerow(['Wall time', 'Step', 'Value'])
writer.writerows(values)
return (string_io.getvalue(), 'text/csv')
else:
return (values, 'application/json')
@wrappers.Request.application
def tags_route(self, request):
index = self.index_impl()
return http_util.Respond(request, index, 'application/json')
@wrappers.Request.application
def scalars_route(self, request):
"""Given a tag and single run, return array of ScalarEvents."""
# TODO: return HTTP status code for malformed requests
tag = request.args.get('tag')
run = request.args.get('run')
output_format = request.args.get('format')
(body, mime_type) = self.scalars_impl(tag, run, output_format)
return http_util.Respond(request, body, mime_type)
|
Python
| 0.000001
|
@@ -1879,20 +1879,15 @@
-return bool(
+if not
self
@@ -1903,43 +1903,108 @@
exer
-) and any(self.index_impl().values(
+:%0A return False%0A%0A return bool(self._multiplexer.PluginRunToTagToContent(metadata.PLUGIN_NAME
))%0A%0A
|
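Note on the diff above: is_active previously built the full run/tag index just to test emptiness; the fix returns False as soon as there is no multiplexer and otherwise only checks whether any plugin content exists. A hedged sketch of the short-circuit shape:

def is_active(multiplexer, plugin_name):
    """Cheap activity check: bail out early instead of building the index."""
    if not multiplexer:
        return False
    # PluginRunToTagToContent returns a (possibly empty) mapping, so its
    # truthiness answers "does any run have data for this plugin?"
    return bool(multiplexer.PluginRunToTagToContent(plugin_name))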
0fa9575141c522e9e30ee7b5cfc250e1a72f01c8
|
Fix off-by-one-bug in MAX_ATTEMPTS implementation
|
background_task/models.py
|
background_task/models.py
|
from django.db import models
from django.db.models import Q
from django.conf import settings
from django.utils.encoding import python_2_unicode_compatible
import django
import inspect
from django.utils import timezone
from datetime import datetime, timedelta
from hashlib import sha1
import traceback
import logging
from compat import StringIO
from background_task.models_completed import CompletedTask
import json
# inspired by http://github.com/tobi/delayed_job
#
# Django 1.6 renamed Manager's get_query_set to get_queryset, and the old
# function will be removed entirely in 1.8. We work back to 1.4, so use a
# metaclass to not worry about it.
# from https://github.com/mysociety/mapit/blob/master/mapit/djangopatch.py#L14-L42
try:
from django.utils import six
except ImportError: # Django < 1.4.2
import six
if django.get_version() < '1.6':
class GetQuerySetMetaclass(type):
def __new__(cls, name, bases, attrs):
new_class = super(GetQuerySetMetaclass, cls).__new__(cls, name, bases, attrs)
old_method_name = 'get_query_set'
new_method_name = 'get_queryset'
for base in inspect.getmro(new_class):
old_method = base.__dict__.get(old_method_name)
new_method = base.__dict__.get(new_method_name)
if not new_method and old_method:
setattr(base, new_method_name, old_method)
if not old_method and new_method:
setattr(base, old_method_name, new_method)
return new_class
elif django.get_version() < '1.8':
# Nothing to do, make an empty metaclass
from django.db.models.manager import RenameManagerMethods
class GetQuerySetMetaclass(RenameManagerMethods):
pass
else:
class GetQuerySetMetaclass(type):
pass
class TaskManager(six.with_metaclass(GetQuerySetMetaclass, models.Manager)):
def find_available(self):
now = timezone.now()
qs = self.unlocked(now)
ready = qs.filter(run_at__lte=now, failed_at=None)
return ready.order_by('-priority', 'run_at')
def unlocked(self, now):
max_run_time = getattr(settings, 'MAX_RUN_TIME', 3600)
qs = self.get_queryset()
expires_at = now - timedelta(seconds=max_run_time)
unlocked = Q(locked_by=None) | Q(locked_at__lt=expires_at)
return qs.filter(unlocked)
def new_task(self, task_name, args=None, kwargs=None,
run_at=None, priority=0):
args = args or ()
kwargs = kwargs or {}
if run_at is None:
run_at = timezone.now()
task_params = json.dumps((args, kwargs))
s = "%s%s" % (task_name, task_params)
task_hash = sha1(s.encode('utf-8')).hexdigest()
return Task(task_name=task_name,
task_params=task_params,
task_hash=task_hash,
priority=priority,
run_at=run_at)
def get_task(self, task_name, args=None, kwargs=None):
args = args or ()
kwargs = kwargs or {}
task_params = json.dumps((args, kwargs))
s = "%s%s" % (task_name, task_params)
task_hash = sha1(s.encode('utf-8')).hexdigest()
qs = self.get_queryset()
return qs.filter(task_hash=task_hash)
def drop_task(self, task_name, args=None, kwargs=None):
return self.get_task(task_name, args, kwargs).delete()
@python_2_unicode_compatible
class Task(models.Model):
# the "name" of the task/function to be run
task_name = models.CharField(max_length=255, db_index=True)
# the json encoded parameters to pass to the task
task_params = models.TextField()
# a sha1 hash of the name and params, to lookup already scheduled tasks
task_hash = models.CharField(max_length=40, db_index=True)
# what priority the task has
priority = models.IntegerField(default=0, db_index=True)
# when the task should be run
run_at = models.DateTimeField(db_index=True)
# how many times the task has been tried
attempts = models.IntegerField(default=0, db_index=True)
# when the task last failed
failed_at = models.DateTimeField(db_index=True, null=True, blank=True)
# details of the error that occurred
last_error = models.TextField(blank=True)
# details of who's trying to run the task at the moment
locked_by = models.CharField(max_length=64, db_index=True,
null=True, blank=True)
locked_at = models.DateTimeField(db_index=True, null=True, blank=True)
objects = TaskManager()
def params(self):
args, kwargs = json.loads(self.task_params)
# need to coerce kwargs keys to str
kwargs = dict((str(k), v) for k, v in kwargs.items())
return args, kwargs
def lock(self, locked_by):
now = timezone.now()
unlocked = Task.objects.unlocked(now).filter(pk=self.pk)
updated = unlocked.update(locked_by=locked_by, locked_at=now)
if updated:
return Task.objects.get(pk=self.pk)
return None
def _extract_error(self, type, err, tb):
file = StringIO()
traceback.print_exception(type, err, tb, None, file)
return file.getvalue()
def reschedule(self, type, err, traceback):
self.last_error = self._extract_error(type, err, traceback)
max_attempts = getattr(settings, 'MAX_ATTEMPTS', 25)
if self.attempts >= max_attempts:
self.failed_at = timezone.now()
logging.warn('Marking task %s as failed', self)
else:
self.attempts += 1
backoff = timedelta(seconds=(self.attempts ** 4) + 5)
self.run_at = timezone.now() + backoff
logging.warn('Rescheduling task %s for %s later at %s', self,
backoff, self.run_at)
# and unlock
self.locked_by = None
self.locked_at = None
self.save()
def save(self, *arg, **kw):
# force NULL rather than empty string
self.locked_by = self.locked_by or None
return super(Task, self).save(*arg, **kw)
def __str__(self):
return u'Task(%s)' % self.task_name
class Meta:
db_table = 'background_task'
|
Python
| 0.000008
|
@@ -5409,16 +5409,43 @@
ceback)%0A
+ self.attempts += 1%0A
@@ -5497,17 +5497,16 @@
S', 25)%0A
-%0A
@@ -5661,39 +5661,8 @@
se:%0A
- self.attempts += 1%0A
|
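Note on the diff above: the off-by-one came from comparing a stale count - attempts was only incremented on the reschedule branch, after the >= max_attempts check, so every task got one extra run. The fix counts the failed attempt first, then decides. A minimal sketch with plain ints:

MAX_ATTEMPTS = 25

def reschedule(attempts):
    """Return (attempts, failed): count this failure, then decide."""
    attempts += 1  # count the attempt that just failed, before comparing
    if attempts >= MAX_ATTEMPTS:
        return attempts, True   # mark the task as failed
    return attempts, False      # schedule another run

assert reschedule(24) == (25, True)   # the 25th failure is the last
assert reschedule(23) == (24, False)  # still retried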
900d872d4d1f8a593f25ac982e48ac86660955fd
|
Store name unique
|
bazaar/listings/models.py
|
bazaar/listings/models.py
|
from __future__ import unicode_literals
from django.db import models
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
from ..fields import MoneyField
from ..goods.models import Product
@python_2_unicode_compatible
class Listing(models.Model):
title = models.CharField(max_length=100)
description = models.TextField(max_length=500, blank=True)
sales_units = models.IntegerField(default=1)
# TODO: this should become a gallery
image = models.ImageField(upload_to="listing_images")
product = models.ManyToManyField(Product, related_name="listings")
def __str__(self):
return self.title
@python_2_unicode_compatible
class Store(models.Model):
name = models.CharField(max_length=100)
url = models.URLField(blank=True)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Publishing(models.Model):
external_id = models.CharField(max_length=128)
price = MoneyField()
available_units = models.IntegerField()
published = models.BooleanField(default=False)
last_update = models.DateTimeField(default=timezone.now)
listing = models.ForeignKey(Listing, related_name="publishings")
store = models.ForeignKey(Store, related_name="publishings")
def __str__(self):
return "Publishing %s on %s" % (self.external_id, self.store)
|
Python
| 0.999952
|
@@ -761,24 +761,37 @@
x_length=100
+, unique=True
)%0A url =
|
19df232461679b3156f9d5889d59f095e0b97d60
|
Add CAN_DETECT
|
bears/yml/RAMLLintBear.py
|
bears/yml/RAMLLintBear.py
|
from coalib.bearlib.abstractions.Linter import linter
from coalib.bears.requirements.NpmRequirement import NpmRequirement
@linter(executable='ramllint',
output_format='regex',
output_regex=r'(?P<severity>error|warning|info).*\n (?P<message>.+) '
r'\[(?P<origin>.+)\]')
class RAMLLintBear:
"""
RAML Linter is a static analysis, linter-like, utility that will enforce
rules on a given RAML document, ensuring consistency and quality.
Note: Files should not have leading empty lines, else the bear fails to
identify the problems correctly.
"""
LANGUAGES = {"RAML"}
REQUIREMENTS = {NpmRequirement('ramllint', '1.2.2')}
AUTHORS = {'The coala developers'}
AUTHORS_EMAILS = {'coala-devel@googlegroups.com'}
LICENSE = 'AGPL-3.0'
@staticmethod
def create_arguments(filename, file, config_file):
return filename,
|
Python
| 0.998251
|
@@ -798,16 +798,58 @@
GPL-3.0'
+%0A CAN_DETECT = %7B'Syntax', 'Formatting'%7D
%0A%0A @s
|
d399f1910df7a14b9f2f36ef1d08cb7bdb839781
|
Revise to t_char_count_d and comments
|
lc0076_minimum_window_substring.py
|
lc0076_minimum_window_substring.py
|
"""Leetcode 76. Minimum Window Substring
Hard
URL: https://leetcode.com/problems/minimum-window-substring/
Given a string S and a string T, find the minimum window in S which will contain
all the characters in T in complexity O(n).
Example:
Input: S = "ADOBECODEBANC", T = "ABC"
Output: "BANC"
Note:
- If there is no such window in S that covers all characters in T, return the
empty string "".
- If there is such window, you are guaranteed that there will always be only one
unique minimum window in S.
"""
class SolutionCharCountDictTwoPointers(object):
def minWindow(self, s, t):
"""
:type s: str
:type t: str
:rtype: str
Time complexity: O(m+n), where
- m: length of s,
- n: length of t.
Space complexity: O(m+n).
"""
from collections import defaultdict
s_len, t_len = len(s), len(t)
# Use dict to collect char counts of t.
t_char_counts = defaultdict(int)
for c in t:
t_char_counts[c] += 1
# Track min left & len, and counter.
min_left = 0
min_len = float('inf')
counter = t_len
### s = "ADOBECODEBANC"; t = "ABC"
# Apply two pointers method with left & right from head as a window.
left, right = 0, 0
# In s, move right to increase window to satisfy t.
while right < s_len:
# If the char exists in t, decrement counter.
if t_char_counts[s[right]] > 0:
counter -= 1
# Decrement t_char_counts and increment right.
t_char_counts[s[right]] -= 1
right += 1
# While we found valid window, move left to shorten it.
while counter == 0:
# Update min_len and min_left if the window improves min_len.
if right - left < min_len:
min_len = right - left
min_left = left
# Before increment left, add back t_char_counts & counter.
t_char_counts[s[left]] += 1
if t_char_counts[s[left]] > 0:
counter += 1
left += 1
if min_len < float('inf'):
return s[min_left:(min_left + min_len)]
else:
return ''
def main():
# Output: "BANC"
s = "ADOBECODEBANC"
t = "ABC"
print SolutionCharCountDictTwoPointers().minWindow(s, t)
if __name__ == '__main__':
main()
|
Python
| 0.000001
|
@@ -854,47 +854,8 @@
ct%0A%0A
- s_len, t_len = len(s), len(t)%0A%0A
@@ -884,23 +884,22 @@
ect
+t's
char
-
+-%3E
coun
-ts of
t.%0A
@@ -917,17 +917,18 @@
ar_count
-s
+_d
= defau
@@ -983,17 +983,18 @@
ar_count
-s
+_d
%5Bc%5D += 1
@@ -1031,16 +1031,18 @@
en, and
+t_
counter.
@@ -1094,24 +1094,25 @@
('inf')%0A
+%0A
counter
@@ -1095,32 +1095,34 @@
'inf')%0A%0A
+t_
counter = t_len%0A
@@ -1119,57 +1119,15 @@
r =
-t_
len
-%0A%0A ### s = %22ADOBECODEBANC%22; t = %22ABC%22
+(t)%0A
%0A
@@ -1156,15 +1156,8 @@
ers
-method
with
@@ -1306,13 +1306,14 @@
t %3C
-s_
len
+(s)
:%0A
@@ -1327,19 +1327,21 @@
# If
-the
+right
char ex
@@ -1360,16 +1360,18 @@
crement
+t_
counter.
@@ -1390,33 +1390,34 @@
if t_char_count
-s
+_d
%5Bs%5Bright%5D%5D %3E 0:%0A
@@ -1424,32 +1424,34 @@
+t_
counter -= 1%0A%0A
@@ -1484,17 +1484,18 @@
ar_count
-s
+_d
and inc
@@ -1532,17 +1532,18 @@
ar_count
-s
+_d
%5Bs%5Bright
@@ -1599,28 +1599,25 @@
le w
-e found valid window
+indow satisfies t
, mo
@@ -1657,16 +1657,18 @@
while
+t_
counter
@@ -1928,12 +1928,15 @@
ount
-s
+_d
&
+t_
coun
@@ -1960,33 +1960,34 @@
t_char_count
-s
+_d
%5Bs%5Bleft%5D%5D += 1%0A
@@ -2016,17 +2016,18 @@
ar_count
-s
+_d
%5Bs%5Bleft%5D
@@ -2053,16 +2053,18 @@
+t_
counter
@@ -2354,16 +2354,113 @@
(s, t)%0A%0A
+ s = %22ABBBBBBBBBA%22%0A t = %22AA%22%0A print SolutionCharCountDictTwoPointers().minWindow(s, t)%0A%0A
%0Aif __na
|
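A quick trace of the solution above: for s = "ADOBECODEBANC" and t = "ABC", right expands until the window holds one A, one B and one C ("ADOBEC"), left then shrinks whenever the window stays valid, and the best window found is "BANC". The same sliding-window idea in a compact Counter-based form, as a self-check (a different formulation, not the class above):

from collections import Counter

def min_window(s, t):
    need = Counter(t)
    missing = len(t)
    best, left = '', 0
    for right, ch in enumerate(s, 1):  # right = exclusive end of window
        if need[ch] > 0:
            missing -= 1
        need[ch] -= 1
        while missing == 0:  # window covers t: record it, shrink from left
            if not best or right - left < len(best):
                best = s[left:right]
            need[s[left]] += 1
            if need[s[left]] > 0:
                missing += 1
            left += 1
    return best

assert min_window("ADOBECODEBANC", "ABC") == "BANC"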
4ccd5fff46c98c3927dc7e85f961dc7e75dba434
|
Fix temperature sampler loop termination condition:
|
examples/lm1b/temperature_sampler.py
|
examples/lm1b/temperature_sampler.py
|
# Copyright 2021 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast decoding routines for inference from a trained language model."""
from jax import lax
from jax import random
import jax.numpy as jnp
# Constants
# The default End-of-Sentence token id is 2 (SentencePiece).
EOS_ID = 2
def temperature_sample(prompt_inputs,
init_cache,
tokens_to_logits,
prng_key,
temperature=1.0,
topk=20,
eos_token=EOS_ID):
"""Temperature sampling for language model generation.
Args:
prompt_inputs: array: [batch_size, max_decode_len] int32 sequence of tokens.
init_cache: flax attention cache.
tokens_to_logits: fast autoregressive decoder function taking single token
slices and cache and returning next-token logits and updated cache.
prng_key: JAX PRNGKey.
temperature: float: sampling temperature factor. As it approaches
zero this becomes equivalent to greedy sampling.
topk: integer: if nonzero only use the top-k logits to sample next token,
if zero don't use any cutoff and sample from full logits over vocabulary.
eos_token: int: end-of-sentence token for target vocabulary.
Returns:
Array of sampled sequences: [batch_size, max_decode_len]
"""
batch_size = prompt_inputs.shape[0]
max_decode_len = prompt_inputs.shape[1]
end_marker = jnp.array(eos_token)
temperature = jnp.array(temperature)
# Initialize sampling loop state.
# initial loop PRNGKey
rng0 = prng_key
# loop position counter.
i0 = jnp.array(0)
# per batch-item holding current token in loop.
token0 = jnp.zeros((batch_size, 1), dtype=jnp.int32)
# per batch-item state bit indicating if sentence has finished.
ended0 = jnp.zeros((batch_size, 1), dtype=jnp.bool_)
# (batch, length) array containing prefix prompt tokens for sampling loop
# as well as the generated output of newly sampled tokens.
sequences0 = prompt_inputs
# Sampling loop state is stored in a simple tuple.
sampling_loop_init_state = (i0, sequences0, init_cache, token0, ended0, rng0)
def sampling_loop_cond_fn(state):
"""Sampling loop termination condition."""
(i, _, _, _, ended, _) = state
# Have we reached max decoding length?
not_at_end = (i <= max_decode_len)
# Have all sampled sequences reached an end marker?
all_sequences_ended = jnp.all(ended)
return not_at_end & (~all_sequences_ended)
def sampling_loop_body_fn(state):
"""Sampling loop state update."""
i, sequences, cache, cur_token, ended, rng = state
# Split RNG for sampling.
rng1, rng2 = random.split(rng)
# Call fast-decoder model on current tokens to get next-position logits.
logits, new_cache = tokens_to_logits(cur_token, cache)
# Sample next token from logits.
# TODO(levskaya): add top-p "nucleus" sampling option.
if topk:
# Get top-k logits and their indices, sample within these top-k tokens.
topk_logits, topk_idxs = lax.top_k(logits, topk)
topk_token = jnp.expand_dims(random.categorical(
rng1, topk_logits / temperature).astype(jnp.int32), axis=-1)
# Return the original indices corresponding to the sampled top-k tokens.
next_token = jnp.squeeze(
jnp.take_along_axis(topk_idxs, topk_token, axis=-1), axis=-1)
else:
next_token = random.categorical(
rng1, logits / temperature).astype(jnp.int32)
# Only use sampled tokens if we're past provided prefix tokens.
out_of_prompt = (sequences[:, i+1] == 0)
next_token = (next_token * out_of_prompt +
sequences[:, i+1] * ~out_of_prompt)
# If end-marker reached for batch item, only emit padding tokens.
next_token_or_endpad = next_token * ~ended
ended |= (next_token_or_endpad == end_marker)
# Add current sampled tokens to recorded sequences.
new_sequences = lax.dynamic_update_slice(
sequences, next_token_or_endpad, (0, i+1))
return (i+1, new_sequences, new_cache, next_token_or_endpad, ended, rng2)
# Run sampling loop and collect final state.
final_state = lax.while_loop(sampling_loop_cond_fn,
sampling_loop_body_fn,
sampling_loop_init_state)
# Pick part of the state corresponding to the sampled sequences.
final_sequences = final_state[1]
return final_sequences
|
Python
| 0.996778
|
@@ -2848,17 +2848,16 @@
d = (i %3C
-=
max_dec
|
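Note on the diff above: the loop body writes the sampled token into slot i+1, so iterating while i <= max_decode_len runs one step too many. In plain Python that overflow would be an IndexError; with lax.dynamic_update_slice the index is silently clamped, so the symptom is a wasted, misplaced final update rather than a crash. A toy illustration:

max_decode_len = 4
sequence = [1] + [0] * (max_decode_len - 1)  # prompt token + empty slots

i = 0
while i < max_decode_len - 1:   # body fills slot i + 1: keep it in range
    sequence[i + 1] = 100 + i   # stand-in for "sample the next token"
    i += 1

# A <= bound here would index sequence[max_decode_len] on the last pass.
assert sequence == [1, 100, 101, 102]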
9513011caa73cbfa4aec2b96070f482466dde490
|
Fix urlshorten output
|
plugins/internet/url.py
|
plugins/internet/url.py
|
# -*- coding: utf-8 -*-
import bot
from html.parser import HTMLParser
class LinksParser(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self.recording = 0
self.data = []
def handle_starttag(self, tag, attributes):
if tag != 'title':
return
if self.recording:
self.recording += 1
return
self.recording = 1
def handle_endtag(self, tag):
if tag == 'title' and self.recording:
self.recording -= 1
def handle_data(self, data):
if self.recording:
self.data.append(data)
class Module(bot.Module):
index = "url"
def register(self):
self.addcommand(
self.title,
"title",
"Get the title of a url.",
["url"])
self.addcommand(
self.urlshorten,
"urlshorten",
"Shorten a url using is.gd",
["-shorturl=custom ending", "-service=is.gd or v.gd", "url"]
)
def title(self, context, args):
try:
r = self.server.rget("http.url").request(args.getstr("url"),
timeout=4)
except self.server.rget("http.url").Error:
return "Error while trying to read that url."
p = LinksParser()
p.feed(r.read())
p.close()
return p.data[-1] if p.data else "No title found."
def urlshorten(self, context, args):
args.default("service", "is.gd")
args.default("shorturl", "")
shorturl = args.getstr("shorturl")
service = args.getstr("service")
if service in ['v.gd', 'is.gd']:
params = {
"url": args.getstr("url"),
"format": "simple",
"shorturl": shorturl
}
serviceurl = "http://" + service + "/create.php"
http = self.server.rget("http.url")
try:
r = http.request(serviceurl,
timeout=4,
params=params)
except http.HTTPError as error:
r = error
return r.read().decode("utf-8")
else:
return "Service must be is.gd or v.gd."
bot.register.module(Module)
|
Python
| 0.999998
|
@@ -1987,18 +1987,21 @@
r
- =
+eturn
http.re
@@ -2109,16 +2109,23 @@
=params)
+.read()
%0A
@@ -2169,30 +2169,8 @@
- r = error%0A
@@ -2184,16 +2184,20 @@
return
+erro
r.read()
|
824a2a547218febf61aed8d99eff5ddeeaf6f5ca
|
Remove unused imports
|
polyaxon/libs/models.py
|
polyaxon/libs/models.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from django.core.validators import validate_slug
from django.db import models
from django.core.cache import cache
from libs.blacklist import validate_blacklist_name
class DescribableModel(models.Model):
description = models.TextField(blank=True, null=True)
class Meta:
abstract = True
class DiffModel(models.Model):
created_at = models.DateTimeField(auto_now_add=True, db_index=True)
updated_at = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
class TypeModel(models.Model):
name = models.CharField(max_length=128, unique=True)
schema_definition = models.TextField()
class Meta:
abstract = True
def __str__(self):
return self.name
class Singleton(DiffModel):
"""A base model to represents a singleton."""
class Meta:
abstract = True
def set_cache(self):
cache.set(self.__class__.__name__, self)
def save(self, *args, **kwargs):
self.pk = 1
super(Singleton, self).save(*args, **kwargs)
self.set_cache()
def delete(self, *args, **kwargs):
pass
@classmethod
def may_be_update(cls, obj):
raise NotImplementedError
@classmethod
def load(cls):
raise NotImplementedError
|
Python
| 0.000001
|
@@ -87,57 +87,8 @@
on%0A%0A
-from django.core.validators import validate_slug%0A
from
@@ -153,60 +153,8 @@
he%0A%0A
-from libs.blacklist import validate_blacklist_name%0A%0A
%0Acla
|
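Aside on the Singleton base above: save() pins pk=1 so every save overwrites the single row and refreshes the cache, while delete() is a no-op. A sketch of how a concrete subclass and its load() typically look (the field name and the get_or_create fallback are illustrative assumptions, not part of this file):

from django.core.cache import cache
from django.db import models

class SiteSettings(Singleton):
    maintenance_mode = models.BooleanField(default=False)

    @classmethod
    def load(cls):
        # Serve from cache when possible; fall back to the single DB row.
        obj = cache.get(cls.__name__)
        if obj is None:
            obj, _ = cls.objects.get_or_create(pk=1)
            obj.set_cache()
        return obj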
814cc6cef757c3eef775240c749a098b1288eef3
|
Enable searching for an image in the admin
|
pombola/images/admin.py
|
pombola/images/admin.py
|
from django.contrib import admin
from django.contrib.contenttypes.generic import GenericTabularInline
from sorl.thumbnail import get_thumbnail
from sorl.thumbnail.admin import AdminImageMixin
from pombola.images import models
class ImageAdmin(AdminImageMixin, admin.ModelAdmin):
list_display = [ 'thumbnail', 'content_object', 'is_primary', 'source', ]
def thumbnail(self, obj):
if obj.image:
im = get_thumbnail(obj.image, '100x100')
return '<img src="%s" />' % ( im.url )
else:
return "NO IMAGE FOUND"
thumbnail.allow_tags = True
class ImageAdminInline(AdminImageMixin, GenericTabularInline):
model = models.Image
extra = 0
can_delete = True
admin.site.register( models.Image, ImageAdmin )
|
Python
| 0
|
@@ -358,16 +358,75 @@
rce', %5D
+%0A search_fields = %5B'person__legal_name', 'id', 'source'%5D
%0A%0A de
|
c99d5d30a698aafe3e554c48c9a47dd8be1a5575
|
Use imap instead of map
|
library.py
|
library.py
|
import json
import logging
import os
import subprocess
import urllib
import grequests
import numpy
logging.basicConfig()
logger = logging.getLogger("recheck")
logger.setLevel(logging.DEBUG)
def get_change_ids(repo_path, subtree=None, since="6.months"):
"""Return array of change-Ids of merged patches.
returns list starting with most recent change
repo_path: file path of repo
since: how far back to look
"""
change_ids = []
cwd = os.getcwd()
os.chdir(repo_path)
command = "git log --no-merges --since=%s master" % since
if subtree:
command = command + " " + subtree
log = subprocess.check_output(command.split(' '))
os.chdir(cwd)
lines = log.splitlines()
for line in lines:
if line.startswith(" Change-Id: "):
change_id = line.split()[1]
if len(change_id) != 41 or change_id[0] != "I":
raise Exception("Invalid Change-Id: %s" % change_id)
change_ids.append(change_id)
return change_ids
def query_gerrit(template, change_ids, repo_name):
"""query gerrit."""
queries = []
template = "https://review.openstack.org" + template
for change_id in change_ids:
# ChangeIDs can be used in multiple branches/repos
patch_id = urllib.quote_plus("%s~master~" % repo_name) + change_id
queries.append(template % patch_id)
unsent = (grequests.get(query) for query in queries)
for r in grequests.map(unsent, size=10):
try:
yield json.loads(r.text[4:])
except AttributeError:
# request must have failed, ignore it and move on
logger.debug("failed to parse gerrit response")
pass
def get_change_details(change_ids, repo_name):
"""get gerrit change details for a list of change_id.
Returns a generator
"""
return query_gerrit("/changes/%s/detail", change_ids, repo_name)
def get_latest_revision(change_ids, repo_name):
"""get latest revisions for a list of change_ids.
Returns a generator
"""
return query_gerrit("/changes/%s/revisions/current/review",
change_ids, repo_name)
def stats(values):
print "Average: %s" % numpy.mean(values)
print "median: %s" % numpy.median(values)
print "variance: %s" % numpy.var(values)
|
Python
| 0
|
@@ -1459,16 +1459,17 @@
equests.
+i
map(unse
|
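Note on the diff above: grequests.map sends everything and materializes the full response list before the caller's loop starts; grequests.imap returns a generator that yields responses as they complete (completion order, not submission order), which fits the streaming yield in query_gerrit. A minimal sketch:

import grequests

urls = ["https://httpbin.org/delay/%d" % n for n in (2, 1, 0)]
unsent = (grequests.get(u) for u in urls)

# imap is lazy: responses arrive as they finish, so processing starts
# before the slowest request returns; failed requests are skipped (or
# routed to an exception_handler) instead of appearing as None entries
# the way they do with map.
for response in grequests.imap(unsent, size=10):
    print(response.url, response.status_code)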
b0f5913d5f775062b8d5e253e1403b995b67c81a
|
Bump to version 3.2.0
|
post_office/__init__.py
|
post_office/__init__.py
|
VERSION = (3, 2, 0, 'dev')
from .backends import EmailBackend
default_app_config = 'post_office.apps.PostOfficeConfig'
|
Python
| 0
|
@@ -15,15 +15,8 @@
2, 0
-, 'dev'
)%0A%0Af
|
f776c3f05a30375d1082c8b2ff1c346822777c0c
|
debug output slipped through
|
pprof/utils/compiler.py
|
pprof/utils/compiler.py
|
from pprof.settings import config
def lt_clang(cflags, ldflags, func=None):
"""Return a clang that hides :cflags: and :ldflags: from reordering of
libtool.
This will generate a wrapper script in the current working directory and
return a path to it.
:cflags: the cflags libtool is not allowed to see.
:ldflags: the ldflags libtool is not allowed to see.
:returns: path to the new clang.
"""
from plumbum import local
print_libtool_sucks_wrapper("clang", cflags, ldflags, clang, func)
return local["./clang"]
def lt_clang_cxx(cflags, ldflags, func=None):
"""Return a clang that hides :cflags: and :ldflags: from reordering of
libtool.
This will generate a wrapper script in the current working directory and
return a path to it.
:cflags: the cflags libtool is not allowed to see.
:ldflags: the ldflags libtool is not allowed to see.
:returns: path to the new clang.
"""
from plumbum import local
print_libtool_sucks_wrapper("clang++", cflags, ldflags, clang_cxx, func)
return local["./clang++"]
def print_libtool_sucks_wrapper(filepath, cflags, ldflags, compiler, func):
"""Print a libtool wrapper that hides :flags_to_hide: from libtool.
:filepath:
Where should the new compiler be?
:flags_to_hide:
List of flags that should be hidden from libtool
:compiler:
The compiler we should actually call
"""
from plumbum.cmd import chmod
from cloud.serialization import cloudpickle as cp
from pprof.project import PROJECT_BLOB_F_EXT
from os.path import abspath
blob_f = abspath(filepath + PROJECT_BLOB_F_EXT)
if func is not None:
with open(blob_f, 'wb') as b:
b.write(cp.dumps(func))
with open(filepath, 'w') as wrapper:
lines = '''#!/usr/bin/env python
# encoding: utf-8
from plumbum import ProcessExecutionError, local, FG
from pprof.experiment import to_utf8
from os import path
import pickle
cc=local[\"{CC}\"]
cflags={CFLAGS}
ldflags={LDFLAGS}
from sys import argv
import os
import sys
def call_original_compiler(input_files, cc, cflags, ldflags, flags):
final_command = None
retcode=0
try:
if len(input_files) > 0:
if "-c" in flags:
final_command = cc["-Qunused-arguments", cflags, flags]
else:
final_command = cc["-Qunused-arguments", cflags, flags, ldflags]
else:
final_command = cc["-Qunused-arguments", flags]
retcode, stdout, stderr = final_command.run()
if len(stdout) > 0:
print stdout
if len(stderr) > 0:
print stderr
except ProcessExecutionError as e:
sys.stderr.write(to_utf8(str(e.stderr)))
sys.stderr.flush()
sys.exit(e.retcode)
return (retcode, final_command)
input_files = [ x for x in argv[1:] if not '-' is x[0] ]
flags = argv[1:]
f = None
if path.exists("{blobf}"):
with open("{blobf}", "rb") as p:
f = pickle.load(p)
with local.env(PPROF_DB_HOST="{db_host}",
PPROF_DB_PORT="{db_port}",
PPROF_DB_NAME="{db_name}",
PPROF_DB_USER="{db_user}",
PPROF_DB_PASS="{db_pass}"):
retcode, final_cc = call_original_compiler(input_files, cc, cflags,
ldflags, flags)
with local.env(PPROF_CMD=str(final_cc)):
print "Fun: {{}}".format(f)
if f is not None:
if not sys.stdin.isatty():
f(final_cc, has_stdin = True)
else:
f(final_cc)
sys.exit(retcode)
'''.format(CC=str(compiler()), CFLAGS=cflags, LDFLAGS=ldflags,
blobf=blob_f, db_host=config["db_host"],
db_name=config["db_name"], db_user=config["db_user"],
db_pass=config["db_pass"], db_port=config["db_port"])
wrapper.write(lines)
chmod("+x", filepath)
def llvm():
from os import path
return path.join(config["llvmdir"], "bin")
def llvm_libs():
from os import path
return path.join(config["llvmdir"], "lib")
def clang_cxx():
from os import path
from plumbum import local
return local[path.join(llvm(), "clang++")]
def clang():
from os import path
from plumbum import local
return local[path.join(llvm(), "clang")]
|
Python
| 0.000055
|
@@ -3377,44 +3377,8 @@
)):%0A
- print %22Fun: %7B%7B%7D%7D%22.format(f)%0A
|
f28732596487a2a0fc664c5444e618ce5c23eccd
|
fix usage
|
bin/extract_darkmatter.py
|
bin/extract_darkmatter.py
|
#!/usr/bin/env python
import argparse
import leveldb
import os
import shutil
import sys
from Bio import SeqIO
def main(args):
parser = argparse.ArgumentParser(description="Script to extract darkmatter - predicted proteins with no similarities")
parser.add_argument("-i", "--input", help="Name of input genecall fasta file.")
parser.add_argument("-o", "--output", help="Name of output darkmatter fasta file.")
parser.add_argument("-s", "--sims", dest="cfile", help="Name of similarity file")
parser.add_argument("-d", "--db", dest="db", default=".", help="Directory to store LevelDB, default CWD")
parser.add_argument("-v", "--verbose", dest="verbose", action="store_true", help="Print informational messages")
args = parser.parse_args()
if ('sims' not in args) or (os.stat(args.sims).st_size == 0):
print "Similarity file was omitted or is empty, copying %s to %s ... " % (args.input, args.output)
shutil.copyfile(args.input, args.output)
return 0
db = leveldb.LevelDB(args.db)
shdl = open(args.sims, 'rU')
if args.verbose:
print "Reading file %s ... " % args.sims
for line in shdl:
parts = line.strip().split('\t')
db.Put(parts[0], '1')  # leveldb values must be strings, not ints
shdl.close()
if args.verbose:
print "Done"
print "Reading file %s ... " % args.input
ihdl = open(args.input, 'rU')
ohdl = open(args.output, 'w')
g_num = 0
d_num = 0
for rec in SeqIO.parse(ihdl, 'fasta'):
g_num += 1
try:
val = db.Get(rec.id)
except KeyError:
d_num += 1
ohdl.write("%s\n%s\n"%(rec.id, str(rec.seq).upper()))
ihdl.close()
ohdl.close()
if args.verbose:
print "Done: %d darkmatter genes found out of %d total" %(d_num, g_num)
return 0
if __name__ == "__main__":
sys.exit( main(sys.argv) )
|
Python
| 0.000002
|
@@ -280,24 +280,38 @@
, %22--input%22,
+ dest=%22input%22,
help=%22Name
@@ -379,24 +379,39 @@
%22--output%22,
+ dest=%22output%22,
help=%22Name
@@ -495,13 +495,12 @@
st=%22
-cfile
+sims
%22, h
|
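Note on the diff above: add_argument("-s", "--sims", dest="cfile") stores the parsed value as args.cfile, while the script reads args.sims, which raises AttributeError; the fix aligns dest with the attribute actually read (and adds explicit dests for input/output). A tiny demonstration:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("-s", "--sims", dest="cfile")  # value lands on args.cfile
args = parser.parse_args(["--sims", "sims.txt"])

assert args.cfile == "sims.txt"
assert not hasattr(args, "sims")  # args.sims would raise AttributeError

# Matching dest to the attribute the code reads fixes the mismatch:
fixed = argparse.ArgumentParser()
fixed.add_argument("-s", "--sims", dest="sims")
assert fixed.parse_args(["--sims", "sims.txt"]).sims == "sims.txt"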
f93e23db1d5cedbdc75ef6b412f52f8b3800a270
|
use more versatile valueByTag mechanism
|
applications/plugins/RigidScale/python/RigidScale/sml.py
|
applications/plugins/RigidScale/python/RigidScale/sml.py
|
import Sofa
import RigidScale.API
import SofaPython.sml
import Compliant.StructuralAPI
import Compliant.sml
printLog = True
def insertRigidScale(parentNode, solidModel, param):
""" create a RigidScale.API.ShearlessAffineBody from the solidModel
"""
if printLog:
Sofa.msg_info("RigidScale.sml", "insertRigidScale "+solidModel.name)
body = RigidScale.API.ShearlessAffineBody(parentNode, solidModel.name)
# massinfo = SofaPython.sml.getSolidRigidMassInfo(rigidModel, density)
# body.setFromRigidInfo(massinfo, offset=solidModel.position , inertia_forces = False )
if (not len(solidModel.mesh)==1):
Sofa.msg_warning("RigidScale.sml", "insertRigidScale support only single mesh solid (nb meshes={0}) - solid {1} ignored".format(len(solidModel.mesh), solidModel.name))
return None
body.setFromMesh(solidModel.mesh[0].source, voxelSize=SofaPython.units.length_from_SI(param.voxelSize), density=SofaPython.units.massDensity_from_SI(1000.), offset=solidModel.position)
body.addElasticBehavior("behavior", stiffness=SofaPython.units.elasticity_from_SI(param.rigidScaleStiffness), poissonCoef=0, numberOfGaussPoint=8)
cm = body.addCollisionMesh(solidModel.mesh[0].source, offset=solidModel.position)
cm.addVisualModel()
body.affineDofs.showObject=param.showAffine
body.affineDofs.showObjectScale=SofaPython.units.length_from_SI(param.showAffineScale)
return body
class SceneArticulatedRigidScale(SofaPython.sml.BaseScene):
""" Builds a (sub)scene from a model using compliant formulation
[tag] solid tagged with rigidScale are simulated as ShearlessAffineBody, more tags can be added to param.rigidScaleTags
[tag] mesh group tagged with rigidScalePosition are used to compute (barycenter) the positions of a rigidScale
Compliant joints are setup between the bones """
def __init__(self, parentNode, model):
SofaPython.sml.BaseScene.__init__(self, parentNode, model)
self.rigidScales = dict()
self.joints = dict()
## params
# the set of tags simulated as rigids
self.param.rigidScaleTags={"rigidScale"}
self.param.voxelSize = 0.005 # SI unit (m)
# simulation
self.param.jointIsCompliance = False
self.param.jointCompliance = 1e-6
self.param.rigidScaleStiffness = 10e3 # SI unit
# for tagged joints, values come from these dictionnaries if they contain one of the tag
self.param.jointIsComplianceByTag=dict()
self.param.jointComplianceByTag=dict()
# visual
self.param.showAffine=False
self.param.showAffineScale=0.05 # SI unit (m)
self.param.showOffset=False
self.param.showOffsetScale=0.01 # SI unit (m)
def createScene(self):
self.node.createObject('RequiredPlugin', name='image')
self.node.createObject('RequiredPlugin', name='Flexible')
self.node.createObject('RequiredPlugin', name='Compliant')
self.node.createObject('RequiredPlugin', name='RigidScale')
# rigidScale
for tag in self.param.rigidScaleTags:
if tag in self.model.solidsByTag:
for solidModel in self.model.solidsByTag[tag]:
self.rigidScales[solidModel.id] = insertRigidScale(self.node, solidModel, self.param)
# joints
for jointModel in self.model.genericJoints.values():
self.joints[jointModel.id] = Compliant.sml.insertJoint(jointModel, self.rigidScales, self.param)
|
Python
| 0
|
@@ -2221,95 +2221,8 @@
ion%0A
- self.param.jointIsCompliance = False%0A self.param.jointCompliance = 1e-6%0A
@@ -2273,16 +2273,16 @@
SI unit%0A
+
@@ -2370,16 +2370,17 @@
the tag%0A
+%0A
@@ -2444,38 +2444,153 @@
am.joint
-ComplianceByTag=dict()
+IsComplianceByTag%5B%22default%22%5D=False%0A self.param.jointComplianceByTag=dict()%0A self.param.jointComplianceByTag%5B%22default%22%5D=1e-6
%0A%0A
|
a5857bc5b019dda8baca03bd68f08b4a26a85911
|
add import module in init file.
|
biokit/rtools/__init__.py
|
biokit/rtools/__init__.py
|
Python
| 0
|
@@ -0,0 +1,22 @@
+from .rtools import *%0A
|
|
5ac8d6824a53c05ac233c9dcaf7d39171bafed31
|
add params to the grid
|
fedoracommunity/mokshaapps/demos/controllers/root.py
|
fedoracommunity/mokshaapps/demos/controllers/root.py
|
# This file is part of Fedora Community.
# Copyright (C) 2008-2009 Red Hat, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from moksha.lib.base import Controller
from tg import expose, tmpl_context
from moksha.api.widgets import ContextAwareWidget, Grid
from moksha.api.widgets.containers import DashboardContainer
from moksha.lib.helpers import Category, MokshaApp
from tw.api import Widget, JSLink, js_function
from tg import config
orbited_host = config.get('orbited_host', 'localhost')
orbited_port = config.get('orbited_port', 9000)
if orbited_port:
orbited_url = '%s:%s' % (orbited_host, orbited_port)
else:
orbited_url = orbited_host
orbited_js = JSLink(link=orbited_url + '/static/Orbited.js')
kamaloka_protocol_js = JSLink(modname='fedoracommunity.mokshaapps.demos',
filename='js/amqp.protocol.js',
javascript=[orbited_js])
kamaloka_protocol_0_10_js = JSLink(modname='fedoracommunity.mokshaapps.demos',
filename='js/amqp.protocol_0_10.js',
javascript=[kamaloka_protocol_js])
kamaloka_qpid_js = JSLink(modname='fedoracommunity.mokshaapps.demos',
filename='js/qpid_amqp.js',
javascript=[kamaloka_protocol_0_10_js])
timeping_demo_app = MokshaApp('Timeping AMQP Demo', 'fedoracommunity.demos/timeping_demo',
content_id='timeping_demo',
params={'rows_per_page': 10,
'show_title': True,
'filters':{}
})
class DemoContainer(DashboardContainer, ContextAwareWidget):
layout = [Category('full_sized_demo_apps',
timeping_demo_app)
]
demo_container = DemoContainer('demo')
class TimepingGrid(Grid, ContextAwareWidget):
template='mako:fedoracommunity.mokshaapps.demos.templates.timeping_grid'
javascript=Grid.javascript + [kamaloka_qpid_js]
params=[]
resource=None
resource_path=None
timeping_demo_grid = TimepingGrid('timeping_grid')
class RootController(Controller):
@expose('mako:moksha.templates.widget')
def index(self):
options = {}
tmpl_context.widget = demo_container
return {'options':options}
@expose('mako:moksha.templates.widget')
def timeping_demo(self, **kwds):
options = {'orbited_port': orbited_port,
'orbited_host': orbited_host}
tmpl_context.widget = timeping_demo_grid
return {'options':options}
|
Python
| 0.000001
|
@@ -2688,16 +2688,46 @@
params=%5B
+'orbited_port', 'orbited_host'
%5D%0A re
@@ -2757,24 +2757,75 @@
e_path=None%0A
+ orbited_port=9000%0A orbited_host='localhost'%0A
%0Atimepin
|
ca856016d54e4ca19c9b6701f2a4f1061bfb2fda
|
Wrong column name
|
printer_tray/printer.py
|
printer_tray/printer.py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Yannick Vaucher
# Copyright 2013 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import cups
from openerp import models, fields, api
class Printer(models.Model):
_inherit = 'printing.printer'
tray_ids = fields.One2many(comodel_name='printing.tray',
inverse_name='printer_id',
string='Paper Sources')
@api.multi
def _prepare_update_from_cups(self, cups_connection, cups_printer):
vals = super(Printer, self)._prepare_update_from_cups(cups_connection,
cups_printer)
ppd_file_path = cups_connection.getPPD3(self.system_name)
if not ppd_file_path[2]:
return vals
ppd = cups.PPD(ppd_file_path[2])
option = ppd.findOption('InputSlot')
if not option:
return vals
vals_trays = []
tray_names = set(tray.system_name for tray in self.tray_ids)
for tray_option in option.choices:
if tray_option['choice'] not in tray_names:
tray_vals = {
'name': tray_option['text'],
'system_name': tray_option['choice'],
}
vals_trays.append((0, 0, tray_vals))
cups_trays = set(tray_option['choice'] for tray_option
in option.choices)
for tray in self.tray_ids:
if tray.system_name not in cups_trays:
vals_trays.append((2, tray.id))
vals['tray_ids'] = vals_trays
return vals
@api.multi
def print_options(self, report, format):
""" Hook to define Tray """
printing_act_obj = self.env['printing.report.xml.action']
options = super(Printer, self).print_options(report, format)
# Retrieve user default values
user = self.env.user
tray = user.printer_tray_id
# Retrieve report default values
if report.printer_tray_id:
tray = report.printer_tray_id
# Retrieve report-user specific values
action = printing_act_obj.search([('report_id', '=', report.id),
('user_id', '=', self.env.uid),
('action', '!=', 'user_default')],
limit=1)
if action and action.tray_id:
tray = action.tray_id
if tray:
options['InputSlot'] = str(tray.system_name)
return options
|
Python
| 0.890483
|
@@ -3229,24 +3229,32 @@
and action.
+printer_
tray_id:%0A
|
1a2f3f74a398422fe70d6b482cdd779f728e9a21
|
add 'continue' lines, change print to logging
|
scripts/migrations/034-update_subscriptions_ticket_and_mr_titles.py
|
scripts/migrations/034-update_subscriptions_ticket_and_mr_titles.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import re
import sys
from pylons import tmpl_context as c
from bson import ObjectId
from ming.odm import session
from ming.orm import ThreadLocalORMSession
from allura import model as M
from forgetracker import model as TM
log = logging.getLogger(__name__)
def main():
task = sys.argv[-1]
c.project = None
    # Fix ticket artifact titles
title = re.compile('^Ticket [0-9]')
subs_tickets = M.Mailbox.query.find(dict(artifact_title=title)).all()
print 'Found total %d old artifact titles (tickets).' % len(subs_tickets)
for sub in subs_tickets:
ticket = TM.Ticket.query.get(_id = ObjectId(sub.artifact_index_id.split('#')[1]))
if not ticket:
print 'Could not find ticket for %s' % sub
new_title = 'Ticket #%d: %s' % (ticket.ticket_num, ticket.summary)
print '"%s" --> "%s"' % (sub.artifact_title , new_title)
if(task != 'diff'):
sub.artifact_title = new_title
session(sub).flush(sub)
# Fix merge request artifact titles
title = re.compile('^Merge request: ')
subs_mrs = M.Mailbox.query.find(dict(artifact_title=title)).all()
    print 'Found total %d old artifact titles (merge_requests).' % len(subs_mrs)
for sub in subs_mrs:
merge_request = M.MergeRequest.query.get(_id = ObjectId(sub.artifact_index_id.split('#')[1]))
if not merge_request:
print 'Could not find merge request for %s' % sub
new_title = 'Merge Request #%d: %s' % (merge_request.request_number, merge_request.summary)
print '"%s" --> "%s"' % (sub.artifact_title , new_title)
if(task != 'diff'):
sub.artifact_title = new_title
session(sub).flush(sub)
if __name__ == '__main__':
main()
|
Python
| 0.000001
|
@@ -1341,38 +1341,41 @@
tle)).all()%0A
-print
+log.info(
'Found total %25d
@@ -1401,26 +1401,25 @@
(tickets).'
- %25
+,
len(subs_ti
@@ -1416,32 +1416,33 @@
en(subs_tickets)
+)
%0A for sub in
@@ -1455,16 +1455,131 @@
ickets:%0A
+ if not sub.artifact_index_id:%0A log.info('No artifact_index_id on %25s', sub)%0A continue%0A
@@ -1687,38 +1687,41 @@
et:%0A
-print
+log.info(
'Could not find
@@ -1726,38 +1726,59 @@
d ticket for %25s'
- %25
+,
sub
+)%0A continue
%0A new_tit
@@ -1837,38 +1837,41 @@
ummary)%0A
-print
+log.info(
'%22%25s%22 --%3E %22%25s%22'
@@ -1861,36 +1861,34 @@
('%22%25s%22 --%3E %22%25s%22'
+,
-%25 (
sub.artifact_tit
@@ -1881,33 +1881,32 @@
b.artifact_title
-
, new_title)%0A
@@ -2163,22 +2163,25 @@
l()%0A
-print
+log.info(
'Found t
@@ -2226,18 +2226,17 @@
uests).'
- %25
+,
len(sub
@@ -2245,16 +2245,17 @@
tickets)
+)
%0A for
@@ -2272,16 +2272,131 @@
bs_mrs:%0A
+ if not sub.artifact_index_id:%0A log.info('No artifact_index_id on %25s', sub)%0A continue%0A
@@ -2531,22 +2531,25 @@
-print
+log.info(
'Could n
@@ -2581,14 +2581,35 @@
%25s'
- %25
+,
sub
+)%0A continue
%0A
@@ -2717,14 +2717,17 @@
-print
+log.info(
'%22%25s
@@ -2737,20 +2737,18 @@
-%3E %22%25s%22'
+,
-%25 (
sub.arti
@@ -2773,33 +2773,33 @@
itle)%0A if
-(
+
task != 'diff'):
@@ -2788,33 +2788,32 @@
f task != 'diff'
-)
:%0A su
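Taken together, the hunks apply one pattern twice (tickets, then merge requests): switch print statements to log.info with lazy %-style arguments, and bail out of an iteration with continue when the subscription has no artifact_index_id or the referenced object is gone. One iteration of the ticket loop, reconstructed from the hunks:

    for sub in subs_tickets:
        if not sub.artifact_index_id:
            log.info('No artifact_index_id on %s', sub)
            continue
        ticket = TM.Ticket.query.get(_id=ObjectId(sub.artifact_index_id.split('#')[1]))
        if not ticket:
            log.info('Could not find ticket for %s', sub)
            continue  # previously fell through and crashed on ticket.ticket_num
        new_title = 'Ticket #%d: %s' % (ticket.ticket_num, ticket.summary)
        log.info('"%s" --> "%s"', sub.artifact_title, new_title)

Passing the arguments to log.info instead of %-formatting them inline defers the string work until a record is actually emitted.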
|
94a07652c23f55a20f856550a1ceed549b6b8cd7
|
Updated the expected matching strings.
|
test/global_variables/TestGlobalVariables.py
|
test/global_variables/TestGlobalVariables.py
|
"""Show global variables and check that they do indeed have global scopes."""
import os, time
import unittest2
import lldb
from lldbtest import *
class GlobalVariablesTestCase(TestBase):
mydir = "global_variables"
@unittest2.skipUnless(sys.platform.startswith("darwin"), "requires Darwin")
def test_with_dsym(self):
"""Test 'frame variable -s -a' which omits args and shows scopes."""
self.buildDsym()
self.global_variables()
def test_with_dwarf(self):
"""Test 'frame variable -s -a' which omits args and shows scopes."""
self.buildDwarf()
self.global_variables()
def global_variables(self):
"""Test 'frame variable -s -a' which omits args and shows scopes."""
exe = os.path.join(os.getcwd(), "a.out")
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
# Break inside the main.
self.expect("breakpoint set -f main.c -l 20", BREAKPOINT_CREATED,
startstr = "Breakpoint created: 1: file ='main.c', line = 20, locations = 1")
self.runCmd("run", RUN_SUCCEEDED)
# The stop reason of the thread should be breakpoint.
self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
substrs = ['state is Stopped',
'stop reason = breakpoint'])
# The breakpoint should have a hit count of 1.
self.expect("breakpoint list", BREAKPOINT_HIT_ONCE,
substrs = [' resolved, hit count = 1'])
# Check that GLOBAL scopes are indicated for the variables.
self.expect("frame variable -s -a", VARIABLES_DISPLAYED_CORRECTLY,
substrs = ['GLOBAL: g_file_static_cstr',
'"g_file_static_cstr"',
'GLOBAL: g_file_global_int',
'(int) 42',
'GLOBAL: g_file_global_cstr',
'"g_file_global_cstr"'])
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lambda: lldb.SBDebugger.Terminate())
unittest2.main()
|
Python
| 0.999999
|
@@ -1581,16 +1581,19 @@
able -s
+-g
-a%22, VAR
@@ -1643,32 +1643,47 @@
strs = %5B'GLOBAL:
+ (char const *)
g_file_static_c
@@ -1758,32 +1758,38 @@
'GLOBAL:
+ (int)
g_file_global_i
@@ -1794,40 +1794,10 @@
_int
-',%0A '(int)
+ =
42'
@@ -1829,16 +1829,31 @@
'GLOBAL:
+ (char const *)
g_file_
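Decoded, the updated expectation adds -g to the frame variable invocation and folds the variable types into the matched substrings. Reconstructed from the hunks:

        self.expect("frame variable -s -g -a", VARIABLES_DISPLAYED_CORRECTLY,
            substrs = ['GLOBAL: (char const *) g_file_static_cstr',
                       '"g_file_static_cstr"',
                       'GLOBAL: (int) g_file_global_int = 42',
                       'GLOBAL: (char const *) g_file_global_cstr',
                       '"g_file_global_cstr"'])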
|
8a9a5abe6bd8ab97b3f8f7d879c229ae34dd27a1
|
add some doc string in tools/math.py
|
abel/tools/math.py
|
abel/tools/math.py
|
# -*- coding: utf-8 -*-
# Copyright CNRS 2012
# Roman Yurchak (LULI)
# This software is governed by the CeCILL-B license under French law and
# abiding by the rules of distribution of free software.
import numpy as np
from scipy.linalg import circulant
from scipy.optimize import curve_fit, brentq
from scipy.interpolate import interp1d
from scipy.ndimage import gaussian_filter
import scipy.ndimage as nd
def gradient(f, x=None, dx=1, axis=-1):
"""
    Return the gradient of a 1- or 2-dimensional array.
The gradient is computed using central differences in the interior
and first differences at the boundaries.
Irregular sampling is supported (it isn't supported by np.gradient)
Parameters
----------
f: 1d or 2d numpy array
Input array.
x: array_like, optional
Points where the function f is evaluated. It must be of the same
length as f.shape[axis].
If None, regular sampling is assumed (see dx)
dx: float, optional
If `x` is None, spacing given by `dx` is assumed. Default is 1.
axis: int, optional
The axis along which the difference is taken.
Returns
-------
out: array_like
Returns the gradient along the given axis.
To do:
implement smooth noise-robust differentiators for use on experimental data.
http://www.holoborodko.com/pavel/numerical-methods/numerical-derivative/smooth-low-noise-differentiators/
"""
if x is None:
x = np.arange(f.shape[axis]) * dx
else:
assert x.shape[0] == f.shape[axis]
I = np.zeros(f.shape[axis])
I[:2] = np.array([0, -1])
I[-1] = 1
I = circulant(I)
I[0, 0] = -1
I[-1, -1] = 1
I[0, -1] = 0
I[-1, 0] = 0
H = np.zeros((f.shape[axis], 1))
H[1:-1, 0] = x[2:] - x[:-2]
H[0] = x[1] - x[0]
H[-1] = x[-1] - x[-2]
if axis == 0:
return np.dot(I / H, f)
else:
return np.dot(I / H, f.T).T
def gaussian(x, a, mu, sigma, c):
"""
Gaussian function
a * exp(-((x - mu) ** 2) / 2 / sigma ** 2) + c
"""
return a * np.exp(-((x - mu) ** 2) / 2 / sigma ** 2) + c
def guss_gaussian(x):
"""
Find a set of better starting parameters for Gaussian function fitting
"""
c_guess = (x[0] + x[-1]) / 2
a_guess = x.max() - c_guess
mu_guess = x.argmax()
x_inter = interp1d(range(len(x)), x)
def _(i):
return x_inter(i) - a_guess / 2 - c_guess
try:
sigma_l_guess = brentq(_, 0, mu_guess)
except:
sigma_l_guess = len(x) / 4
try:
sigma_r_guess = brentq(_, mu_guess, len(x) - 1)
except:
sigma_r_guess = 3 * len(x) / 4
return a_guess, mu_guess, (sigma_r_guess - sigma_l_guess) / 2.35482, c_guess
def fit_gaussian(x):
"""
Fit Gaussian function and return its parameter
"""
p, q = curve_fit(gaussian, list(range(x.size)), x, p0=guss_gaussian(x))
return p
|
Python
| 0.000005
|
@@ -2064,184 +2064,804 @@
-%22%22%22%0A return a * np.exp(-((x - mu) ** 2) / 2 / sigma ** 2) + c%0A%0A%0Adef guss_gaussian(x):%0A %22%22%22%0A Find a set of better starting parameters for Gaussian function fitting%0A
+ref: https://en.wikipedia.org/wiki/Gaussian_function%0A%0A Parameters%0A ----------%0A x: 1D np.array%0A coordinate%0A%0A a: float%0A the height of the curve's peak%0A%0A mu: float%0A the position of the center of the peak%0A%0A sigma: float%0A the standard deviation, sometimes called the Gaussian RMS width%0A%0A c: float%0A non-zero background%0A%0A Returns%0A -------%0A out: 1D np.array%0A the Gaussian profile%0A %22%22%22%0A return a * np.exp(-((x - mu) ** 2) / 2 / sigma ** 2) + c%0A%0A%0Adef guss_gaussian(x):%0A %22%22%22%0A Find a set of better starting parameters for Gaussian function fitting%0A%0A Parameters%0A ----------%0A x: 1D np.array%0A 1D profile of your data%0A%0A Returns%0A -------%0A out: tuple of float%0A estimated value of (a, mu, sigma, c)
%0A
@@ -3444,17 +3444,173 @@
arameter
-
+%0A%0A Parameters%0A ----------%0A x: 1D np.array%0A 1D profile of your data%0A%0A Returns%0A -------%0A out: tuple of float%0A (a, mu, sigma, c)
%0A %22%22%22
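The hunks add numpy-style Parameters/Returns sections to gaussian(), guss_gaussian() and fit_gaussian(). To make the documented contract concrete, a quick synthetic check (parameter values picked arbitrarily for illustration, not part of the module):

    import numpy as np

    # build a clean profile with known parameters, then recover them
    profile = gaussian(np.arange(100), a=2.0, mu=50, sigma=5.0, c=0.5)
    a, mu, sigma, c = fit_gaussian(profile)
    # guss_gaussian() seeds curve_fit, so on clean data the recovered
    # (a, mu, sigma, c) should land close to (2.0, 50, 5.0, 0.5)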
|
8599480ed93a0117f326689280c7a896d6bf697a
|
add version 3.1-4 to r-bayesm (#20807)
|
var/spack/repos/builtin/packages/r-bayesm/package.py
|
var/spack/repos/builtin/packages/r-bayesm/package.py
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RBayesm(RPackage):
"""Bayesian Inference for Marketing/Micro-Econometrics"""
homepage = "https://cloud.r-project.org/package=bayesm"
url = "https://cloud.r-project.org/src/contrib/bayesm_3.1-0.1.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/bayesm"
version('3.1-3', sha256='51e4827eca8cd4cf3626f3c2282543df7c392b3ffb843f4bfb386fe104642a10')
version('3.1-2', sha256='a332f16e998ab10b17a2b1b9838d61660c36e914fe4d2e388a59f031d52ad736')
version('3.1-1', sha256='4854517dec30ab7c994de862aae1998c2d0c5e71265fd9eb7ed36891d4676078')
version('3.1-0.1', sha256='5879823b7fb6e6df0c0fe98faabc1044a4149bb65989062df4ade64e19d26411')
depends_on('r@3.2.0:', type=('build', 'run'))
depends_on('r-rcpp@0.12.0:', type=('build', 'run'))
depends_on('r-rcpparmadillo', type=('build', 'run'))
|
Python
| 0
|
@@ -296,16 +296,1438 @@
ometrics
+%0A%0A Covers many important models used in marketing and micro-econometrics%0A applications. The package includes: Bayes Regression (univariate or%0A multivariate dep var), Bayes Seemingly Unrelated Regression (SUR), Binary%0A and Ordinal Probit, Multinomial Logit (MNL) and Multinomial Probit (MNP),%0A Multivariate Probit, Negative Binomial (Poisson) Regression, Multivariate%0A Mixtures of Normals (including clustering), Dirichlet Process Prior Density%0A Estimation with normal base, Hierarchical Linear Models with normal prior%0A and covariates, Hierarchical Linear Models with a mixture of normals prior%0A and covariates, Hierarchical Multinomial Logits with a mixture of normals%0A prior and covariates, Hierarchical Multinomial Logits with a Dirichlet%0A Process prior and covariates, Hierarchical Negative Binomial Regression%0A Models, Bayesian analysis of choice-based conjoint data, Bayesian treatment%0A of linear instrumental variables models, Analysis of Multivariate Ordinal%0A survey data with scale usage heterogeneity (as in Rossi et al, JASA (01)),%0A Bayesian Analysis of Aggregate Random Coefficient Logit Models as in BLP%0A (see Jiang, Manchanda, Rossi 2009) For further reference, consult our book,%0A Bayesian Statistics and Marketing by Rossi, Allenby and McCulloch (Wiley%0A 2005) and Bayesian Non- and Semi-Parametric Methods and Applications%0A (Princeton U Press 2014).
%22%22%22%0A%0A
@@ -1935,16 +1935,112 @@
ayesm%22%0A%0A
+ version('3.1-4', sha256='061b216c62bc72eab8d646ad4075f2f78823f9913344a781fa53ea7cf4a48f94')%0A
vers
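The hunks do two things: replace the one-line description with the full CRAN blurb, and add the new release above the existing version() calls so the list stays newest-first, the usual Spack convention. The added line, decoded:

    version('3.1-4', sha256='061b216c62bc72eab8d646ad4075f2f78823f9913344a781fa53ea7cf4a48f94')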
|
52e0f47a3ff67bd0c8a31c6755b384dedd70ee02
|
update scalasca to latest version, simplify recipe (#11999)
|
var/spack/repos/builtin/packages/scalasca/package.py
|
var/spack/repos/builtin/packages/scalasca/package.py
|
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Scalasca(AutotoolsPackage):
"""Scalasca is a software tool that supports the performance optimization
of parallel programs by measuring and analyzing their runtime
behavior. The analysis identifies potential performance
bottlenecks - in particular those concerning communication and
synchronization - and offers guidance in exploring their causes.
"""
homepage = "http://www.scalasca.org"
url = "http://apps.fz-juelich.de/scalasca/releases/scalasca/2.1/dist/scalasca-2.1.tar.gz"
version('2.4', '4a895868258030f700a635eac93d36764f60c8c63673c7db419ea4bcc6b0b760')
version('2.3.1', 'a83ced912b9d2330004cb6b9cefa7585')
version('2.2.2', '2bafce988b0522d18072f7771e491ab9')
version('2.1', 'bab9c2b021e51e2ba187feec442b96e6')
depends_on("mpi")
# version 2.4
depends_on('cubew@4.4:', when='@2.4:')
# version 2.3
depends_on('cube@4.3', when='@2.3:2.3.99')
depends_on('otf2@2:', when='@2.3:')
# version 2.1+
depends_on('cube@4.2', when='@2.1:2.2.999')
depends_on('otf2@1.4', when='@2.1:2.2.999')
def url_for_version(self, version):
return 'http://apps.fz-juelich.de/scalasca/releases/scalasca/{0}/dist/scalasca-{1}.tar.gz'.format(version.up_to(2), version)
def configure_args(self):
spec = self.spec
config_args = ["--enable-shared"]
if spec.satisfies('@2.4:'):
config_args.append("--with-cube=%s" % spec['cubew'].prefix.bin)
else:
config_args.append("--with-cube=%s" % spec['cube'].prefix.bin)
config_args.append("--with-otf2=%s" % spec['otf2'].prefix.bin)
if self.spec['mpi'].name == 'openmpi':
config_args.append("--with-mpi=openmpi")
elif self.spec.satisfies('^mpich@3:'):
config_args.append("--with-mpi=mpich3")
return config_args
|
Python
| 0
|
@@ -745,16 +745,186 @@
tar.gz%22%0A
+ list_url = %22https://scalasca.org/scalasca/front_content.php?idart=1072%22%0A%0A version('2.5', sha256='7dfa01e383bfb8a4fd3771c9ea98ff43772e415009d9f3c5f63b9e05f2dde0f6')
%0A ver
@@ -1217,16 +1217,17 @@
sion 2.4
++
%0A dep
@@ -1259,24 +1259,84 @@
n='@2.4:')%0A%0A
+ # version 2.3+%0A depends_on('otf2@2:', when='@2.3:')%0A%0A
# versio
@@ -1391,48 +1391,8 @@
99')
-%0A depends_on('otf2@2:', when='@2.3:')
%0A%0A
@@ -1406,17 +1406,22 @@
sion 2.1
-+
+ - 2.2
%0A dep
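Reassembling the hunks, the recipe gains a list_url and a 2.5 release, and the dependency comments are regrouped so each constraint sits under the version range it serves. The version/dependency block after the change, approximately:

    list_url = "https://scalasca.org/scalasca/front_content.php?idart=1072"

    version('2.5', sha256='7dfa01e383bfb8a4fd3771c9ea98ff43772e415009d9f3c5f63b9e05f2dde0f6')
    # (existing 2.4 / 2.3.1 / 2.2.2 / 2.1 version() lines unchanged)

    # version 2.4+
    depends_on('cubew@4.4:', when='@2.4:')

    # version 2.3+
    depends_on('otf2@2:', when='@2.3:')

    # version 2.1 - 2.2
    depends_on('cube@4.2', when='@2.1:2.2.999')
    depends_on('otf2@1.4', when='@2.1:2.2.999')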
|
8ffe530025e38d06ffc567fb69e9b96874db3faa
|
Increase version
|
conveyor/__init__.py
|
conveyor/__init__.py
|
__version__ = "0.1.dev2"
|
Python
| 0
|
@@ -19,7 +19,7 @@
.dev
-2
+3
%22%0A
|
922c3c7b708948fd5ded332ff2999e732c0bf5d8
|
test that alias is used
|
test/unit/keywords/test_browsermanagement.py
|
test/unit/keywords/test_browsermanagement.py
|
import unittest
from Selenium2Library.keywords._browsermanagement import _BrowserManagementKeywords
from selenium import webdriver
from mockito import *
class BrowserManagementTests(unittest.TestCase):
def test_create_firefox_browser(self):
test_browsers = ((webdriver.Firefox, "ff"), (webdriver.Firefox, "firEfOx"))
for test_browser in test_browsers:
self.verify_browser(*test_browser)
def mock_createProfile(self, profile_directory=None):
self.ff_profile_dir = profile_directory
return self.old_profile_init(profile_directory)
def test_create_ie_browser(self):
test_browsers = ((webdriver.Ie, "ie"), (webdriver.Ie, "Internet Explorer"))
for test_browser in test_browsers:
self.verify_browser(*test_browser)
def test_create_chrome_browser(self):
test_browsers = ((webdriver.Chrome, "gOOglEchrOmE"),(webdriver.Chrome,"gc"),
(webdriver.Chrome, "chrome"))
for test_browser in test_browsers:
self.verify_browser(*test_browser)
def test_create_opera_browser(self):
self.verify_browser(webdriver.Opera, "OPERA")
def test_create_phantomjs_browser(self):
self.verify_browser(webdriver.PhantomJS, "PHANTOMJS")
def test_create_remote_browser(self):
self.verify_browser(webdriver.Remote, "chrome", remote="http://127.0.0.1/wd/hub")
def test_create_htmlunit_browser(self):
self.verify_browser(webdriver.Remote, "htmlunit")
def test_create_htmlunitwihtjs_browser(self):
self.verify_browser(webdriver.Remote, "htmlunitwithjs")
def test_parse_capabilities_string(self):
bm = _BrowserManagementKeywords()
expected_caps = "key1:val1,key2:val2"
capabilities = bm._parse_capabilities_string(expected_caps)
        self.assertEquals("val1", capabilities["key1"])
        self.assertEquals("val2", capabilities["key2"])
        self.assertEquals(2, len(capabilities))
def test_create_remote_browser_with_desired_prefs(self):
expected_caps = {"key1":"val1","key2":"val2"}
self.verify_browser(webdriver.Remote, "chrome", remote="http://127.0.0.1/wd/hub",
desired_capabilities=expected_caps)
def test_create_remote_browser_with_string_desired_prefs(self):
expected_caps = "key1:val1,key2:val2"
self.verify_browser(webdriver.Remote, "chrome", remote="http://127.0.0.1/wd/hub",
desired_capabilities=expected_caps)
def test_set_selenium_timeout_only_affects_open_browsers(self):
bm = _BrowserManagementKeywords()
first_browser, second_browser = mock(), mock()
bm._cache.register(first_browser)
bm._cache.close()
verify(first_browser).quit()
bm._cache.register(second_browser)
bm.set_selenium_timeout("10 seconds")
verify(second_browser).set_script_timeout(10.0)
bm._cache.close_all()
verify(second_browser).quit()
bm.set_selenium_timeout("20 seconds")
verifyNoMoreInteractions(first_browser)
verifyNoMoreInteractions(second_browser)
def test_bad_browser_name(self):
bm = _BrowserManagementKeywords()
try:
bm._make_browser("fireox")
self.fail("Exception not raised")
except ValueError, e:
self.assertEquals("fireox is not a supported browser.", e.message)
def test_create_webdriver(self):
bm = _BrowserManagementWithLoggingStubs()
capt_data = {}
class FakeWebDriver(mock):
def __init__(self, some_arg=None):
mock.__init__(self)
capt_data['some_arg'] = some_arg
capt_data['webdriver'] = self
webdriver.FakeWebDriver = FakeWebDriver
try:
index = bm.create_webdriver('FakeWebDriver', some_arg=1)
self.assertEquals(capt_data['some_arg'], 1)
self.assertEquals(capt_data['webdriver'], bm._current_browser())
self.assertEquals(capt_data['webdriver'], bm._cache.get_connection(index))
capt_data.clear()
my_kwargs = {'some_arg':2}
bm.create_webdriver('FakeWebDriver', kwargs=my_kwargs)
self.assertEquals(capt_data['some_arg'], 2)
finally:
del webdriver.FakeWebDriver
    def verify_browser(self, webdriver_type, browser_name, **kw):
#todo try lambda *x: was_called = true
bm = _BrowserManagementKeywords()
old_init = webdriver_type.__init__
webdriver_type.__init__ = self.mock_init
try:
self.was_called = False
bm._make_browser(browser_name, **kw)
except AttributeError:
pass #kinda dangerous but I'm too lazy to mock out all the set_timeout calls
finally:
webdriver_type.__init__ = old_init
self.assertTrue(self.was_called)
def mock_init(self, *args, **kw):
self.was_called = True
class _BrowserManagementWithLoggingStubs(_BrowserManagementKeywords):
def __init__(self):
_BrowserManagementKeywords.__init__(self)
def mock_logging_method(self, *args, **kwargs):
pass
for name in ['_info', '_debug', '_warn', '_log', '_html']:
setattr(self, name, mock_logging_method)
|
Python
| 0
|
@@ -3851,16 +3851,24 @@
Driver',
+ 'fake',
some_ar
@@ -4092,16 +4092,104 @@
index))%0A
+ self.assertEquals(capt_data%5B'webdriver'%5D, bm._cache.get_connection('fake'))%0A
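Decoded, the test now passes an explicit alias ('fake') to create_webdriver and asserts that the cache resolves both the returned index and the alias to the same driver:

            index = bm.create_webdriver('FakeWebDriver', 'fake', some_arg=1)
            # existing assertion: lookup by the returned index still works
            self.assertEquals(capt_data['webdriver'], bm._cache.get_connection(index))
            # new assertion: the same connection is reachable via its alias
            self.assertEquals(capt_data['webdriver'], bm._cache.get_connection('fake'))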
|
ce537832eb3d1c0a7ceec213abe1d52c189037c2
|
fix a bug in the controller of new courses
|
course_controller.py
|
course_controller.py
|
import os
import urllib
from google.appengine.api import users
from google.appengine.ext import ndb
import jinja2
import webapp2
import main_controller
import app.models.course_model as coursemodel
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
class Index(main_controller._BaseHandler):
def get(self):
courses = coursemodel.All()
self.template_values['courses'] = courses
template = JINJA_ENVIRONMENT.get_template('app/views/course/index.html')
self.response.write(template.render(self.template_values))
class Show(main_controller._BaseHandler):
def get(self):
my_key_string = self.request.get('key')
my_key = ndb.Key(urlsafe=my_key_string)
course = coursemodel.Get(key = my_key)
self.template_values['course'] = course
template = JINJA_ENVIRONMENT.get_template('app/views/course/show.html')
self.response.write(template.render(self.template_values))
class New(main_controller._BaseHandler):
def get(self):
template = JINJA_ENVIRONMENT.get_template('app/views/course/new.html')
self.response.write(template.render())
def post(self):
self.course = coursemodel.Insert(name=self.request.get('name'), description=self.request.get('description'), lang=self.request.get('lang'))
#TODO redirect to show web of the new object
self.redirect('/courses/show?key='+self.course.key.urlsafe())
class Edit(main_controller._BaseHandler):
def get(self):
my_key_string = self.request.get('key')
my_key = ndb.Key(urlsafe=my_key_string)
course = coursemodel.Get(my_key)
self.template_values['course'] = course
template = JINJA_ENVIRONMENT.get_template('app/views/course/edit.html')
self.response.write(template.render(self.template_values))
def post(self):
my_key_string = self.request.get('key')
my_key = ndb.Key(urlsafe=my_key_string)
self.course = coursemodel.Update(key = my_key, name=self.request.get('name'), description=self.request.get('description'), lang=self.request.get('lang'))
self.redirect('/courses/show?key='+self.course.key.urlsafe())
class Destroy(main_controller._BaseHandler):
def get(self):
my_key_string = self.request.get('key')
my_key = ndb.Key(urlsafe=my_key_string)
course = coursemodel.Delete(key = my_key)
self.redirect('/courses')
app = webapp2.WSGIApplication([
('/courses', Index),
('/courses/show', Show),
('/courses/new', New),
('/courses/edit', Edit),
('/courses/destroy', Destroy)
], debug=True)
|
Python
| 0
|
@@ -1252,16 +1252,36 @@
.render(
+self.template_values
))%0A%0A
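The bug: New.get() called template.render() with no arguments, so whatever the base handler had accumulated in self.template_values never reached the template, unlike every other handler in this file. After the fix the method reads:

    class New(main_controller._BaseHandler):
        def get(self):
            template = JINJA_ENVIRONMENT.get_template('app/views/course/new.html')
            # pass the handler's accumulated context instead of an empty one
            self.response.write(template.render(self.template_values))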
|
82bfb1da1d9b03699bee0dbf556dbf51d779022f
|
fix error
|
pywt/tests/test_matlab_compatibility_cwt.py
|
pywt/tests/test_matlab_compatibility_cwt.py
|
"""
Test used to verify PyWavelets Continuous Wavelet Transform computation
accuracy against MathWorks Wavelet Toolbox.
"""
from __future__ import division, print_function, absolute_import
import os
import numpy as np
from numpy.testing import assert_, dec, run_module_suite
import pywt
if 'PYWT_XSLOW' in os.environ:
# Run a more comprehensive set of problem sizes. This could take more than
# an hour to complete.
size_set = 'full'
use_precomputed = False
else:
size_set = 'reduced'
use_precomputed = True
if use_precomputed:
data_dir = os.path.join(os.path.dirname(__file__), 'data')
matlab_data_file = os.path.join(data_dir, 'cwt_matlabR2015b_result.npz')
matlab_result_dict = np.load(matlab_data_file)
else:
try:
from pymatbridge import Matlab
mlab = Matlab()
_matlab_missing = False
except ImportError:
print("To run Matlab compatibility tests you need to have MathWorks "
"MATLAB, MathWorks Wavelet Toolbox and the pymatbridge Python "
"package installed.")
_matlab_missing = True
# list of mode names in pywt and matlab
modes = [('zero', 'zpd'),
('constant', 'sp0'),
('symmetric', 'sym'),
('periodic', 'ppd'),
('smooth', 'sp1'),
('periodization', 'per')]
families = ('gaus', 'mexh', 'morl', 'cgau', 'shan', 'fbsp', 'cmor')
wavelets = sum([pywt.wavelist(name) for name in families], [])
def _get_data_sizes(w):
""" Return the sizes to test for wavelet w. """
if size_set == 'full':
data_sizes = list(range(100, 101)) + \
[100, 200, 500, 1000, 50000]
else:
data_sizes = (1000, 1000 + 1)
return data_sizes
def _get_scales(w):
""" Return the scales to test for wavelet w. """
if size_set == 'full':
Scales = (1,2,3,4)
else:
Scales = (1,2)
return Scales
@dec.skipif(use_precomputed or _matlab_missing)
@dec.slow
def test_accuracy_pymatbridge_cwt():
rstate = np.random.RandomState(1234)
    # max RMSE (was 1.0e-10, is reduced to 5.0e-5 due to different coefficients)
epsilon = 5.0e-5
epsilon_pywt_coeffs = 1.0e-10
mlab.start()
try:
for wavelet in wavelets:
w = pywt.Wavelet(wavelet)
if np.any((wavelet == np.array(['shan', 'cmor'])),axis=0):
mlab.set_variable('wavelet', wavelet+str(w.bandwidth_frequency)+'-'+str(w.center_frequency))
elif wavelet == 'fbsp':
mlab.set_variable('wavelet', wavelet+str(w.fbsp_order)+'-'+str(w.bandwidth_frequency)+'-'+str(w.center_frequency))
else:
mlab.set_variable('wavelet', wavelet)
for N in _get_data_sizes(w):
data = rstate.randn(N)
mlab.set_variable('data', data)
for scales in Scales:
coefs = _compute_matlab_result(data, wavelet, scales)
yield _check_accuracy, data, w, scales, coefs, wavelet, epsilon
finally:
mlab.stop()
@dec.skipif(not use_precomputed)
@dec.slow
def test_accuracy_precomputed_cwt():
# Keep this specific random seed to match the precomputed Matlab result.
rstate = np.random.RandomState(1234)
    # max RMSE (was 1.0e-10, is reduced to 5.0e-5 due to different coefficients)
epsilon = 5.0e-5
epsilon_pywt_coeffs = 1.0e-10
for wavelet in wavelets:
w = pywt.Wavelet(wavelet)
if np.any((wavelet == np.array(['shan', 'cmor'])),axis=0):
wavelet = wavelet+str(w.bandwidth_frequency)+'-'+str(w.center_frequency)
elif wavelet == 'fbsp':
wavelet = wavelet+str(w.fbsp_order)+'-'+str(w.bandwidth_frequency)+'-'+str(w.center_frequency)
for N in _get_data_sizes(w):
data = rstate.randn(N)
for scales in Scales:
coefs = _load_matlab_result(data, wavelet, scales)
yield _check_accuracy, data, w, scales, coefs, wavelet, epsilon
def _compute_matlab_result(data, wavelet, scales):
""" Compute the result using MATLAB.
This function assumes that the Matlab variables `wavelet` and `data` have
already been set externally.
"""
mlab.set_variable('scales', scales)
mlab_code = ("coefs = cwt(data, scales, wavelet)")
res = mlab.run_code(mlab_code)
if not res['success']:
raise RuntimeError("Matlab failed to execute the provided code. "
"Check that the wavelet toolbox is installed.")
# need np.asarray because sometimes the output is a single float64
coefs = np.asarray(mlab.get_variable('coefs'))
return coefs
def _load_matlab_result(data, wavelet, scales):
""" Load the precomputed result.
"""
N = len(data)
coefs_key = '_'.join([str(scales), wavelet, str(N), 'coefs'])
if (coefs_key not in matlab_result_dict):
raise KeyError(
"Precompted Matlab result not found for wavelet: "
"{0}, mode: {1}, size: {2}".format(wavelet, scales, N))
coefs = matlab_result_dict[coefs_key]
return coefs
def _check_accuracy(data, w, scales, coefs, wavelet, epsilon):
# PyWavelets result
coefs_pywt = pywt.cwt(data, scales, w)
# calculate error measures
rms = np.sqrt(np.mean((coefs_pywt - coefs) ** 2))
msg = ('[RMS_A > EPSILON] for Scale: %s, Wavelet: %s, '
'Length: %d, rms=%.3g' % (scales, wavelet, len(data), rms))
assert_(rms < epsilon, msg=msg)
if __name__ == '__main__':
run_module_suite()
|
Python
| 0.000002
|
@@ -2833,38 +2833,46 @@
for scales in
-S
+_get_s
cales
+(w)
:%0A
@@ -3837,22 +3837,30 @@
ales in
-S
+_get_s
cales
+(w)
:%0A
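The error being fixed is a plain NameError: both generator tests looped over Scales, a name that only exists as a local inside _get_scales(), never at module scope. Both loops now call the helper; the precomputed variant, reconstructed:

        for N in _get_data_sizes(w):
            data = rstate.randn(N)
            for scales in _get_scales(w):   # was: `for scales in Scales`, a NameError
                coefs = _load_matlab_result(data, wavelet, scales)
                yield _check_accuracy, data, w, scales, coefs, wavelet, epsilon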
|
20d5f5d5e10dcf118639b4ca538ef7537863145a
|
add cache 2 hours
|
dv_apps/dvobject_api/views_dataverses.py
|
dv_apps/dvobject_api/views_dataverses.py
|
import json
from collections import OrderedDict
from django.shortcuts import render
from django.http import Http404
from django.conf import settings
from django.http import JsonResponse, HttpResponse
from django.template.loader import render_to_string
from django.forms.models import model_to_dict
from django.views.decorators.cache import cache_page
from django.core import serializers
from dv_apps.dataverses.models import Dataverse
from dv_apps.dataverses.util import DataverseUtil
def get_pretty_val(request):
"""Quick check of url param to pretty print JSON"""
if request.GET.get('pretty', None) is not None:
return True
return False
def view_single_dataverse_by_alias(request, alias):
try:
dv = Dataverse.objects.select_related('dvobject').get(alias=alias)
except Dataverse.DoesNotExist:
raise Http404
return view_single_dataverse(request, dv)
def view_single_dataverse_by_id(request, dataverse_id):
try:
dv = Dataverse.objects.select_related('dvobject').get(dvobject__id =dataverse_id)
except Dataverse.DoesNotExist:
raise Http404
return view_single_dataverse(request, dv)
@cache_page(60 * 15)
def view_single_dataverse(request, dv):
"""
Show JSON for a single Dataverse
"""
if dv is None:
raise Http404
assert isinstance(dv, Dataverse), "dv must be a Dataverse object or None"
is_pretty = request.GET.get('pretty', None)
if is_pretty is not None:
is_pretty = True
resp_dict = OrderedDict()
resp_dict['status'] = "OK"
resp_dict['data'] = DataverseUtil(dv).as_json()
#model_to_dict(dv)
if is_pretty:
s = '<pre>%s</pre>' % json.dumps(resp_dict, indent=4)
return HttpResponse(s)
else:
return JsonResponse(resp_dict)#, content_type='application/json')
|
Python
| 0
|
@@ -659,16 +659,41 @@
False%0A%0A
+@cache_page(60 * 60 * 2)%0A
def view
@@ -926,16 +926,41 @@
t, dv)%0A%0A
+@cache_page(60 * 60 * 2)%0A
def view
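The new decorators put a two-hour cache on the two public entry points; cache_page keys on the request URL, so decorating the by-alias and by-id views is what makes the caching effective. One of the decorated views after the change:

    @cache_page(60 * 60 * 2)          # 2 hours, expressed in seconds
    def view_single_dataverse_by_alias(request, alias):
        try:
            dv = Dataverse.objects.select_related('dvobject').get(alias=alias)
        except Dataverse.DoesNotExist:
            raise Http404
        return view_single_dataverse(request, dv)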
|
4e515f070f844569b84eeb77f7e7eda883bc861e
|
fix class name
|
easy_my_coop/wizard/update_share_line.py
|
easy_my_coop/wizard/update_share_line.py
|
# -*- coding: utf-8 -*-
from openerp import api, fields, models, _
from openerp.exceptions import UserError
class PartnerUpdateInfo(models.TransientModel):
_name = "share.line.update.info"
@api.model
def _get_share_line(self):
active_id = self.env.context.get('active_id')
return self.env['share.line'].browse(active_id)
@api.model
def _get_effective_date(self):
share_line = self._get_share_line()
return share_line.effective_date
effective_date = fields.Date(string="effective date",
required=True,
default=_get_effective_date)
cooperator = fields.Many2one(related='share_line.partner_id',
string="Cooperator")
share_line = fields.Many2one('share.line',
string="Share line",
default=_get_share_line)
@api.multi
def update(self):
line = self.share_line
cooperator = line.partner_id
sub_reg = self.env['subscription.register'].search(
[('partner_id', '=', cooperator.id),
('share_product_id', '=', line.share_product_id.id),
('quantity', '=', line.share_number),
('date', '=', line.effective_date)])
if sub_reg:
if len(sub_reg) > 1:
                raise UserError(_("Error: the update returned more than one"
                                  " subscription register line."))
else:
line.effective_date = self.effective_date
sub_reg.date = self.effective_date
return True
|
Python
| 0.000051
|
@@ -114,15 +114,17 @@
ass
-Part
+ShareLi
ne
-r
Upda
|
81cea197e6c8b50a9b2083708962ce167cb529fd
|
Change database api to deny the deletion of a database in quarantine
|
dbaas/api/database.py
|
dbaas/api/database.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from rest_framework import viewsets, serializers, status
from rest_framework.response import Response
from logical import models
from physical.models import Plan, Environment, DatabaseInfra
from account.models import Team
from .credential import CredentialSerializer
from django.contrib.sites.models import Site
from notification.tasks import create_database
import logging
LOG = logging.getLogger(__name__)
class DatabaseSerializer(serializers.HyperlinkedModelSerializer):
plan = serializers.HyperlinkedRelatedField(
source='plan', view_name='plan-detail', queryset=Plan.objects)
    environment = serializers.HyperlinkedRelatedField(
        source='environment', view_name='environment-detail',
        queryset=Environment.objects)
team = serializers.HyperlinkedRelatedField(
source='team', view_name='team-detail', queryset=Team.objects)
endpoint = serializers.Field(source='endpoint')
quarantine_dt = serializers.Field(source='quarantine_dt')
total_size_in_bytes = serializers.Field(source='total_size')
credentials = CredentialSerializer(many=True, read_only=True)
status = serializers.Field(source='status')
used_size_in_bytes = serializers.Field(source='used_size_in_bytes')
class Meta:
model = models.Database
fields = ('url', 'id', 'name', 'endpoint', 'plan', 'environment', 'project', 'team',
'quarantine_dt', 'total_size_in_bytes', 'credentials','description', 'status', 'used_size_in_bytes')
read_only = ('credentials', 'status', 'used_size_in_bytes')
def __init__(self, *args, **kwargs):
super(DatabaseSerializer, self).__init__(*args, **kwargs)
request = self.context.get('request', None)
if request:
creating = request.method == 'POST'
# when database is created, user can't change plan, environment and name
self.fields['plan'].read_only = not creating
self.fields['environment'].read_only = not creating
self.fields['name'].read_only = not creating
self.fields['credentials'].read_only = True
self.fields['description'].read_only = not creating
# quarantine is always readonly
# self.fields['quarantine_dt'].read_only = True
class DatabaseAPI(viewsets.ModelViewSet):
"""
* ### __List databases__
__GET__ /api/database/
* ### __To create a new database__
__POST__ /api/database/
{
"name": "{name}",
"plan": "{api_url}/plan/{plan_id}/",
"environment": "{api_url}/environment/{environment_id}/",
"project": "{api_url}/project/{project_id}/",
"team": "{api_url}/team/{team_id}/",
"description": "{description}"
}
* ### __Show details about a database__
__GET__ /api/database/`database_id`/
* ### __To delete a database (will put it on quarantine)__
__DELETE__ /api/database/`database_id`/
* ### __To change database project__
__PUT__ /api/database/`database_id`/
{
"project": "{api_url}/project/{project_id}/"
}
"""
serializer_class = DatabaseSerializer
queryset = models.Database.objects.all()
def create(self, request):
serializer = self.get_serializer(data=request.DATA, files=request.FILES)
if serializer.is_valid():
self.pre_save(serializer.object)
data = serializer.restore_fields(request.DATA, request.FILES)
LOG.info("Plan %s" % data['plan'])
result = create_database.delay(data['name'],
data['plan'],
data['environment'],
data['team'],
data['project'],
data['description'],
request.user)
#data = serializer.to_native(self.object)
#self.post_save(self.object, created=True)
headers = self.get_success_headers(data)
task_url = Site.objects.get_current().domain + '/api/task?task_id=%s' % str(result.id)
return Response({"task":task_url}, status=status.HTTP_201_CREATED,
headers=headers)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
|
Python
| 0
|
@@ -659,32 +659,55 @@
set=Plan.objects
+.filter(is_active=True)
)%0A environmen
@@ -4607,8 +4607,324 @@
EQUEST)%0A
+%0A def destroy(self, request, *args, **kwargs):%0A instance = self.get_object()%0A%0A if not instance.is_in_quarantine:%0A self.perform_destroy(instance)%0A return Response(status=status.HTTP_204_NO_CONTENT)%0A else:%0A return Response(status=status.HTTP_401_UNAUTHORIZED)%0A%0A
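Two changes, decoded: the plan queryset is narrowed to active plans (Plan.objects.filter(is_active=True)), and DatabaseAPI gains a destroy() override so that DELETE is honoured only when the database is not already in quarantine:

    def destroy(self, request, *args, **kwargs):
        instance = self.get_object()

        if not instance.is_in_quarantine:
            self.perform_destroy(instance)
            return Response(status=status.HTTP_204_NO_CONTENT)
        # deleting a quarantined database is denied
        return Response(status=status.HTTP_401_UNAUTHORIZED)

403 Forbidden would arguably fit a denied action better than 401, but 401 is what the diff uses.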
|
38db4b0a23e2c2aaf858d0b2bd9d5ae4df819e66
|
Move imports in mythicbeastsdns component (#28033)
|
homeassistant/components/mythicbeastsdns/__init__.py
|
homeassistant/components/mythicbeastsdns/__init__.py
|
"""Support for Mythic Beasts Dynamic DNS service."""
import logging
from datetime import timedelta
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.const import (
CONF_DOMAIN,
CONF_HOST,
CONF_PASSWORD,
CONF_SCAN_INTERVAL,
)
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.event import async_track_time_interval
_LOGGER = logging.getLogger(__name__)
DOMAIN = "mythicbeastsdns"
DEFAULT_INTERVAL = timedelta(minutes=10)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_DOMAIN): cv.string,
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_SCAN_INTERVAL, default=DEFAULT_INTERVAL): vol.All(
cv.time_period, cv.positive_timedelta
),
}
)
},
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass, config):
"""Initialize the Mythic Beasts component."""
import mbddns
domain = config[DOMAIN][CONF_DOMAIN]
password = config[DOMAIN][CONF_PASSWORD]
host = config[DOMAIN][CONF_HOST]
update_interval = config[DOMAIN][CONF_SCAN_INTERVAL]
session = async_get_clientsession(hass)
result = await mbddns.update(domain, password, host, session=session)
if not result:
return False
async def update_domain_interval(now):
"""Update the DNS entry."""
await mbddns.update(domain, password, host, session=session)
async_track_time_interval(hass, update_domain_interval, update_interval)
return True
|
Python
| 0
|
@@ -50,23 +50,8 @@
%22%22%22%0A
-import logging%0A
from
@@ -81,87 +81,63 @@
lta%0A
-%0A
import
-voluptuous as vol%0A%0Aimport homeassistant.helpers.config_validation as cv
+logging%0A%0Aimport mbddns%0Aimport voluptuous as vol%0A
%0Afro
@@ -317,16 +317,69 @@
session%0A
+import homeassistant.helpers.config_validation as cv%0A
from hom
@@ -1099,27 +1099,8 @@
%22%22%22%0A
- import mbddns%0A%0A
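Nothing behavioural changes here: the lazy import mbddns inside async_setup() is hoisted to module level, and the header is regrouped stdlib / third-party / homeassistant. The import block after the move, reassembled from the hunks:

    from datetime import timedelta
    import logging

    import mbddns
    import voluptuous as vol

    from homeassistant.const import (
        CONF_DOMAIN, CONF_HOST, CONF_PASSWORD, CONF_SCAN_INTERVAL,
    )
    from homeassistant.helpers.aiohttp_client import async_get_clientsession
    import homeassistant.helpers.config_validation as cv
    from homeassistant.helpers.event import async_track_time_interval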
|
5616573372638f2b195714cf02db8a7a02a4678f
|
Correct column name
|
luigi/tasks/rfam/pgload_go_term_mapping.py
|
luigi/tasks/rfam/pgload_go_term_mapping.py
|
# -*- coding: utf-8 -*-
"""
Copyright [2009-2017] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from tasks.utils.pgloader import PGLoader
from tasks.go_terms.pgload_go_terms import PGLoadGoTerms
from .go_term_mapping_csv import RfamGoTermsCSV
from .pgload_families import RfamPGLoadFamilies
CONTROL_FILE = """LOAD CSV
FROM '{filename}' WITH ENCODING ISO-8859-14
HAVING FIELDS
(
go_term_id,
rfam_model_id
)
INTO {db_url}
TARGET COLUMNS
(
go_term_id,
name
)
SET
search_path = '{search_path}'
WITH
skip header = 1,
fields escaped by double-quote,
fields terminated by ','
BEFORE LOAD DO
$$
create table if not exists load_rfam_go_terms (
go_term_id character varying(10) COLLATE pg_catalog."default" NOT NULL,
rfam_model_id character varying(20) COLLATE pg_catalog."default" NOT NULL
);
$$,
$$
truncate table load_rfam_go_terms;
$$
AFTER LOAD DO
$$ insert into rfam_go_terms (
go_term_id,
rfam_model_id
) (
select
go_term_id,
rfam_model_id
from load_rfam_go_terms
)
ON CONFLICT (go_term_id, rfam_model_id) DO UPDATE SET
go_term_id = excluded.go_term_id,
rfam_model_id = excluded.rfam_model_id
;
$$,
$$
drop table load_rfam_go_terms;
$$
;
"""
class RfamPGLoadGoTerms(PGLoader): # pylint: disable=R0904
"""
This will run pgloader on the Rfam go term mapping CSV file. The importing
will update any existing mappings and will not produce duplicates.
"""
def requires(self):
return [
RfamGoTermsCSV(),
PGLoadGoTerms(),
RfamPGLoadFamilies(),
]
def control_file(self):
filename = RfamGoTermsCSV().output().fn
return CONTROL_FILE.format(
filename=filename,
db_url=self.db_url(table='load_rfam_go_terms'),
search_path=self.db_search_path(),
)
|
Python
| 0
|
@@ -982,20 +982,29 @@
id,%0A
-name
+rfam_model_id
%0A)%0ASET%0A
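In the pgloader control file the TARGET COLUMNS list has to line up with the fields actually being loaded; name looks like a leftover from a different loader, while both the CSV fields and the load table use rfam_model_id. The corrected block:

    TARGET COLUMNS
    (
        go_term_id,
        rfam_model_id
    )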
|
1f3325519a72cb98669185149b03b11c1ec25f70
|
Fix line number convention
|
bears/c_languages/CPPLintBear.py
|
bears/c_languages/CPPLintBear.py
|
import sys
from coalib.bearlib.abstractions.Linter import linter
from dependency_management.requirements.PipRequirement import PipRequirement
from coalib.settings.Setting import typed_list
@linter(executable='cpplint',
use_stdout=False,
use_stderr=True,
output_format='regex',
output_regex=r'.+:(?P<line>\d+): (?P<message>.+)')
class CPPLintBear:
"""
Check C++ code for Google's C++ style guide.
For more information, consult <https://github.com/theandrewdavis/cpplint>.
"""
LANGUAGES = {'C++'}
REQUIREMENTS = {PipRequirement('cpplint', '1.3')}
AUTHORS = {'The coala developers'}
AUTHORS_EMAILS = {'coala-devel@googlegroups.com'}
LICENSE = 'AGPL-3.0'
CAN_DETECT = {'Formatting'}
@staticmethod
def create_arguments(filename, file, config_file,
max_line_length: int = 79,
cpplint_ignore: typed_list(str) = (),
cpplint_include: typed_list(str) = (),
):
"""
:param max_line_length:
Maximum number of characters for a line.
When set to 0 allows infinite line length.
:param cpplint_ignore:
List of checkers to ignore.
:param cpplint_include:
List of checkers to explicitly enable.
"""
if not max_line_length:
max_line_length = sys.maxsize
ignore = ','.join('-'+part.strip() for part in cpplint_ignore)
include = ','.join('+'+part.strip() for part in cpplint_include)
return ('--filter=' + ignore + ',' + include,
'--linelength=' + str(max_line_length),
filename)
|
Python
| 0
|
@@ -215,16 +215,53 @@
plint',%0A
+ normalize_line_numbers=True,%0A
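If I read coala's @linter options correctly, normalize_line_numbers=True shifts the line numbers the wrapped tool reports up by one so they land on coala's 1-based convention (cpplint can emit line 0 for file-scope messages). The decorator after the change:

    @linter(executable='cpplint',
            normalize_line_numbers=True,
            use_stdout=False,
            use_stderr=True,
            output_format='regex',
            output_regex=r'.+:(?P<line>\d+): (?P<message>.+)')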
|
6ded4b8d29788c420197fca0ae9552bb4b964ad7
|
Update acute_vertex.py
|
plantcv/plantcv/acute_vertex.py
|
plantcv/plantcv/acute_vertex.py
|
# Script to identify corners/acute angles of an object
import os
import cv2
import numpy as np
import math
from plantcv.plantcv import print_image
from plantcv.plantcv import plot_image
from plantcv.plantcv import params
from plantcv.plantcv import outputs
def acute_vertex(img, obj, win, thresh, sep):
"""acute_vertex: identify corners/acute angles of an object
For each point in contour, get a point before (pre) and after (post) the point of interest,
calculate the angle between the pre and post point.
Inputs:
img = the original image
obj = a contour of the plant object (this should be output from the object_composition.py fxn)
win = win argument specifies the pre and post point distances (a value of 30 worked well for a sample image)
    thresh = a threshold to set for acuteness; keep points with an angle more acute than the threshold (a value of 15
             worked well for a sample image)
sep = the number of contour points to search within for the most acute value
Returns:
acute_points = list of acute points
img2 = debugging image
:param img: ndarray
:param obj: ndarray
:param win: int
:param thresh: int
:param sep: int
:return acute_points: ndarray
:return img2: ndarray
"""
params.device += 1
chain = []
if not np.any(obj):
acute = ('NA', 'NA')
return acute
for i in range(len(obj) - win):
x, y = obj[i].ravel()
pre_x, pre_y = obj[i - win].ravel()
post_x, post_y = obj[i + win].ravel()
# Angle in radians derived from Law of Cosines, converted to degrees
P12 = np.sqrt((x-pre_x)*(x-pre_x)+(y-pre_y)*(y-pre_y))
P13 = np.sqrt((x-post_x)*(x-post_x)+(y-post_y)*(y-post_y))
P23 = np.sqrt((pre_x-post_x)*(pre_x-post_x)+(pre_y-post_y)*(pre_y-post_y))
if (2*P12*P13) > 0.001:
dot = (P12*P12 + P13*P13 - P23*P23)/(2*P12*P13)
elif (2*P12*P13) < 0.001:
dot = (P12*P12 + P13*P13 - P23*P23)/0.001
if dot < -1: # If float exceeds -1 prevent arcos error and force to equal -1
dot = -1
ang = math.degrees(math.acos(dot))
chain.append(ang)
# Select points in contour that have an angle more acute than thresh
index = []
for c in range(len(chain)):
if float(chain[c]) <= thresh:
index.append(c)
    # There are oftentimes several points around tips with acute angles
    # Here we try to pick the most acute angle given a set of contiguous points
    # Sep is the number of contour points to search within for the most acute value
out = []
tester = []
for i in range(len(index)-1):
# print str(index[i])
if index[i+1] - index[i] < sep:
tester.append(index[i])
if index[i+1] - index[i] >= sep:
tester.append(index[i])
# print(tester)
angles = ([chain[d] for d in tester])
keeper = angles.index(min(angles))
t = tester[keeper]
# print str(t)
out.append(t)
tester = []
# Store the points in the variable acute
acute = obj[[out]]
acute_points = []
for pt in acute:
acute_points.append(pt[0].tolist())
img2 = np.copy(img)
# Plot each of these tip points on the image
for i in acute:
x, y = i.ravel()
cv2.circle(img2, (x, y), params.line_thickness, (255, 0, 255), -1)
if params.debug == 'print':
print_image(img2, os.path.join(params.debug_outdir, str(params.device) + '_acute_vertices.png'))
elif params.debug == 'plot':
plot_image(img2)
# Store into global measurements
outputs.add_observation(variable='tip_coordinates', trait='tip coordinates',
method='plantcv.plantcv.acute_vertex', scale='none', datatype=list,
value=acute_points, label='none')
return acute_points, img2
|
Python
| 0.000001
|
@@ -296,16 +296,28 @@
esh, sep
+, label=None
):%0A %22
@@ -1037,16 +1037,108 @@
e value%0A
+ label = optional label parameter, modifies the variable name of observations recorded%0A%0A
%0A Ret
@@ -1326,24 +1326,46 @@
am sep: int%0A
+ :param label: str%0A
:return
@@ -3572,16 +3572,98 @@
), -1)%0A%0A
+ if label == None:%0A prefix = %22%22%0A else:%0A prefix = label + %22_%22%0A%0A
if p
@@ -3766,16 +3766,25 @@
evice) +
+ prefix +
'_acute
@@ -3892,24 +3892,24 @@
easurements%0A
-
outputs.
@@ -3933,16 +3933,25 @@
ariable=
+prefix +
'tip_coo
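The hunks thread an optional label through the function: it lands in the debug filename and prefixes the recorded observation's variable name. The relevant additions, stitched together (the diff spells the guard as `if label == None:`; `label is None` is the idiomatic equivalent):

    def acute_vertex(img, obj, win, thresh, sep, label=None):
        # ... body unchanged up to the debug/reporting section ...
        prefix = "" if label is None else label + "_"
        print_image(img2, os.path.join(params.debug_outdir,
                                       str(params.device) + prefix + '_acute_vertices.png'))
        outputs.add_observation(variable=prefix + 'tip_coordinates',
                                trait='tip coordinates',
                                method='plantcv.plantcv.acute_vertex',
                                scale='none', datatype=list,
                                value=acute_points, label='none')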
|
58c6868cc95a44100f18f20dfe91764727263005
|
Write bytes to fobj if we open it in 'wb' mode
|
django_babel/management/commands/babel.py
|
django_babel/management/commands/babel.py
|
# -*- coding: utf-8 -*-
import os
from distutils.dist import Distribution
from optparse import make_option
from subprocess import call
from django.core.management.base import LabelCommand, CommandError
from django.conf import settings
class Command(LabelCommand):
args = '[makemessages] [compilemessages]'
option_list = LabelCommand.option_list + (
make_option(
'--locale', '-l',
default=None, dest='locale', action='append',
help='Creates or updates the message files for the given locale(s)'
' (e.g pt_BR). Can be used multiple times.'),
make_option('--domain', '-d',
default='django', dest='domain',
help='The domain of the message files (default: "django").'),
make_option('--mapping-file', '-F',
default=None, dest='mapping_file',
help='Mapping file')
)
def handle_label(self, command, **options):
if command not in ('makemessages', 'compilemessages'):
raise CommandError(
"You must either apply 'makemessages' or 'compilemessages'"
)
if command == 'makemessages':
self.handle_makemessages(**options)
if command == 'compilemessages':
self.handle_compilemessages(**options)
def handle_makemessages(self, **options):
locale_paths = list(settings.LOCALE_PATHS)
domain = options.pop('domain')
locales = options.pop('locale')
# support for mapping file specification via setup.cfg
# TODO: Try to support all possible options.
distribution = Distribution()
distribution.parse_config_files(distribution.find_config_files())
mapping_file = options.pop('mapping_file', None)
has_extract = 'extract_messages' in distribution.command_options
if mapping_file is None and has_extract:
opts = distribution.command_options['extract_messages']
try:
mapping_file = opts['mapping_file'][1]
except (IndexError, KeyError):
mapping_file = None
for path in locale_paths:
potfile = os.path.join(path, '%s.pot' % domain)
if not os.path.exists(path):
os.makedirs(path)
if not os.path.exists(potfile):
with open(potfile, 'wb') as fobj:
fobj.write('')
cmd = ['pybabel', 'extract', '-o', potfile]
if mapping_file is not None:
cmd.extend(['-F', mapping_file])
cmd.append(os.path.dirname(os.path.relpath(path)))
call(cmd)
for locale in locales:
pofile = os.path.join(
os.path.dirname(potfile),
locale,
'LC_MESSAGES',
'%s.po' % domain)
if not os.path.isdir(os.path.dirname(pofile)):
os.makedirs(os.path.dirname(pofile))
if not os.path.exists(pofile):
with open(pofile, 'wb') as fobj:
fobj.write('')
cmd = ['pybabel', 'update', '-D', domain,
'-i', potfile,
'-d', os.path.relpath(path),
'-l', locale]
call(cmd)
def handle_compilemessages(self, **options):
locale_paths = list(settings.LOCALE_PATHS)
domain = options.pop('domain')
locales = options.pop('locale')
for path in locale_paths:
for locale in locales:
po_file = os.path.join(
path, locale, 'LC_MESSAGES', domain + '.po'
)
if os.path.exists(po_file):
cmd = ['pybabel', 'compile', '-D', domain,
'-d', path, '-l', locale]
call(cmd)
|
Python
| 0.000001
|
@@ -2401,32 +2401,33 @@
fobj.write(
+b
'')%0A%0A
@@ -3128,16 +3128,17 @@
j.write(
+b
'')%0A%0A
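This is a Python 3 bytes/str fix: a file opened in 'wb' mode only accepts bytes, so fobj.write('') raises TypeError. Minimal reproduction, with a throwaway path:

    with open('/tmp/example.pot', 'wb') as fobj:   # any scratch path works
        fobj.write(b'')   # the str '' would raise TypeError under Python 3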
|
57574cc20092f661ad5b7f4a47de63e16b72db50
|
Upgrade to 1.9
|
accounts/models.py
|
accounts/models.py
|
from __future__ import unicode_literals
import logging
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
import django.core.mail
import django.contrib.auth.models
import django.db.models
import django.db.models.signals
import django.utils.timezone
import django.core.validators
import django.core.urlresolvers
import django.contrib.sites.models
import timezone_field
import localflavor.us.models
logger = logging.getLogger(__name__)
class Company(django.db.models.Model):
created_on = django.db.models.DateTimeField(auto_now_add=True)
updated_on = django.db.models.DateTimeField(auto_now=True)
name = django.db.models.CharField(_('Name'), max_length=100)
street_address = django.db.models.CharField(
_('Street Address'), max_length=200, blank=True)
street_address_2 = django.db.models.CharField(
_('Street Address 2'), max_length=200, blank=True)
city = django.db.models.CharField(_('City'), max_length=100, blank=True)
state = localflavor.us.models.USStateField(blank=True)
postal_code = localflavor.us.models.USZipCodeField(blank=True)
class Meta(django.contrib.auth.models.AbstractBaseUser.Meta):
verbose_name = _('Company')
verbose_name_plural = _('Companies')
def __unicode__(self):
return self.name
def get_address(self):
address = [self.name, ]
if self.street_address:
address.append(self.street_address)
if self.street_address_2:
address.append(self.street_address_2)
if self.city:
address.append('{}, {} {}'.format(
self.city, self.state, self.postal_code))
else:
address.append('{} {}'.format(self.state, self.postal_code))
return address
class UserManager(django.contrib.auth.models.BaseUserManager):
def create_user(self, email, password, first_name, last_name,
**extra_fields):
"""
Creates and saves a User with the given email and password.
"""
email = UserManager.normalize_email(email)
user = User(
email=email, first_name=first_name, last_name=last_name,
is_staff=False, is_active=True, is_superuser=False, **extra_fields)
user.set_password(password)
user.last_login = timezone.now()
user.save(using=self._db)
return user
def create_superuser(self, email, password, first_name, last_name,
**extra_fields):
u = self.create_user(email, password, first_name, last_name,
**extra_fields)
u.is_staff = True
u.is_active = True
u.is_superuser = True
u.save(using=self._db)
return u
class User(django.contrib.auth.models.AbstractBaseUser,
django.contrib.auth.models.PermissionsMixin):
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ('first_name', 'last_name', )
PERMISSION_MASQUERADE = 'accounts.masquerade'
created_on = django.db.models.DateTimeField(auto_now_add=True)
updated_on = django.db.models.DateTimeField(auto_now=True)
is_active = django.db.models.BooleanField(_('Active'), default=True)
is_staff = django.db.models.BooleanField(_('Staff'), default=False)
first_name = django.db.models.CharField(_('First Name'), max_length=50)
last_name = django.db.models.CharField(_('Last Name'), max_length=50)
email = django.db.models.EmailField(_('Email'), unique=True)
timezone = timezone_field.TimeZoneField(default='America/New_York')
company = django.db.models.ForeignKey(Company, null=True, related_name='users')
objects = UserManager()
class Meta(django.contrib.auth.models.AbstractBaseUser.Meta):
verbose_name = _('User')
verbose_name_plural = _('Users')
permissions = (
('masquerade', 'Can Masquerade'),
)
def __init__(self, *args, **kwargs):
super(User, self).__init__(*args, **kwargs)
# hack the admin to change the superuser field verbose name
superuser_field = self._meta.get_field('is_superuser')
superuser_field.verbose_name = _('Superuser')
def __unicode__(self):
return self.get_full_name()
def get_full_name(self):
"""
Returns the first_name plus the last_name, with a space in between.
"""
names = [n for n in (self.first_name, self.last_name, ) if n]
if names:
return ' '.join(names)
else:
return self.email
def get_short_name(self):
"""Returns the short name for the user."""
return self.first_name
def email_user(self, subject, message, from_email=None, **kwargs):
"""
Sends an email to this User.
"""
django.core.mail.send_mail(
subject, message, from_email, [self.email], **kwargs)
class AuditLogEvent(django.db.models.Model):
created_on = django.db.models.DateTimeField(auto_now_add=True)
updated_on = django.db.models.DateTimeField(auto_now=True)
recorded_on = django.db.models.DateTimeField(auto_now_add=True)
user_id = django.db.models.IntegerField(_('User ID'), db_index=True)
user_email = django.db.models.EmailField(_('User Email'), db_index=True)
company = django.db.models.ForeignKey('accounts.Company')
message = django.db.models.TextField(_('Audit Message'))
masquerading_user_id = django.db.models.IntegerField(
_('Masquerading User ID'), db_index=True, blank=True, null=True)
masquerading_user_email = django.db.models.EmailField(
_('Masquerading User Email'), db_index=True, blank=True)
def __unicode__(self):
if self.is_masquerading:
return '{} {} [{}] {}'.format(
self.recorded_on, self.user_email, self.masquerading_user_email,
self.message)
else:
return '{} {} {}'.format(
self.recorded_on, self.user_email, self.message)
def delete(self, using=None):
return
@property
def is_masquerading(self):
return self.masquerading_user_id > 0
|
Python
| 0.00004
|
@@ -3633,16 +3633,53 @@
ll=True,
+ on_delete=django.db.models.SET_NULL,
related
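Django 1.9 starts warning when a ForeignKey is declared without on_delete (it becomes mandatory in 2.0). For this nullable FK, SET_NULL is the sensible choice: deleting a Company detaches its users instead of cascading over them:

    company = django.db.models.ForeignKey(
        Company, null=True,
        on_delete=django.db.models.SET_NULL,
        related_name='users')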
|
99e0b2e29ec5baa525dce54a3bfcf69710f5a59b
|
Fix UserProfile creation
|
accounts/models.py
|
accounts/models.py
|
from django.conf import settings
from django.db import transaction
from django.db.models.signals import post_save
from django.contrib.sites.models import RequestSite
from django.contrib.auth.models import User
from django.db import models
from registration import models as regmodels
from registration.signals import user_registered
from sanitizer.models import SanitizedTextField
from operator import itemgetter
from editor.models import Question, Exam, EditorTag
class RegistrationManager(regmodels.RegistrationManager):
def create_inactive_user(self, username, first_name, last_name, email, password,
site, send_email=True):
"""
Create a new, inactive ``User``, generate a
``RegistrationProfile`` and email its activation key to the
``User``, returning the new ``User``.
By default, an activation email will be sent to the new
user. To disable this, pass ``send_email=False``.
"""
new_user = User.objects.create_user(username, email, password)
if first_name:
new_user.first_name = first_name
if last_name:
new_user.last_name = last_name
new_user.is_active = False
new_user.save()
registration_profile = self.create_profile(new_user)
if send_email:
registration_profile.send_activation_email(site)
return new_user
create_inactive_user = transaction.commit_on_success(create_inactive_user)
class RegistrationProfile(regmodels.RegistrationProfile):
objects = RegistrationManager()
class UserProfile(models.Model):
user = models.OneToOneField(User)
language = models.CharField(max_length=100,default='en-GB')
bio = SanitizedTextField(default='',allowed_tags=settings.SANITIZER_ALLOWED_TAGS,allowed_attributes=settings.SANITIZER_ALLOWED_ATTRIBUTES)
favourite_questions = models.ManyToManyField(Question,blank=True,related_name='fans')
favourite_exams = models.ManyToManyField(Exam,blank=True,related_name='fans')
def sorted_tags(self):
qs = self.user.own_questions
tags = EditorTag.objects.filter(question__author=self.user).distinct()
tag_counts = [(tag,len(qs.filter(tags__id=tag.id))) for tag in tags]
tag_counts.sort(key=itemgetter(1),reverse=True)
return tag_counts
def createUserProfile(sender, instance, **kwargs):
"""Create a UserProfile object each time a User is created ; and link it.
"""
UserProfile.objects.get_or_create(user=instance)
post_save.connect(createUserProfile, sender=User)
|
Python
| 0.000006
|
@@ -2449,16 +2449,30 @@
nstance,
+ user_created,
**kwarg
@@ -2560,24 +2560,50 @@
.%0D%0A %22%22%22%0D%0A
+ if user_created:%0D%0A
UserProf
@@ -2618,15 +2618,8 @@
cts.
-get_or_
crea
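Decoded, the receiver now creates the profile only on first save and uses create() instead of get_or_create(). One caveat worth flagging: Django's post_save sends the flag under the keyword created, so a positional parameter named user_created will not be filled by the signal itself; the conventional signature is:

    def createUserProfile(sender, instance, created, **kwargs):
        """Create a UserProfile the first time a User is saved, and link it."""
        if created:
            UserProfile.objects.create(user=instance)

    post_save.connect(createUserProfile, sender=User)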
|
df7e5f56fdb2a9bc34a0fdf62b5847ee4183d32e
|
Update import_gist.py
|
lib/import_gist/bin/import_gist.py
|
lib/import_gist/bin/import_gist.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
def import_gist(url_gist):
    '''
    import custom functions from gist.github.com
    usage: mod_name = import_gist(url_gist)
    params:
        url_gist: url of gist. be sure to append '/raw/' to the gist url to load script, not html
            e.g. https://gist.githubusercontent.com/cosacog/67ac95feef8a2a1cd373d43a86fe2c9c/raw/
    '''
    import os, sys, urllib, tempfile
    fname_func = 'tmp_func.py'  # temporary file name of .py
    tmp_dir = tempfile.mkdtemp()
    # check url_gist
    # append '/' at the end
    if url_gist[-1] is not '/':
        url_gist = url_gist + '/'
    # append 'raw/' at the end
    if url_gist[-5:] != '/raw/':
        url_gist = url_gist + 'raw/'
    urllib.request.urlretrieve(url_gist, filename=os.path.join(tmp_dir, fname_func))
    sys.path.append(tmp_dir)
    import tmp_func as mod_func
    sys.path.remove(tmp_dir)
    return mod_func
if __name__ == '__main__':
    print("I'm sorry. There is no main script.")
|
Python
| 0.000001
|
@@ -415,16 +415,42 @@
empfile%0A
+ import urllib.request%0A
fnam
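Decoded, the hunk above adds one import after the in-function import line, so that the later urllib.request.urlretrieve() call resolves under Python 3 (reconstruction from the %0A-encoded hunk):

    import os, sys, urllib, tempfile
    import urllib.request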
|
7ef1afc579c62fa0c713d8db0bf17eb09b498a0b
|
Add unittest for random_integers
|
tests/cupy_tests/random_tests/test_sample.py
|
tests/cupy_tests/random_tests/test_sample.py
|
import mock
import unittest
from cupy import cuda
from cupy import testing
from cupy import random
@testing.gpu
class TestRandint(unittest.TestCase):
    _multiprocess_can_split_ = True

    def setUp(self):
        device_id = cuda.Device().id
        self.m = mock.Mock()
        self.m.interval.return_value = 0
        random.generator._random_states = {device_id: self.m}

    def test_value_error(self):
        with self.assertRaises(ValueError):
            random.randint(100, 1)

    def test_high_and_size_are_none(self):
        random.randint(3)
        self.m.interval.assert_called_with(3, None)

    def test_size_is_none(self):
        random.randint(3, 5)
        self.m.interval.assert_called_with(2, None)

    def test_high_is_none(self):
        random.randint(3, None, (1, 2, 3))
        self.m.interval.assert_called_with(3, (1, 2, 3))

    def test_no_none(self):
        random.randint(3, 5, (1, 2, 3))
        self.m.interval.assert_called_with(2, (1, 2, 3))
|
Python
| 0.000013
|
@@ -986,8 +986,586 @@
, 3))%0A%0A%0A
+@testing.gpu%0Aclass TestRandomIntegers(unittest.TestCase):%0A%0A _multiprocess_can_split_ = True%0A%0A def setUp(self):%0A random.sample_.randint = mock.Mock()%0A%0A def test_normal(self):%0A random.random_integers(3, 5)%0A random.sample_.randint.assert_called_with(3, 6, None)%0A%0A def test_high_is_none(self):%0A random.random_integers(3, None)%0A random.sample_.randint.assert_called_with(1, 4, None)%0A%0A def test_size_is_not_none(self):%0A random.random_integers(3, 5, (1, 2, 3))%0A random.sample_.randint.assert_called_with(3, 6, (1, 2, 3))%0A
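Decoded, the appended hunk adds a second test class (reconstruction of the %0A-encoded text above; indentation restored):

@testing.gpu
class TestRandomIntegers(unittest.TestCase):

    _multiprocess_can_split_ = True

    def setUp(self):
        random.sample_.randint = mock.Mock()

    def test_normal(self):
        random.random_integers(3, 5)
        random.sample_.randint.assert_called_with(3, 6, None)

    def test_high_is_none(self):
        random.random_integers(3, None)
        random.sample_.randint.assert_called_with(1, 4, None)

    def test_size_is_not_none(self):
        random.random_integers(3, 5, (1, 2, 3))
        random.sample_.randint.assert_called_with(3, 6, (1, 2, 3))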
|
5aa2a3e4b724784bbedaa5a436893e5ce28f7c45
|
Bump version to 0.2.3
|
fluentcms_emailtemplates/__init__.py
|
fluentcms_emailtemplates/__init__.py
|
# following PEP 440
__version__ = "0.2.2"
|
Python
| 0.000001
|
@@ -32,11 +32,11 @@
= %220.2.
-2
+3
%22%0A
|
eda0e5d60ca30a284c0b6b4fc209e595b5484941
|
Fix _import_from() on Python 2
|
dev/_import.py
|
dev/_import.py
|
# coding: utf-8
from __future__ import unicode_literals, division, absolute_import, print_function
import imp
import sys
import os
from . import build_root, package_name, package_root
if sys.version_info < (3,):
    getcwd = os.getcwdu
else:
    getcwd = os.getcwd
def _import_from(mod, path, mod_dir=None, allow_error=False):
"""
Imports a module from a specific path
:param mod:
A unicode string of the module name
:param path:
A unicode string to the directory containing the module
:param mod_dir:
If the sub directory of "path" is different than the "mod" name,
pass the sub directory as a unicode string
:param allow_error:
If an ImportError should be raised when the module can't be imported
:return:
None if not loaded, otherwise the module
"""
if mod_dir is None:
mod_dir = mod.replace('.', os.sep)
if not os.path.exists(path):
return None
if not os.path.exists(os.path.join(path, mod_dir)) \
and not os.path.exists(os.path.join(path, mod_dir + '.py')):
return None
try:
mod_info = imp.find_module(mod_dir, [path])
return imp.load_module(mod, *mod_info)
except ImportError:
if allow_error:
raise
return None
def _preload(require_oscrypto, print_info):
"""
Preloads asn1crypto and optionally oscrypto from a local source checkout,
or from a normal install
:param require_oscrypto:
A bool if oscrypto needs to be preloaded
:param print_info:
A bool if info about asn1crypto and oscrypto should be printed
"""
if print_info:
print('Working dir: ' + getcwd())
print('Python ' + sys.version.replace('\n', ''))
asn1crypto = None
oscrypto = None
if require_oscrypto:
# Some CI services don't use the package name for the dir
if package_name == 'oscrypto':
oscrypto_dir = package_root
else:
oscrypto_dir = os.path.join(build_root, 'oscrypto')
oscrypto_tests = None
if os.path.exists(oscrypto_dir):
oscrypto_tests = _import_from('oscrypto_tests', oscrypto_dir, 'tests')
if oscrypto_tests is None:
import oscrypto_tests
asn1crypto, oscrypto = oscrypto_tests.local_oscrypto()
else:
if package_name == 'asn1crypto':
asn1crypto_dir = package_root
else:
asn1crypto_dir = os.path.join(build_root, 'asn1crypto')
if os.path.exists(asn1crypto_dir):
asn1crypto = _import_from('asn1crypto', asn1crypto_dir)
if asn1crypto is None:
import asn1crypto
if print_info:
print(
'\nasn1crypto: %s, %s' % (
asn1crypto.__version__,
os.path.dirname(asn1crypto.__file__)
)
)
if require_oscrypto:
print(
'oscrypto: %s backend, %s, %s' % (
oscrypto.backend(),
oscrypto.__version__,
os.path.dirname(oscrypto.__file__)
)
)
|
Python
| 0.997305
|
@@ -1106,24 +1106,145 @@
eturn None%0A%0A
+ if os.sep in mod_dir:%0A append, mod_dir = mod_dir.rsplit(os.sep, 1)%0A path = os.path.join(path, append)%0A%0A
try:%0A
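Decoded, the hunk above inserts a block into _import_from() between the final return None guard and the try (reconstruction; indentation restored from context):

    if os.sep in mod_dir:
        append, mod_dir = mod_dir.rsplit(os.sep, 1)
        path = os.path.join(path, append)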
|
eb4714f64a906c7261ab937d709506650c31023e
|
remove print debug
|
intervention_report_analysis/report/status_webkit.py
|
intervention_report_analysis/report/status_webkit.py
|
# -*- coding: utf-8 -*-
###############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2001-2015 Micronaet S.r.l. (<http://www.micronaet.it>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import os
import sys
import logging
import openerp
import time
import openerp.netsvc as netsvc
import openerp.addons.decimal_precision as dp
from openerp.osv import fields, osv, expression, orm
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from openerp import SUPERUSER_ID
from openerp import tools
from openerp.report import report_sxw
from openerp.tools.translate import _
from openerp.tools.float_utils import float_round as round
from openerp.tools import (DEFAULT_SERVER_DATE_FORMAT,
                           DEFAULT_SERVER_DATETIME_FORMAT,
                           DATETIME_FORMATS_MAP,
                           float_compare)
_logger = logging.getLogger(__name__)
class report_webkit_html(report_sxw.rml_parse):
    # Global parameter for manage report data:
    partner = {}

    def __init__(self, cr, uid, name, context):
        super(report_webkit_html, self).__init__(
            cr, uid, name, context=context)
        self.localcontext.update({
            'time': time,
            'cr': cr,
            'uid': uid,
            'load_data': self._load_data,
            'write_header': self.write_header,
            'write_total': self.write_total,
            'table_start': self.table_start,
            'table_end': self.table_end,
            'dict_operation': self.dict_operation,
        })

    def dict_operation(self, data, value, operation='add'
            # , modify_list=None
            ):
        ''' Dict add value to all keys:
        '''
        # if modify_list is None:
        #     modify_list = ()
        for k in data:
            # if modify_list and (k in modify_list):
            if operation == 'add':
                data[k] += value
            elif operation == 'set':
                data[k] = value
        return

    def table_start(self, header=None):
        ''' Start table element passing header list values
        '''
        if header is None:
            return '<p>#Partner ERR</p><table class="list_table">'
        else:
            return '''
                <p>%(partner)s</p>
                <table class="list_table">
                ''' % header

    def table_end(self, ):
        ''' End table element
        '''
        return '</table>'

    def write_header(self, ):
        ''' Return HTML code for header
        '''
        return '''
            <tr>
                <th>Tipo</th>
                <th>Conto</th>
                <th>Utente</th>
                <th>Data</th>
                <th>Ore</th>
                <th>Ore totali</th>
                <th>Ore interne</th>
                <th>Viaggio</th>
            </tr>'''  # Last 5 cols for value  # <th>Cliente</th>

    def write_total(self, total, break_level, header=None, new_table=False):
        ''' Format and return total HTML table row
            self: instance object
            break_level: values used are partner, type, account, user
            total: dict of all totals for every element
        '''
        print header
        return '%s%s%s%s%s' % (
            '''
            <tr>
                <td colspan='2'>Totali:</td>
                %s
                <td></td>
                %s
                %s
                %s
                %s
            </tr>''' % (
                '<td>%(partner)s - %(type)s - %(account)s</td>' % total[
                    'number'],  # - %(user)s
                '<td>%(partner)2.2f - %(type)2.2f - %(account)2.2f' % total[
                    'hour'],
                '<td>%(partner)2.2f - %(type)2.2f - %(account)2.2f' % total[
                    'hour_total'],
                '<td>%(partner)2.2f - %(type)2.2f - %(account)2.2f' % total[
                    'internal'],
                '<td>%(partner)2.2f - %(type)2.2f - %(account)2.2f' % total[
                    'trip'],
            ),
            self.table_end() if new_table else '',
            '<br />' if new_table else '',
            self.table_start(header) if new_table else '',
            self.write_header() if new_table else '',
        )

    def _load_data(self, data=None):
        ''' Load all data for analytic report
            Search all intervent in period
        '''
        # Reset global variables:
        self.partner = {}

        # Pool used:
        int_pool = self.pool.get('hr.analytic.timesheet')

        # -------------------------------
        # Search depend on filter domain:
        # -------------------------------
        domain = []
        if data['from_date']:
            domain.append(
                ('date_start', '>=', '%s 00:00:00' % data['from_date']))
        if data['to_date']:
            domain.append(
                ('date_start', '<', '%s 00:00:00' % data['to_date']))
        if data.get('user_id', False):
            domain.append(('user_id', '=', data['user_id']))
        if data.get('partner_id', False):
            domain.append(('partner_id', '=', data['partner_id']))

        int_ids = int_pool.search(
            self.cr, self.uid, domain)

        # Start analyse data intervent:
        items = []
        for item in int_pool.browse(self.cr, self.uid, int_ids):
            # if intervention.intervent_partner_id not in self.partner:
            #     self.partner[intervention.intervent_partner_id] = []  # user
            # self.partner[intervention.intervent_partner_id].append(
            #     intervention)
            order = (  # key for order elements:
                item.intervent_partner_id.name,  # Partner
                'Contratti' if item.account_id.partner_id else 'Generico',  # With partner
                item.account_id.name,  # Analytic account
                item.user_id.name,  # Users
                item.date_start,  # Date
            )
            items.append((order, item))
        return sorted(items)
report_sxw.report_sxw(
    'report.webkitinterventstatus',
    'hr.analytic.timesheet',
    'addons/intervention_report_analysis/report/status_webkit.mako',
    parser=report_webkit_html
)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Python
| 0.000009
|
@@ -3975,29 +3975,8 @@
'''%0A
- print header%0A
|
64ae848095215715ea7448c517011d64403dee85
|
Remove useless import
|
geotrek/api/mobile/views/trekking.py
|
geotrek/api/mobile/views/trekking.py
|
from __future__ import unicode_literals
from django.conf import settings
from django.db.models import F
from rest_framework_extensions.mixins import DetailSerializerMixin
from geotrek.api.mobile.serializers import trekking as api_serializers
from geotrek.api.mobile import viewsets as api_viewsets
from geotrek.api.v2.functions import Transform, Length, StartPoint
from geotrek.trekking import models as trekking_models
class TrekViewSet(api_viewsets.GeotrekViewset):
    serializer_class = api_serializers.TrekListSerializer
    serializer_detail_class = api_serializers.TrekDetailSerializer
    filter_fields = ('difficulty', 'themes', 'networks', 'practice')

    def get_queryset(self, *args, **kwargs):
        queryset = trekking_models.Trek.objects.existing()\
            .select_related('topo_object', 'difficulty', 'practice') \
            .prefetch_related('topo_object__aggregations', 'themes', 'networks', 'attachments', 'information_desks') \
            .order_by('pk').annotate(length_2d_m=Length('geom'))
        if self.action == 'list':
            queryset = queryset.annotate(start_point=Transform(StartPoint('geom'), settings.API_SRID))
        else:
            queryset = queryset.annotate(geom2d_transformed=Transform(F('geom'), settings.API_SRID))
        return queryset
class POIViewSet(api_viewsets.GeotrekViewset):
    serializer_class = api_serializers.POIListSerializer
    serializer_detail_class = api_serializers.POIListSerializer
    queryset = trekking_models.POI.objects.existing() \
        .select_related('topo_object', 'type', ) \
        .prefetch_related('topo_object__aggregations', 'attachments') \
        .annotate(geom2d_transformed=Transform(F('geom'), settings.API_SRID),
                  geom3d_transformed=Transform(F('geom_3d'), settings.API_SRID)) \
        .order_by('pk')  # Required for reliable pagination
    filter_fields = ('type',)
|
Python
| 0.000004
|
@@ -103,76 +103,8 @@
F%0A%0A
-from rest_framework_extensions.mixins import DetailSerializerMixin%0A%0A
from
|