| commit (stringlengths 40-40) | subject (stringlengths 1-3.25k) | old_file (stringlengths 4-311) | new_file (stringlengths 4-311) | old_contents (stringlengths 0-26.3k) | lang (stringclasses 3 values) | proba (float64 0-1) | diff (stringlengths 0-7.82k) |
|---|---|---|---|---|---|---|---|
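Each row below pairs a commit with the pre-change file contents and the diff that the commit applied. A minimal sketch of iterating rows with this schema, assuming the table has been exported as JSON Lines (the file name is hypothetical):

```python
import json

# Hypothetical export of the table above, one JSON object per row.
with open("commit_diffs.jsonl") as f:
    for line in f:
        row = json.loads(line)
        # 'proba' is a float in [0, 1]; 'diff' holds percent-encoded hunks.
        print(row["commit"][:8], row["lang"], row["proba"], row["old_file"])
```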
098a4437a4de2f3daef86c33ca8e5cfb98ccc0f3
|
modify the references in reconst_dsi_metrics.py
|
doc/examples/reconst_dsi_metrics.py
|
doc/examples/reconst_dsi_metrics.py
|
"""
=================================================
Calculate DSI metrics
=================================================
We show how to calculate two DSI-based scalar metrics on your dataset: the
return to origin probability (rtop) [Descoteaux2011] and the mean square
displacement (msd) [Wu2007] [Wu2008].
First import the necessary modules:
"""
import nibabel as nib
import numpy as np
import matplotlib.pyplot as plt
from dipy.data import fetch_taiwan_ntu_dsi, read_taiwan_ntu_dsi, get_sphere
from dipy.data import get_data, dsi_voxels
from dipy.reconst.dsi import DiffusionSpectrumModel
"""
Download and read the data for this tutorial.
"""
fetch_taiwan_ntu_dsi()
img, gtab = read_taiwan_ntu_dsi()
"""
img contains a nibabel Nifti1Image object (the data) and gtab contains a
GradientTable object (gradient information, e.g. b-values). For example, to
read the b-values you can write print(gtab.bvals).
Load the raw diffusion data and the affine.
"""
data = img.get_data()
affine = img.get_affine()
print('data.shape (%d, %d, %d, %d)' % data.shape)
"""
Instantiate the Model and apply it to the data.
"""
dsmodel = DiffusionSpectrumModel(gtab, qgrid_size=35, filter_width=18.5)
"""
Let's use just one slice of the data.
"""
dataslice = data[30:70, 20:80, data.shape[2] // 2]  # integer index of the middle axial slice
"""
Normalize the signal by the b0
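That is, divide each q-space sample by the b0 measurement to obtain the
normalized signal

.. math::

    E(\mathbf{q}) = S(\mathbf{q}) / S(\mathbf{0})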
"""
dataslice = dataslice / (dataslice[..., 0, None]).astype(float)
"""
Calculate the return to origin probability (rtop) on the signal, which
corresponds to the integral of the signal.
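In formula form, rtop is the propagator evaluated at the origin, which equals
the integral of the normalized signal over q-space:

.. math::

    \operatorname{rtop} = P(\mathbf{0}) = \int E(\mathbf{q}) \, d\mathbf{q}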
"""
print('Calculating... rtop_signal')
rtop_signal = dsmodel.fit(dataslice).rtop_signal()
"""
Now we calculate the return to origin probability on the propagator,
that corresponds to its central value.
By default the propagator is divided by its sum to obtain a properly
normalized pdf. However, this normalization changes the values of rtop, so to
compare it with the rtop previously calculated on the signal we set the
normalized parameter to False.
"""
print('Calculating... rtop_pdf')
rtop_pdf = dsmodel.fit(dataslice).rtop_pdf(normalized=False)
"""
In theory these two measures should be equal. To verify this, we calculate the
mean squared error (MSE) between the two:
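
.. math::

    \operatorname{MSE} = \frac{1}{N} \sum_{i=1}^{N}
        \left( \operatorname{rtop}^{signal}_i - \operatorname{rtop}^{pdf}_i \right)^2

where the sum runs over the N voxels of the slice.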
"""
mse = np.sum((rtop_signal - rtop_pdf) ** 2) / rtop_signal.size
print("mse = %f" % mse)
"""
mse = 0.000000
Leaving the normalized parameter at its default changes the values of the rtop
but not the contrast between voxels.
"""
print('Calculating... rtop_pdf_norm')
rtop_pdf_norm = dsmodel.fit(dataslice).rtop_pdf()
"""
Let's calculate the mean square displacement on the normalized propagator.
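The msd is the second moment of the propagator [Wu2007], i.e. the average
squared displacement weighted by the displacement probability:

.. math::

    \operatorname{msd} = \int P(\mathbf{r}) \, \|\mathbf{r}\|^2 \, d\mathbf{r}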
"""
print('Calculating... msd_norm')
msd_norm = dsmodel.fit(dataslice).msd_discrete()
"""
Setting the normalized parameter to False calculates the mean square
displacement on the propagator without normalization.
"""
print('Calculating... msd')
msd = dsmodel.fit(dataslice).msd_discrete(normalized=False)
"""
Show the rtop images and save them in rtop.png.
"""
fig = plt.figure(figsize=(6, 6))
ax1 = fig.add_subplot(2, 2, 1, title='rtop_signal')
ax1.set_axis_off()
ind = ax1.imshow(rtop_signal.T, interpolation='nearest', origin='lower')
plt.colorbar(ind)
ax2 = fig.add_subplot(2, 2, 2, title='rtop_pdf_norm')
ax2.set_axis_off()
ind = ax2.imshow(rtop_pdf_norm.T, interpolation='nearest', origin='lower')
plt.colorbar(ind)
ax3 = fig.add_subplot(2, 2, 3, title='rtop_pdf')
ax3.set_axis_off()
ind = ax3.imshow(rtop_pdf.T, interpolation='nearest', origin='lower')
plt.colorbar(ind)
plt.savefig('rtop.png')
"""
.. figure:: rtop.png
   :align: center

   **Return to origin probability**.

Show the msd images and save them in msd.png.
"""
fig = plt.figure(figsize=(7, 3))
ax1 = fig.add_subplot(1, 2, 1, title='msd_norm')
ax1.set_axis_off()
ind = ax1.imshow(msd_norm.T, interpolation='nearest', origin='lower')
plt.colorbar(ind)
ax2 = fig.add_subplot(1, 2, 2, title='msd')
ax2.set_axis_off()
ind = ax2.imshow(msd.T, interpolation='nearest', origin='lower')
plt.colorbar(ind)
plt.savefig('msd.png')
"""
.. figure:: msd.png
   :align: center

   **Mean square displacement**.
.. [Descoteaux2011] Descoteaux M. et al., "Multiple q-shell diffusion
                    propagator imaging", Medical Image Analysis, vol. 15,
                    no. 4, p. 603-621, 2011.
.. [Wu2007] Wu Y. et al., "Hybrid diffusion imaging", NeuroImage, vol. 36,
            p. 617-629, 2007.
.. [Wu2008] Wu Y. et al., "Computation of Diffusion Function Measures
            in q-Space Using Magnetic Resonance Hybrid Diffusion Imaging",
            IEEE Transactions on Medical Imaging, vol. 27, no. 6, p. 858-865,
            2008.
.. include:: ../links_names.inc
"""
|
Python
| 0.000014
|
@@ -228,16 +228,17 @@
aux2011%5D
+_
and mea
@@ -274,16 +274,18 @@
%5BWu2007%5D
+_,
%5BWu2008
@@ -285,16 +285,17 @@
%5BWu2008%5D
+_
on your
@@ -4455,17 +4455,16 @@
Measures
-
%0A%09%09%09in q
@@ -4526,18 +4526,11 @@
ng%22,
- %0A
+%0A%09%09
%09IEE
@@ -4592,24 +4592,18 @@
58-865,%0A
-
+%09%09
%092008%0A%0A.
|
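The diff column stores compact hunks (headed by @@ -offset,length +offset,length @@) whose content is percent-encoded: %0A is a newline, %09 a tab, and %5B/%5D are the [ and ] brackets. A minimal sketch decoding a hunk fragment with the standard library:

```python
from urllib.parse import unquote

# '%5BWu2008%5D' decodes to '[Wu2008]'.
print(unquote("%5BWu2008%5D"))
# '%0A' and '%09' decode to newline and tab.
print(repr(unquote("58-865,%0A%09%092008")))  # '58-865,\n\t\t2008'
```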
c8f560c7fbd65e7071a4743afa008c45442ef7a3
|
wrong varname
|
shop_categories/tests/__init__.py
|
shop_categories/tests/__init__.py
|
# -*- coding: utf-8 -*-
import random
from decimal import Decimal
from shop.models.productmodel import Product # is the overridden CategoryProduct (project.models.product.Product)
from shop_categories.models.categorymodel import Category # is the overridden Category (project.models.category.Category)
from django.test.testcases import TestCase
from django.template.defaultfilters import slugify
def make_category_tree():
top = Category(name='Top category', slug=slugify('Top category'), active=True)
top.save()
level1_first = Category(name='Level1 first', slug=slugify('Level1 first'), active=True, parent=top)
level1_first.save()
level1_second = Category(name='Level1 second', slug=slugify('Level1 second'), active=True, parent=top)
level1_second.save()
level2_first = Category(name='Level2 first', slug=slugify('Level2 first'), active=True, parent=level1_first)
level2_first.save()
level2_first_sub = Category(name='Level2 first sub', slug=slugify('Level2 first sub'), active=True, parent=level2_first)
level2_first_sub.save()
level2_second = Category(name='Level2 second', slug=slugify('Level2 second'), active=True, parent=level1_first)
level2_second.save()
top_two = Category(name='Top category two', slug=slugify('Top category two'), active=True)
top_two.save()
level1_two_first = Category(name='Level1 two first', slug=slugify('Level1 two first'), active=True, parent=top_two)
level1_two_first.save()
level1_two_second = Category(name='Level1 two second', slug=slugify('Level1 two second'), active=True, parent=top_two)
level1_two_second.save()
level1_two_second_sub = Category(name='Level1 two second sub', slug=slugify('Level1 two second sub'), active=True, parent=level1_two_second)
level1_two_first_sub.save()
Category.objects.rebuild()
class CategoryTestCase(TestCase):
def setUp(self):
make_category_tree()
def test_category_unicode(self):
self.assertEqual(unicode(Category.objects.get(slug='level1-first')), 'Top category> Level1 first')
def test_category_short_title(self):
self.assertEqual(Category.objects.get(slug='level1-first').short_title(), 'Level1 first')
def test_category_save(self):
Category.objects.get(slug='level1-first').save()
def test_category_count(self):
self.assertEqual(Category.objects.count(), 6)
def test_category_leaf_path(self):
self.assertEqual(Category.objects.get(slug='level2-first-sub').path, 'top-category/level1-first/level2-first/level2-first-sub')
def test_category_leaf_url(self):
self.assertEqual(Category.objects.get(slug='level2-first-sub').get_absolute_url(), '/shop/catalog/top-category/level1-first/level2-first/level2-first-sub/')
class CategoryProductTestCase(TestCase):
def setUp(self):
make_category_tree()
Product(
name='Product 1',
slug=slugify('Product 1'),
active=True,
unit_price=Decimal(random.randint(50, 1000)),
main_category=Category.objects.get(slug='level2-first-sub')
).save()
Product(
name='Product 2',
slug=slugify('Product 2'),
active=True,
unit_price=Decimal(random.randint(50, 1000)),
main_category=Category.objects.get(slug='level1-first')
).save()
Product(
name='Product 3',
slug=slugify('Product 3'),
active=True,
unit_price=Decimal(random.randint(50, 1000)),
main_category=Category.objects.get(slug='level1-second')
).save()
Product(
name='Product 4 with other treeid',
slug=slugify('Product 3'),
active=True,
unit_price=Decimal(random.randint(50, 1000)),
main_category=Category.objects.get(slug='level1-two-second')
).save()
def test_product_adds_additional_categories(self):
p = Product(
name='Product 5',
slug=slugify('Product 5'),
active=True,
unit_price=Decimal(random.randint(50, 1000)),
main_category=Category.objects.get(slug='level1-second')
)
p.save()
self.assertEqual(p.additional_categories.all()[0].slug, 'level1-second')
def test_product_absolute_url(self):
self.assertEqual(Product.objects.get(slug='product-1').get_absolute_url(),
'/shop/catalog/top-category/level1-first/level2-first/level2-first-sub/product/product-1/')
def test_product_detail(self):
product_url = Product.objects.get(slug='product-1').get_absolute_url()
response = self.client.get(product_url)
self.assertContains(response, '/shop/catalog/top-category/level1-first/level2-first/level2-first-sub/product/product-1/')
def test_list_products_in_category(self):
category = Category.objects.get(slug='level1-first')
response = self.client.get(category.get_absolute_url())
self.assertContains(response, '/shop/catalog/top-category/level1-first/level2-first/level2-first-sub/product/product-1/')
self.assertContains(response, '/shop/catalog/top-category/level1-first/product/product-2/')
self.assertNotContains(response, '/shop/catalog/top-category/level1-second/product/product-3/')
category = Category.objects.get(slug='level1-second')
response = self.client.get(category.get_absolute_url())
self.assertNotContains(response, '/shop/catalog/top-category/level1-first/level2-first/level2-first-sub/product/product-1/')
self.assertNotContains(response, '/shop/catalog/top-category/level1-first/product/product-2/')
self.assertContains(response, '/shop/catalog/top-category/level1-second/product/product-3/')
category = Category.objects.get(slug='top-category')
response = self.client.get(category.get_absolute_url())
self.assertNotContains(response, '/shop/catalog/top-category-two/level1-two-second/level1-two-second-sub/product/product-4/')
category = Category.objects.get(slug='top-category-two')
response = self.client.get(category.get_absolute_url())
self.assertContains(response, '/shop/catalog/top-category-two/level1-two-second/level1-two-second-sub/product/product-4/')
|
Python
| 0.999043
|
@@ -1809,21 +1809,22 @@
el1_two_
-first
+second
_sub.sav
|
c46e2053c0c093c2ee82f13f48787584d48664af
|
Fix reorder unit test for Django 1.8
|
shuup_tests/front/test_reorder.py
|
shuup_tests/front/test_reorder.py
|
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2018, Shuup Inc. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
import pytest
from django.core.urlresolvers import reverse
from django.test.client import Client
from shuup.simple_supplier.module import SimpleSupplierModule
from shuup.testing import factories
from shuup.core.models import ShippingMode
@pytest.mark.django_db
def test_reorder_view():
shop = factories.get_default_shop()
factories.get_default_shipping_method()
factories.get_default_payment_method()
supplier1 = factories.get_supplier(SimpleSupplierModule.identifier, shop=shop)
supplier2 = factories.get_supplier(SimpleSupplierModule.identifier, shop=shop)
assert supplier1.pk != supplier2.pk
product_supplier1 = factories.create_product(
"product_supplier1",
shop=shop,
supplier=supplier1,
default_price=10,
shipping_mode=ShippingMode.NOT_SHIPPED
)
product_supplier2 = factories.create_product(
"product_supplier2",
shop=shop,
supplier=supplier2,
default_price=20,
shipping_mode=ShippingMode.NOT_SHIPPED
)
user = factories.create_random_user("en")
user.set_password("user")
user.save()
customer = factories.create_random_person("en")
customer.user = user
customer.save()
order = factories.create_random_order(
customer=customer,
shop=shop,
products=[product_supplier1, product_supplier2],
completion_probability=0,
random_products=False
)
suppliers = [line.supplier for line in order.lines.products()]
assert supplier1 in suppliers
assert supplier2 in suppliers
client = Client()
client.login(username=user.username, password="user")
# list orders
response = client.get(reverse("shuup:personal-orders"))
assert response.status_code == 200
content = response.content.decode("utf-8")
assert "<td>%d</td>" % order.id in content
assert "<td>Received</td>" in content
# go to order detail
response = client.get(reverse("shuup:show-order", kwargs=dict(pk=order.pk)))
assert response.status_code == 200
content = response.content.decode("utf-8")
assert "Add all products to cart" in content
reorder_url = reverse("shuup:reorder-order", kwargs=dict(pk=order.pk))
assert reorder_url in content
# reorder products
response = client.get(reorder_url)
assert response.status_code == 302
assert response.url == reverse("shuup:basket")
# go to basket
response = client.get(response.url)
assert response.status_code == 200
content = response.content.decode("utf-8")
# ensure the basket contains those products and suppliers
basket_key = client.session["basket_basket_key"]["key"]
from shuup.front.models import StoredBasket
basket = StoredBasket.objects.get(key=basket_key)
lines = basket.data["lines"]
product_supplier = [(line["product_id"], line["supplier_id"]) for line in lines]
assert (product_supplier1.pk, supplier1.pk) in product_supplier
assert (product_supplier2.pk, supplier2.pk) in product_supplier
assert product_supplier1.name in content
assert product_supplier2.name in content
assert "You are unable to proceed to checkout!" not in content
|
Python
| 0.000002
|
@@ -2627,20 +2627,26 @@
onse.url
- ==
+.endswith(
reverse(
@@ -2660,16 +2660,17 @@
basket%22)
+)
%0A%0A #
|
8092ac34f95280adf884336999b481ef5241c2cb
|
update data container description to make sure that scalar values are returned as scalar
|
simphony/scripts/cuba-generate.py
|
simphony/scripts/cuba-generate.py
|
import click
import yaml
# CUBA keywords that are excluded from DataContainers
CUBA_DATA_CONTAINER_EXLCUDE = ['Id', 'Position']
@click.group()
def cli():
""" Auto-generate code from cuba yaml description. """
@cli.command()
@click.argument('input', type=click.File('rb'))
@click.argument('output', type=click.File('wb'))
def python(input, output):
""" Create the CUBA Enum for the DataContainer.
"""
keywords = yaml.safe_load(input)
lines = [
'# code auto-generated by the cuba-generate.py script.\n',
'from enum import IntEnum, unique\n',
'\n',
'\n',
'@unique\n',
'class CUBA(IntEnum):\n',
'\n']
template = " {} = {}\n"
for keyword in keywords:
if keyword['name'] in CUBA_DATA_CONTAINER_EXLCUDE:
continue
lines.append(template.format(keyword['key'], keyword['number']))
output.writelines(lines)
@cli.command()
@click.argument('input', type=click.File('rb'))
@click.argument('output', type=click.File('wb'))
def table(input, output):
""" Create the CUBA DataContainer Table descriptions.
"""
keywords = yaml.safe_load(input)
lines = [
'# code auto-generated by the cuba-generate.py script.\n',
'import tables\n',
'\n',
'\n']
# create Data table description
lines.extend([
'class Data(tables.IsDescription):\n',
'\n'])
template = " {} = tables.{}Col(pos={}, shape=({}))\n"
data_types = {
'string': 'String',
'double': 'Float64',
'integer': 'Int32'}
position = 0
for keyword in keywords:
if keyword['name'] in CUBA_DATA_CONTAINER_EXLCUDE:
continue
if len(keyword['shape']) == 1:
shape = str(keyword['shape'][0]) + ','
else:
shape = ','.join(map(str, keyword['shape']))
lines.append(template.format(
keyword['key'].lower(),
data_types[keyword['type']],
position,
shape))
position += 1
lines.append('\n\n')
# create Mask table description
mask_size = position
lines.extend([
'class Mask(tables.IsDescription):\n',
' mask = tables.BoolCol(shape=({},))\n'.format(mask_size)])
output.writelines(lines)
if __name__ == '__main__':
cli()
|
Python
| 0.000001
|
@@ -1416,69 +1416,8 @@
'%5D)%0A
- template = %22 %7B%7D = tables.%7B%7DCol(pos=%7B%7D, shape=(%7B%7D))%5Cn%22%0A
@@ -1657,35 +1657,219 @@
if
-len(keyword%5B'shape'%5D) == 1:
+keyword%5B'type'%5D == 'string':%0A template = %22 %7B%7D = tables.%7B%7DCol(pos=%7B%7D, itemsize=%7B%7D)%5Cn%22%0A shape = keyword%5B'shape'%5D%5B0%5D%0A else:%0A template = %22 %7B%7D = tables.%7B%7DCol(pos=%7B%7D%7B%7D)%5Cn%22
%0A
@@ -1889,12 +1889,8 @@
e =
-str(
keyw
@@ -1905,18 +1905,185 @@
pe'%5D
-%5B0%5D) + ','
+%0A if len(shape) == 1:%0A if shape%5B0%5D == 1:%0A shape = ''%0A else:%0A shape = ', shape=%7B%7D'.format(shape%5B0%5D)
%0A
@@ -2079,38 +2079,46 @@
ape%5B0%5D)%0A
+
else:%0A
+
shap
@@ -2120,16 +2120,59 @@
shape =
+ ', shape=(%7B%7D)'.format(%0A
','.joi
@@ -2196,24 +2196,25 @@
d%5B'shape'%5D))
+)
%0A lin
|
8cab7a7fffd31c3d7e924faa3f860e4e90579a4d
|
Add default GTM config key
|
sigal/settings.py
|
sigal/settings.py
|
# -*- coding:utf-8 -*-
# Copyright (c) 2009-2016 - Simon Conseil
# Copyright (c) 2013 - Christophe-Marie Duquesne
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import locale
import logging
import os
from os.path import abspath, isabs, join, normpath
from pprint import pformat
from .compat import PY2, text_type
_DEFAULT_CONFIG = {
'albums_sort_attr': 'name',
'albums_sort_reverse': False,
'autorotate_images': True,
'colorbox_column_size': 4,
'copy_exif_data': False,
'destination': '_build',
'files_to_copy': (),
'google_analytics': '',
'ignore_directories': [],
'ignore_files': [],
'img_processor': 'ResizeToFit',
'img_size': (640, 480),
'index_in_url': False,
'jpg_options': {'quality': 85, 'optimize': True, 'progressive': True},
'keep_orig': False,
'links': '',
'locale': '',
'make_thumbs': True,
'medias_sort_attr': 'filename',
'medias_sort_reverse': False,
'mp4_options': ['-crf', '23', '-strict', '-2'],
'orig_dir': 'original',
'orig_link': False,
'output_filename': 'index.html',
'piwik': {'tracker_url': '', 'site_id': 0},
'plugin_paths': [],
'plugins': [],
'show_map': False,
'source': '',
'theme': 'colorbox',
'thumb_dir': 'thumbnails',
'thumb_fit': True,
'thumb_prefix': '',
'thumb_size': (200, 150),
'thumb_suffix': '',
'thumb_video_delay': '0',
'title': '',
'use_assets_cdn': True,
'use_orig': False,
'video_format': 'webm',
'video_size': (480, 360),
'watermark': '',
'webm_options': ['-crf', '10', '-b:v', '1.6M',
'-qmin', '4', '-qmax', '63'],
'write_html': True,
'zip_gallery': False,
'zip_media_format': 'resized',
}
class Status(object):
SUCCESS = 0
FAILURE = 1
def get_thumb(settings, filename):
"""Return the path to the thumb.
examples:
>>> default_settings = create_settings()
>>> get_thumb(default_settings, "bar/foo.jpg")
"bar/thumbnails/foo.jpg"
>>> get_thumb(default_settings, "bar/foo.png")
"bar/thumbnails/foo.png"
for videos, it returns a jpg file:
>>> get_thumb(default_settings, "bar/foo.webm")
"bar/thumbnails/foo.jpg"
"""
path, filen = os.path.split(filename)
name, ext = os.path.splitext(filen)
# FIXME: replace this list with Video.extensions
if ext.lower() in ('.mov', '.avi', '.mp4', '.webm', '.ogv'):
ext = '.jpg'
return join(path, settings['thumb_dir'], settings['thumb_prefix'] +
name + settings['thumb_suffix'] + ext)
def read_settings(filename=None):
"""Read settings from a config file in the source_dir root."""
logger = logging.getLogger(__name__)
logger.info("Reading settings ...")
settings = _DEFAULT_CONFIG.copy()
if filename:
logger.debug("Settings file: %s", filename)
settings_path = os.path.dirname(filename)
tempdict = {}
with open(filename) as f:
code = compile(f.read(), filename, 'exec')
exec(code, tempdict)
settings.update((k, v) for k, v in tempdict.items()
if k not in ['__builtins__'])
# Make the paths relative to the settings file
paths = ['source', 'destination', 'watermark']
if os.path.isdir(join(settings_path, settings['theme'])) and \
os.path.isdir(join(settings_path, settings['theme'],
'templates')):
paths.append('theme')
enc = locale.getpreferredencoding() if PY2 else None
for p in paths:
# paths must be unicode strings so that os.walk will return
# unicode dirnames and filenames
if PY2 and isinstance(settings[p], str):
settings[p] = settings[p].decode(enc)
path = settings[p]
if path and not isabs(path):
settings[p] = abspath(normpath(join(settings_path, path)))
logger.debug("Rewrite %s : %s -> %s", p, path, settings[p])
if settings['title'] and not isinstance(settings['title'], text_type):
settings['title'] = settings['title'].decode('utf8')
for key in ('img_size', 'thumb_size', 'video_size'):
w, h = settings[key]
if h > w:
settings[key] = (h, w)
logger.warning("The %s setting should be specified with the "
"largest value first.", key)
if not settings['img_processor']:
logger.info('No Processor, images will not be resized')
logger.debug('Settings:\n%s', pformat(settings, width=120))
return settings
def create_settings(**kwargs):
"""Create a new default setting copy and initialize it with kwargs."""
settings = _DEFAULT_CONFIG.copy()
settings.update(kwargs)
return settings
|
Python
| 0
|
@@ -1578,24 +1578,57 @@
ytics': '',%0A
+ 'google_tag_manager': '', %0A
'ignore_
|
3159f3fa6d4d055e8a53a0b4f1d798397cc3c3a3
|
The alteration of the context has no effect
|
base_report_to_printer/report.py
|
base_report_to_printer/report.py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Guewen Baconnier
# Copyright 2014 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, exceptions, _
class Report(models.Model):
_inherit = 'report'
def _can_send_report(self, cr, uid, ids, behaviour, printer, document,
context=None):
"""Predicate that decide if report can be sent to printer
If you want to prevent `get_pdf` to send report you can set
the `must_skip_sent_to_printer` key to True in the context
"""
if context is None:
context = self.pool['res.users'].context_get(cr, uid)
if context.get('must_skip_sent_to_printer'):
return False
if behaviour['action'] == 'server' and printer and document:
return True
return False
def print_document(self, cr, uid, ids, report_name, html=None,
data=None, context=None):
""" Print a document, do not return the document file """
if context is None:
context = self.pool['res.users'].context_get(cr, uid)
local_context = dict(context)
local_context['must_skip_sent_to_printer'] = True
document = self.get_pdf(cr, uid, ids, report_name,
html=html, data=data, context=local_context)
report = self._get_report_from_name(cr, uid, report_name)
behaviour = report.behaviour()[report.id]
printer = behaviour['printer']
if not printer:
raise exceptions.Warning(
_('No printer configured to print this report.')
)
return printer.print_document(report, document, report.report_type)
def get_pdf(self, cr, uid, ids, report_name, html=None,
data=None, context=None):
""" Generate a PDF and returns it.
If the action configured on the report is server, it prints the
generated document as well.
"""
if context is None:
context = self.pool['res.users'].context_get(cr, uid)
document = super(Report, self).get_pdf(cr, uid, ids, report_name,
html=html, data=data,
context=context)
report = self._get_report_from_name(cr, uid, report_name)
behaviour = report.behaviour()[report.id]
printer = behaviour['printer']
can_send_report = self._can_send_report(cr, uid, ids,
behaviour, printer, document,
context=context)
if can_send_report:
printer.print_document(report, document, report.report_type)
context['must_skip_sent_to_printer'] = True
return document
|
Python
| 0.999999
|
@@ -2795,102 +2795,8 @@
%22%22%22%0A
- if context is None:%0A context = self.pool%5B'res.users'%5D.context_get(cr, uid)%0A
@@ -3463,64 +3463,8 @@
pe)%0A
- context%5B'must_skip_sent_to_printer'%5D = True%0A
|
0c28e5fa23d7330db3bbed94594706f15a46b910
|
Switch to 1000 iterations for end-to-end tests.
|
end-to-end/demotest.py
|
end-to-end/demotest.py
|
#!/usr/bin/env python
import sys
import json
import os
import requests
import time
import yaml
# Yes, it's a terrible idea to skip cert verification for TLS.
# We really don't care for this test though.
import urllib3
urllib3.disable_warnings()
def call(url, headers=None, iterations=1):
got = {}
for x in range(iterations):
# Yes, it's a terrible idea to skip cert verification for TLS.
# We really don't care for this test though.
result = requests.get(url, headers=headers, verify=False)
version = 'unknown'
sys.stdout.write('.')
sys.stdout.flush()
if result.status_code != 200:
version='failure %d' % result.status_code
elif result.text.startswith('VERSION '):
version=result.text[len('VERSION '):]
else:
version='unknown %s' % result.text
got.setdefault(version, 0)
got[version] += 1
sys.stdout.write("\n")
sys.stdout.flush()
return got
def test_demo(base, v2_wanted):
url = "%s/demo/" % base
attempts = 3
while attempts > 0:
print("2.0.0: attempts left %d" % attempts)
got = call(url, iterations=100)
print(got)
v2_seen = got.get('2.0.0', 0)
delta = abs(v2_seen - v2_wanted)
rc = (delta <= 2)
print("2.0.0: wanted %d, got %d (delta %d) => %s" %
(v2_wanted, v2_seen, delta, "pass" if rc else "FAIL"))
if rc:
return rc
attempts -= 1
print("waiting for retry")
time.sleep(5)
return False
def test_from_yaml(base, yaml_path):
spec = yaml.safe_load(open(yaml_path, "r"))
url = spec['url'].replace('{BASE}', base)
test_num = 0
rc = True
for test in spec['tests']:
test_num += 1
name = test.get('name', "%s.%d" % (os.path.basename(yaml_path), test_num))
headers = test.get('headers', None)
host = test.get('host', None)
versions = test.get('versions', None)
iterations = test.get('iterations', 100)
if not versions:
print("missing versions in %s?" % name)
print("%s" % yaml.safe_dump(test))
return False
if host:
if not headers:
headers = {}
headers['Host'] = host
attempts = 3
while attempts > 0:
print("%s: attempts left %d" % (name, attempts))
print("%s: headers %s" % (name, headers))
got = call(url, headers=headers, iterations=iterations)
print("%s: %s" % (name, json.dumps(got)))
test_ok = True
for version, wanted_count in versions.items():
got_count = got.get(version, 0)
delta = abs(got_count - wanted_count)
print("%s %s: wanted %d, got %d (delta %d)" %
(name, version, wanted_count, got_count, delta))
if delta > 2:
test_ok = False
if test_ok:
print("%s: passed" % name)
break
else:
attempts -= 1
if attempts <= 0:
print("%s: FAILED" % name)
rc = False
return rc
if __name__ == "__main__":
base = sys.argv[1]
if not (base.startswith("http://") or base.startswith("https://")):
base = "http://%s" % base
v2_percent = None
try:
v2_percent = int(sys.argv[2])
except ValueError:
pass
if v2_percent != None:
rc = test_demo(base, v2_percent)
else:
rc = test_from_yaml(base, sys.argv[2])
if rc:
sys.exit(0)
else:
print("FAILED")
sys.exit(1)
|
Python
| 0
|
@@ -1195,16 +1195,17 @@
ions=100
+0
)%0A%0A
@@ -1236,16 +1236,18 @@
_seen =
+((
got.get(
@@ -1253,24 +1253,36 @@
('2.0.0', 0)
+ + 5) // 10)
%0A del
|
e89e721225e916f4c2514f4a6568571abfc2acc0
|
Add slides frame simibar
|
lib/plotter/matching/__init__.py
|
lib/plotter/matching/__init__.py
|
__all__ = ["core", "single_matching_plotter"]
from core import MatchingPlotterBase
class MatchingPlotter(MatchingPlotterBase):
def __init__(self, root, name):
"""
Try to show one matched pair.
Use set_data to set the matched results:
an array of `sid`, `fid`, `matches`
"""
MatchingPlotterBase.__init__(self, root, name)
def __match_info(self, ax, df=None, sid=-1, fid=0):
self.set_matched_pair(sid, fid)
view = self.get_view()
ax.imshow(view[:, :, [2, 1, 0]])
info = "S-{}, F-{}, df: {:5.2f}({})".\
format(sid, fid, df.dist.mean(), len(df))
ax.set_title(info)
def result_grid(self, fig, row=4, col=4, from_=1):
start = from_ - 1
end = from_+(row*col) - 1
for mi, mc in enumerate(self.df[start:end], 1):
ax = fig.add_subplot(15, 4, mi)
self.__match_info(ax, **mc)
def frame_slides_relation(self, ax, matches, answer):
"""
Plot the frame-to-slides relation.
"""
x = [s["sid"] for s in matches]
y = [s["df"].dist.mean() for s in matches]
ax.plot(x, y)
def slides_frames_similarity(self, sids, fids, sims):
pass
|
Python
| 0
|
@@ -40,16 +40,77 @@
tter%22%5D%0A%0A
+from lib.exp.evaluator.ground_truth import GroundTruth as GT%0A
from cor
@@ -1306,8 +1306,796 @@
pass%0A
+%0A def slice_bar(self, ax, x, y, z, start, size, cmm):%0A end = start+size%0A gt = GT(self.root, self.name)%0A for fi, mv, fid in zip(range(1, size+1), z%5Bstart: end%5D, y%5Bstart:end%5D):%0A cr = %5Bcmm(fi*3./size)%5D*len(mv)%0A asid = int(gt.answer(fid))%0A fac = 1%0A if asid %3E 0:%0A print asid, fid%0A cr%5Basid-1%5D = '#FF5698'%0A else:%0A cr = %5B'#aa77FF'%5D*len(mv)%0A mv = mv/max(mv)%0A fac = max(mv)%0A ax.bar(x, mv, fid, zdir='y', color=cr, alpha=0.4)%0A mi = min(xrange(len(mv)), key=mv.__getitem__)%0A ax.bar(%5Bx%5Bmi%5D%5D, %5Bmv%5Bmi%5D*fac/2.0%5D, fid,%0A zdir='y', color=%5B'#44FF32'%5D, alpha=.8)%0A ax.view_init(elev=60., azim=120)%0A
|
95b08f0cb82fa376a6f07d5395bcba343a131dea
|
update labels
|
plantcv/plantcv/hyperspectral/analyze_spectral.py
|
plantcv/plantcv/hyperspectral/analyze_spectral.py
|
# Analyze signal data in a hyperspectral image
import os
import numpy as np
import pandas as pd
from plantcv.plantcv import params
from plantcv.plantcv import outputs
from plotnine import ggplot, aes, geom_line, scale_x_continuous
def analyze_spectral(array, header_dict, mask, histplot=True):
"""This extracts the hyperspectral reflectance values of each pixel writes the values out to
a file. It can also print out a histogram plot of pixel intensity
and a pseudocolor image of the plant.
Inputs:
array = numpy array of hyperspectral reflectance values
header_dict =
mask = Binary mask made from selected contours
histplot = if True plots histogram of intensity values
Returns:
analysis_img = output image
:param array: numpy array
:param header_dict: dict
:param mask: numpy array
:param histplot: bool
:return analysis_img: ggplot
"""
params.device += 1
# Store debug mode
debug = params.debug
params.debug = None
# List of wavelengths recorded created from parsing the header file will be string, make list of floats
wavelength_data = array[np.where(mask > 0)]
wavelength_freq = wavelength_data.mean(axis=0)
min_wavelength = int(np.ceil(float(header_dict["wavelength"][0])))
max_wavelength = int(np.ceil(float(header_dict["wavelength"][-1])))
new_wavelengths = []
new_freq = []
for i, wavelength in enumerate(header_dict["wavelength"]):
new_wavelengths.append(float(wavelength))
new_freq.append((wavelength_freq[i]).astype(np.float))
maxreflectance = np.amax(wavelength_data)
minreflectance = np.amin(wavelength_data)
avgreflectance = np.average(wavelength_data)
medianreflectance = np.median(wavelength_data)
# Store data into outputs class
outputs.add_observation(variable='max_reflectance', trait='maximum reflectance',
method='plantcv.plantcv.hyperspectral.analyze_spectral', scale='degrees', datatype=float,
value=float(maxreflectance), label='reflectance')
outputs.add_observation(variable='min_reflectance', trait='minimum reflectance',
method='plantcv.plantcv.hyperspectral.analyze_spectral', scale='degrees', datatype=float,
value=float(minreflectance), label='reflectance')
outputs.add_observation(variable='mean_reflectance', trait='mean_reflectance',
method='plantcv.plantcv.hyperspectral.analyze_spectral', scale='degrees', datatype=float,
value=float(avgreflectance), label='reflectance')
outputs.add_observation(variable='median_reflectance', trait='median_reflectance',
method='plantcv.plantcv.hyperspectral.analyze_spectral', scale='degrees', datatype=float,
value=float(medianreflectance), label='reflectance')
outputs.add_observation(variable='spectral_frequencies', trait='thermal spectral_frequencies',
method='plantcv.plantcv.hyperspectral.analyze_spectral', scale='frequency', datatype=list,
value=new_freq, label=new_wavelengths)
params.debug = debug
analysis_img = None
if histplot is True:
dataset = pd.DataFrame({'Wavelength': new_wavelengths,
'Reflectance': wavelength_freq})
fig_hist = (ggplot(data=dataset,
mapping=aes(x='Wavelength',
y='Reflectance'))
+ geom_line(color='purple')
+ scale_x_continuous(breaks=list(range(min_wavelength, max_wavelength, 50)))
)
analysis_img = fig_hist
if params.debug == "print":
fig_hist.save(os.path.join(params.debug_outdir, str(params.device) + '_therm_histogram.png'))
elif params.debug == "plot":
print(fig_hist)
return analysis_img
|
Python
| 0.000001
|
@@ -1970,39 +1970,36 @@
ectral', scale='
-degrees
+none
', datatype=floa
@@ -2248,39 +2248,36 @@
ectral', scale='
-degrees
+none
', datatype=floa
@@ -2421,25 +2421,25 @@
trait='mean
-_
+
reflectance'
@@ -2524,39 +2524,36 @@
ectral', scale='
-degrees
+none
', datatype=floa
@@ -2697,33 +2697,33 @@
', trait='median
-_
+
reflectance',%0A
@@ -2816,15 +2816,12 @@
le='
-degrees
+none
', d
@@ -2988,24 +2988,16 @@
it='
-thermal
spectral
_fre
@@ -2984,33 +2984,33 @@
trait='spectral
-_
+
frequencies',%0A
|
cd59979ab446d7613ec7df5d5737539464918edf
|
Fix span boundary handling in Spanish noun_chunks (#5860)
|
spacy/lang/es/syntax_iterators.py
|
spacy/lang/es/syntax_iterators.py
|
# coding: utf8
from __future__ import unicode_literals
from ...symbols import NOUN, PROPN, PRON, VERB, AUX
from ...errors import Errors
def noun_chunks(doclike):
doc = doclike.doc
if not doc.is_parsed:
raise ValueError(Errors.E029)
if not len(doc):
return
np_label = doc.vocab.strings.add("NP")
left_labels = ["det", "fixed", "neg"] # ['nunmod', 'det', 'appos', 'fixed']
right_labels = ["flat", "fixed", "compound", "neg"]
stop_labels = ["punct"]
np_left_deps = [doc.vocab.strings.add(label) for label in left_labels]
np_right_deps = [doc.vocab.strings.add(label) for label in right_labels]
stop_deps = [doc.vocab.strings.add(label) for label in stop_labels]
token = doc[0]
while token and token.i < len(doclike):
if token.pos in [PROPN, NOUN, PRON]:
left, right = noun_bounds(
doc, token, np_left_deps, np_right_deps, stop_deps
)
yield left.i, right.i + 1, np_label
token = right
token = next_token(token)
def is_verb_token(token):
return token.pos in [VERB, AUX]
def next_token(token):
try:
return token.nbor()
except IndexError:
return None
def noun_bounds(doc, root, np_left_deps, np_right_deps, stop_deps):
left_bound = root
for token in reversed(list(root.lefts)):
if token.dep in np_left_deps:
left_bound = token
right_bound = root
for token in root.rights:
if token.dep in np_right_deps:
left, right = noun_bounds(
doc, token, np_left_deps, np_right_deps, stop_deps
)
if list(
filter(
lambda t: is_verb_token(t) or t.dep in stop_deps,
doc[left_bound.i : right.i],
)
):
break
else:
right_bound = right
return left_bound, right_bound
SYNTAX_ITERATORS = {"noun_chunks": noun_chunks}
|
Python
| 0
|
@@ -722,65 +722,28 @@
-token = doc%5B0%5D%0A while token and token.i %3C len(
+for token in
doclike
-)
:%0A
|
3d0886a277aa8cf3525a0915c3795f37f3d2cd85
|
fix -A
|
lib/svtplay_dl/service/urplay.py
|
lib/svtplay_dl/service/urplay.py
|
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
from __future__ import absolute_import
import re
import json
import copy
from svtplay_dl.service import Service, OpenGraphThumbMixin
from svtplay_dl.utils.urllib import urljoin, urlparse
from svtplay_dl.fetcher.hls import hlsparse
from svtplay_dl.log import log
from svtplay_dl.error import ServiceError
from svtplay_dl.subtitle import subtitle
from svtplay_dl.utils import filenamify
class Urplay(Service, OpenGraphThumbMixin):
supported_domains = ['urplay.se', 'ur.se', 'betaplay.ur.se', 'urskola.se']
def get(self):
data = self.get_urldata()
match = re.search(r"urPlayer.init\((.*)\);", data)
if not match:
yield ServiceError("Can't find json info")
return
if self.exclude():
yield ServiceError("Excluding video")
return
data = match.group(1)
jsondata = json.loads(data)
if len(jsondata["subtitles"]) > 0:
for sub in jsondata["subtitles"]:
if "label" in sub:
absurl = urljoin(self.url, sub["file"].split(",")[0])
if absurl.endswith("vtt"):
subtype = "wrst"
else:
subtype = "tt"
if self.options.get_all_subtitles:
yield subtitle(copy.copy(self.options), subtype, absurl, "-" + filenamify(sub["label"]))
else:
yield subtitle(copy.copy(self.options), subtype, absurl)
if "streamer" in jsondata["streaming_config"]:
basedomain = jsondata["streaming_config"]["streamer"]["redirect"]
else:
lbjson = self.http.request("get", "https:{}".format(jsondata["streaming_config"]["loadbalancer"])).text
lbjson = json.loads(lbjson)
basedomain = lbjson["redirect"]
http = "https://{0}/{1}".format(basedomain, jsondata["file_http"])
hd = None
if len(jsondata["file_http_hd"]) > 0:
http_hd = "https://{0}/{1}".format(basedomain, jsondata["file_http_hd"])
hls_hd = "{0}{1}".format(http_hd, jsondata["streaming_config"]["http_streaming"]["hls_file"])
hd = True
hls = "{0}{1}".format(http, jsondata["streaming_config"]["http_streaming"]["hls_file"])
streams = hlsparse(self.options, self.http.request("get", hls), hls)
for n in list(streams.keys()):
yield streams[n]
if hd:
streams = hlsparse(self.options, self.http.request("get", hls_hd), hls_hd)
for n in list(streams.keys()):
yield streams[n]
def find_all_episodes(self, options):
parse = urlparse(self.url)
episodes = []
if parse.netloc == "urskola.se":
data = self.get_urldata()
match = re.search('data-limit="[^"]+" href="([^"]+)"', data)
if match:
res = self.http.get(urljoin("http://urskola.se", match.group(1)))
data = res.text
tags = re.findall('<a class="puff tv video" title="[^"]+" href="([^"]+)"', data)
for i in tags:
url = urljoin("http://urskola.se/", i)
if url not in episodes:
episodes.append(url)
else:
match = re.search("/program/\d+-(\w+)-", parse.path)
if not match:
log.error("Can't find any videos")
return None
keyword = match.group(1)
all_links = re.findall('card-link" href="([^"]+)"', self.get_urldata())
for i in all_links:
match = re.search("/program/\d+-(\w+)-", i)
if match and match.group(1) == keyword:
episodes.append(urljoin("https://urplay.se/", i))
episodes_new = []
n = 0
for i in episodes:
if n == options.all_last:
break
if i not in episodes_new:
episodes_new.append(i)
n += 1
return episodes_new
|
Python
| 0
|
@@ -3027,32 +3027,33 @@
et(urljoin(%22http
+s
://urskola.se%22,
@@ -3147,16 +3147,24 @@
s=%22puff
+program
tv video
@@ -3194,32 +3194,56 @@
%5B%5E%22%5D+)%22', data)%0A
+ print(tags)%0A
for
@@ -3288,16 +3288,17 @@
in(%22http
+s
://ursko
|
d04052a5cf330ef7b9ec977361bc6fd5558a311d
|
Fix another test.
|
apps/sumo/tests/test_middleware.py
|
apps/sumo/tests/test_middleware.py
|
from django.http import HttpResponsePermanentRedirect
from nose.plugins.skip import SkipTest
from nose.tools import eq_
from test_utils import RequestFactory
from sumo.middleware import PlusToSpaceMiddleware
from sumo.tests import TestCase
from sumo.urlresolvers import get_best_language
class TrailingSlashMiddlewareTestCase(TestCase):
def test_no_trailing_slash(self):
response = self.client.get(u'/en-US/ohnoez')
eq_(response.status_code, 404)
def test_404_trailing_slash(self):
response = self.client.get(u'/en-US/ohnoez/')
eq_(response.status_code, 404)
def test_remove_trailing_slash(self):
response = self.client.get(u'/en-US/home/?xxx=\xc3')
eq_(response.status_code, 301)
assert response['Location'].endswith('/en-US/home?xxx=%C3%83')
class PlusToSpaceTestCase(TestCase):
rf = RequestFactory()
ptsm = PlusToSpaceMiddleware()
def test_plus_to_space(self):
"""Pluses should be converted to %20."""
request = self.rf.get('/url+with+plus')
response = self.ptsm.process_request(request)
assert isinstance(response, HttpResponsePermanentRedirect)
eq_('/url%20with%20plus', response['location'])
def test_query_string(self):
"""Query strings should be maintained."""
request = self.rf.get('/pa+th', {'a': 'b'})
response = self.ptsm.process_request(request)
eq_('/pa%20th?a=b', response['location'])
def test_query_string_unaffected(self):
"""Pluses in query strings are not affected."""
request = self.rf.get('/pa+th?var=a+b')
response = self.ptsm.process_request(request)
eq_('/pa%20th?var=a+b', response['location'])
def test_pass_through(self):
"""URLs without a + should be left alone."""
request = self.rf.get('/path')
assert not self.ptsm.process_request(request)
def test_with_locale(self):
"""URLs with a locale should keep it."""
request = self.rf.get('/pa+th', {'a': 'b'})
request.locale = 'ru'
response = self.ptsm.process_request(request)
eq_('/ru/pa%20th?a=b', response['location'])
def test_smart_query_string(self):
"""The request QUERY_STRING might not be unicode."""
request = self.rf.get(u'/pa+th')
request.locale = 'ja'
request.META['QUERY_STRING'] = 's=\xe3\x82\xa2'
response = self.ptsm.process_request(request)
eq_('/ja/pa%20th?s=%E3%82%A2', response['location'])
class BestLanguageTests(TestCase):
def test_english_only(self):
"""Any way you slice it, this should be 'en-US'."""
best = get_best_language('en-US, en;q=0.5')
eq_('en-US', best)
def test_exact_match_language(self):
"""Exact match of a locale with only a language subtag."""
best = get_best_language('fr, en-US;q=0.5')
eq_('fr', best)
def test_exact_match_region(self):
"""Exact match of a locale with language and region subtags."""
best = get_best_language('pt-BR, en-US;q=0.5')
eq_('pt-BR', best)
def test_english_alias(self):
"""Our canonical English locale is 'en-US'."""
best = get_best_language('en, fr;q=0.5')
eq_('en-US', best)
def test_overspecific_alias(self):
"""Our Irish locale is 'ga-IE'."""
best = get_best_language('ga, fr;q=0.5')
eq_('ga-IE', best)
def test_prefix_alias(self):
"""A generic request for Portuguese should go to 'pt-PT'."""
best = get_best_language('pt, fr;q=0.5')
eq_('pt-PT', best)
def test_nonprefix_alias(self):
"""We only have a single Norwegian locale."""
raise SkipTest("Figure out what's up with the Norwegian locales")
best = get_best_language('nn-NO, nb-NO;q=0.7, fr;q=0.3')
eq_('no', best)
def test_script_alias(self):
"""Our traditional Chinese locale is 'zh-TW'."""
best = get_best_language('zh-Hant, fr;q=0.5')
eq_('zh-TW', best)
def test_non_existent(self):
"""If we don't have any matches, return false."""
best = get_best_language('qaz-ZZ, qaz;q=0.5')
eq_(False, best)
def test_second_choice(self):
"""Respect the user's preferences during the first pass."""
best = get_best_language('fr-FR, de;q=0.5')
eq_('de', best)
def test_prefix_fallback(self):
"""No matches during the first pass. Fall back to prefix."""
best = get_best_language('fr-FR, de-DE;q=0.5')
eq_('fr', best)
def test_english_fallback(self):
"""Fall back to our canonical English locale, 'en-US'."""
best = get_best_language('en-GB, fr-FR;q=0.5')
eq_('en-US', best)
def test_non_existent_fallback(self):
"""Respect user's preferences as much as possible."""
best = get_best_language('qaz-ZZ, fr-FR;q=0.5')
eq_('fr', best)
|
Python
| 0.000037
|
@@ -684,20 +684,26 @@
'/en-US/
-home
+docs/files
/?xxx=%5Cx
@@ -803,12 +803,18 @@
-US/
-home
+docs/files
?xxx
|
d5cfab00d7667b42749f2ef054536fdaa8cb9d80
|
Fix bug with completely empty apt_config.yaml.
|
apt_config_tool/apt_config_tool.py
|
apt_config_tool/apt_config_tool.py
|
#!/usr/bin/env python2.7
# -*- mode: python; encoding: utf-8; -*-
from __future__ import division, absolute_import, unicode_literals, print_function
from future_builtins import *
"""apt-config-tool: Set up apt and install packages as part of a scripted provisioning process (such as building a Dockerfile).
(c) 2014-2015 Kevin Kelley <kelleyk@kelleyk.net>. All rights reserved.
This script is made available under a BSD license. See the LICENSE file for details.
It comes with no warranty express or implied. If it breaks, you get to keep both pieces.
"""
import os
import os.path
import re
import sys
import json
import logging
import argparse
import subprocess
from itertools import chain
from pipes import quote as shell_quote # shlex.quote in Py3
import yaml
from intensional import Re
from .apt_proxy_utils import get_apt_proxy
log = logging.getLogger('apt-config-tool')
def build_parser():
p = argparse.ArgumentParser()
# sp = p.add_subparsers(help='command help')
p.add_argument('-v', '--verbose', action='store_true')
# p_preprocess = sp.add_parser('preprocess', help='Convert YAML configuration to ready-to-run shell script.')
p_preprocess = p
p_preprocess.add_argument('input_file', metavar='apt-config.yaml')
p_preprocess.add_argument('output_file', metavar='apt-config.sh', nargs='?')
# p_preprocess.add_argument('--apt-proxy', metavar='proxy url', nargs='?', default=True) # TODO: Should have a flag to set/disable this.
p_preprocess.set_defaults(func=cmd_preprocess)
# p_run = sp.add_parser('run')
# p_run.add_argument('config_file', metavar='path', nargs='?', default='/opt/apt-config-tool/apt-config.json')
# p_run.set_defaults(func=cmd_run)
return p
def cmd_preprocess(args):
proxy = get_apt_proxy()
with open(args.input_file, 'r') as f:
data = yaml.load(f.read())
output = [
'#!/usr/bin/env bash',
'',
'set -e',
'export DEBIAN_FRONTEND=noninteractive',
'',
]
if proxy:
output.append('echo {} > /etc/apt/apt.conf.d/80proxy'.format(shell_quote(
'Acquire::http::Proxy "{}";'.format(proxy))))
# TODO: We really only need wget if we're going to have to fetch a key.
output.append('apt-get update') # Must update first; otherwise, there are no package lists.
output.extend(apt_install(('wget',)))
for key_spec in data.get('keys') or ():
output.extend(install_key(key_spec))
for source_name, source_spec in (data.get('sources') or {}).items():
output.extend(install_source(source_name, source_spec))
for ppa_spec in data.get('ppas') or ():
output.extend(install_ppa(ppa_spec))
output.extend(('apt-get update', 'apt-get dist-upgrade -yq'))
output.extend(apt_install(data.get('packages') or ()))
output.extend((
'apt-get autoremove -y',
'apt-get clean',
'rm -rf /var/lib/apt/lists/*',
))
if proxy:
output.append('rm /etc/apt/apt.conf.d/80proxy')
output = '\n'.join(output)
if not args.output_file:
print(output)
else:
skip_write = False
if os.path.exists(args.output_file):
with open(args.output_file, 'r') as f:
existing_output = f.read()
if existing_output == output:
print('{}: Output would be unchanged; not modifying the output file!'.format(sys.argv[0]))
skip_write = True
if not skip_write:
with open(args.output_file, 'w') as f:
f.write(output)
def apt_install(packages):
return ('apt-get install -yq --no-install-recommends ' + ' '.join(packages),)
# def cmd_run(args):
# with open(args.config_file, 'r') as f:
# data = json.loads(f.read())
# pass
def install_key(key_spec):
if 'url' in key_spec:
return ('wget -qO - {} | apt-key add -'.format(shell_quote(key_spec['url'])),)
elif 'keyid' in key_spec:
keyserver = key_spec.get('keyserver', 'hkp://keyserver.ubuntu.com:80')
keyid = key_spec['keyid']
return ('apt-key adv --keyserver {} --recv {}'.format(shell_quote(keyserver), shell_quote(keyid)),)
else:
raise Exception('Not sure what to do with key description: {}'.format(key_spec))
def install_source(source_name, source_spec):
for key_spec in source_spec['keys']:
for line in install_key(key_spec):
yield line
for line in source_spec['sources']:
yield 'echo {} >> /etc/apt/sources.list.d/{}.list'.format(shell_quote(line), source_name)
def install_ppa(ppa_spec):
yield 'add-apt-repository -y ppa:{}'.format(ppa_spec)
def main():
args = build_parser().parse_args()
if args.verbose:
logging.basicConfig(level=logging.DEBUG, stream=sys.stderr)
logging.debug('herro')
# log.setLevel(logging.DEBUG)
return args.func(args)
if __name__ == '__main__':
raise SystemExit(main())
|
Python
| 0
|
@@ -1864,16 +1864,22 @@
.read())
+ or %7B%7D
%0A%0A ou
|
d07b48c018d8edf5c8dc3689e22a0c4e551f79a7
|
Add single file output option
|
cube.py
|
cube.py
|
#!/usr/bin/env python
import numpy as np
from scipy import ndimage, misc
import sys, math, os
import argparse
parser = argparse.ArgumentParser(description='Turn a panorama image into a cube map (6 images)')
parser.add_argument("--size", default=512, type=int, help="Size of output image sides")
parser.add_argument("--prefix", default="side_", help="Prefix of output images")
parser.add_argument("--type", default="jpg", help="File Type to save as, jpg, png etc.")
parser.add_argument("--dir", default="./", help="Directory in which to put the output files")
parser.add_argument("input", help="Input panorama file")
args = parser.parse_args()
SIZE = args.size
HSIZE = SIZE / 2.0
im = ndimage.imread(args.input)
side_im = np.zeros((SIZE, SIZE, 3), np.uint8)
for i in range(0,6):
pid = os.fork()
if pid != 0:
continue
it = np.nditer(side_im, flags=['multi_index'], op_flags=['readwrite'])
while not it.finished:
axA = it.multi_index[0]
axB = it.multi_index[1]
c = it.multi_index[2]
z = -axA + HSIZE
if i == 0:
x = HSIZE
y = -axB + HSIZE
elif i == 1:
x = -HSIZE
y = axB - HSIZE
elif i == 2:
x = axB - HSIZE
y = HSIZE
elif i == 3:
x = -axB + HSIZE
y = -HSIZE
elif i == 4:
z = HSIZE
x = axB - HSIZE
y = axA - HSIZE
elif i == 5:
z = -HSIZE
x = axB - HSIZE
y = -axA + HSIZE
r = math.sqrt(float(x*x + y*y + z*z))
theta = math.acos(float(z)/r)
phi = math.atan2(float(y),x)
ix = (im.shape[1]-1)*phi/(2*math.pi)
iy = (im.shape[0]-1)*(theta)/math.pi
it[0] = im[iy, ix, c]
it.iternext()
misc.imsave(os.path.join(args.dir, "%s%d.%s"%(args.prefix,i,args.type)), side_im)
# Children exit here
sys.exit(0)
os.waitpid(-1, 0)
|
Python
| 0.000004
|
@@ -551,24 +551,150 @@
put files%22)%0A
+parser.add_argument(%22--onefile%22, help=%22Save output as one concatenated file, still uses intermediate files as temp storage.%22)%0A
parser.add_a
@@ -738,16 +738,16 @@
file%22)%0A
-
%0Aargs =
@@ -882,17 +882,26 @@
.uint8)%0A
+pids = %5B%5D
%0A
-
for i in
@@ -933,16 +933,16 @@
.fork()%0A
-
if p
@@ -946,24 +946,49 @@
f pid != 0:%0A
+ pids.append(pid)%0A
cont
@@ -2130,27 +2130,516 @@
)%0A%0A%0A
- %0Aos.waitpid(-1, 0
+# Thise seems to work better than waitpid(-1, 0), in that case sometimes the%0A# files still don't exist and we get an error.%0Afor pid in pids: %0A os.waitpid(pid, 0)%0A%0Aif args.onefile:%0A ifiles = %5B%5D%0A for i in range(0,6):%0A ifiles.append(misc.imread(os.path.join(args.dir, %22%25s%25d.%25s%22%25(args.prefix,i,args.type))))%0A onefile = np.concatenate(ifiles, axis=1)%0A misc.imsave(args.onefile, onefile) %0A for i in range(0,6):%0A os.unlink(os.path.join(args.dir, %22%25s%25d.%25s%22%25(args.prefix,i,args.type))
)%0A
|
75cb305c025ca3549c721faacb5ea51297c80052
|
Use GitPython
|
buster.py
|
buster.py
|
"""Ghost Buster. Static site generator for Ghost.
Usage:
buster.py generate [--domain=<local-address>]
buster.py preview
buster.py setup [--gh-repo=<repo-url>]
buster.py deploy
buster.py (-h | --help)
buster.py --version
Options:
-h --help Show this screen.
--version Show version.
--domain=<local-address> Address of local ghost installation [default: local.tryghost.org].
--gh-repo=<repo-url> URL of your gh-pages repository.
"""
# XXX Assume static dir to be current dir if not specified in args
import os
import re
import shutil
import sys
import SocketServer
import SimpleHTTPServer
from docopt import docopt
from time import gmtime, strftime
STATIC_DIR = 'static'
arguments = docopt(__doc__, version='0.1')
static_path = os.path.join(os.path.dirname(__file__), STATIC_DIR)
if arguments['generate']:
command = ("wget \\"
"--recursive \\" # follow links to download entire site
"--page-requisites \\" # grab everything: css / inlined images
"--domains {0} \\" # don't grab anything outside ghost
"--no-parent \\" # don't go to parent level
"--directory-prefix {1} \\" # download contents to static/ folder
"--no-host-directories \\" # don't create domain named folder
"{0}").format(arguments['--domain'], STATIC_DIR)
os.system(command)
elif arguments['preview']:
os.chdir(static_path)
Handler = SimpleHTTPServer.SimpleHTTPRequestHandler
httpd = SocketServer.TCPServer(("", 9000), Handler)
print "Serving at port 9000"
# gracefully handle interrupt here
httpd.serve_forever()
elif arguments['setup']:
if arguments['--gh-repo']:
repo_url = arguments['--gh-repo']
else:
repo_url = raw_input("Enter the Github repository URL:\n").strip()
# Create a fresh new static files directory
if os.path.isdir(static_path):
confirm = raw_input("This will destroy everything inside static/."
" Are you sure you want to continue? (y/N)").strip()
if confirm not in ('y', 'Y'):
sys.exit(0)
shutil.rmtree(static_path)
os.mkdir(static_path)
os.chdir(static_path)
# User/Organization page -> master branch
# Project page -> gh-pages branch
branch = 'gh-pages'
regex = re.compile(".*[\w-]+\.github\.(?:io|com).*")
if regex.match(repo_url):
branch = 'master'
# Prepare git repository
os.system("git init")
if branch == 'gh-pages':
os.system("git checkout -b gh-pages")
os.system("git remote add origin {}".format(repo_url))
print "All set! You can generate and deploy now."
elif arguments['deploy']:
os.chdir(static_path)
os.system("git add -A .")
current_time = strftime("%Y-%m-%d %H:%M:%S", gmtime())
os.system("git commit -m 'Blog update at {}'".format(current_time))
os.system("git push origin {}".format(branch))
print "Good job! Deployed to Github Pages."
elif arguments['domain']:
pass
|
Python
| 0.000001
|
@@ -98,16 +98,31 @@
ddress%3E%5D
+ %5B--dir=%3Cpath%3E%5D
%0A buste
@@ -133,16 +133,31 @@
preview
+ %5B--dir=%3Cpath%3E%5D
%0A buste
@@ -189,16 +189,31 @@
po-url%3E%5D
+ %5B--dir=%3Cpath%3E%5D
%0A buste
@@ -223,16 +223,31 @@
y deploy
+ %5B--dir=%3Cpath%3E%5D
%0A buste
@@ -385,16 +385,85 @@
ersion.%0A
+ --dir=%3Cpath%3E Path of directory to store static pages.%0A
--doma
@@ -617,75 +617,8 @@
%0A%22%22%22
-%0A# XXX Assume static dir to be current dir if not specified in args
%0A%0Aim
@@ -757,30 +757,28 @@
ime%0A
-%0ASTATIC_DIR = 'static'
+from git import Repo
%0A%0Aar
@@ -818,27 +818,93 @@
='0.1')%0A
-static_path
+if arguments%5B'dir'%5D:%0A STATIC_PATH = arguments%5B'dir'%5D%0Aelse:%0A STATIC_PATH
= os.pa
@@ -938,26 +938,24 @@
ile__),
-STATIC_DIR
+'static'
)%0A%0Aif ar
@@ -1574,11 +1574,12 @@
TIC_
-DIR
+PATH
)%0A%0A
@@ -1637,35 +1637,35 @@
os.chdir(
-static_path
+STATIC_PATH
)%0A%0A Handl
@@ -2120,27 +2120,27 @@
h.isdir(
-static_path
+STATIC_PATH
):%0A
@@ -2380,72 +2380,19 @@
ree(
-static_path)%0A%0A os.mkdir(static_path)%0A os.chdir(static_path
+STATIC_PATH
)%0A%0A
@@ -2649,29 +2649,57 @@
-os.system(%22git init%22)
+repo = Repo.init(STATIC_PATH)%0A git = repo.git%0A
%0A
@@ -2736,31 +2736,20 @@
-os.system(%22
git
-
+.
checkout
-b
@@ -2748,20 +2748,20 @@
kout
- -b
+(b='
gh-pages
%22)%0A
@@ -2760,59 +2760,44 @@
ages
-%22
+'
)%0A
-os.system(%22git
+repo.create_
remote
- add
+('
origin
- %7B%7D%22.format(
+',
repo
@@ -2801,17 +2801,16 @@
epo_url)
-)
%0A%0A pr
@@ -2892,58 +2892,73 @@
-os.chdir(static_path)%0A os.system(%22git add -A .%22
+repo = Repo(STATIC_PATH)%0A index = repo.index%0A index.add('.'
)%0A%0A
@@ -3023,33 +3023,21 @@
-os.system(%22git
+index.
commit
- -m
+(
'Blo
@@ -3051,17 +3051,16 @@
e at %7B%7D'
-%22
.format(
@@ -3075,16 +3075,66 @@
time))%0A%0A
+ origin = repo.remote.origin%0A origin.push()%0A
os.s
|
0c453d48e3d1b0dd80acffeae122c797335454f7
|
Fix setting cache dir
|
pw_env_setup/py/pw_env_setup/cipd_setup/update.py
|
pw_env_setup/py/pw_env_setup/cipd_setup/update.py
|
#!/usr/bin/env python
# Copyright 2020 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Installs or updates prebuilt tools.
Must be tested with Python 2 and Python 3.
The stdout of this script is meant to be executed by the invoking shell.
"""
from __future__ import print_function
import argparse
import json
import os
import re
import shutil
import subprocess
import sys
import tempfile
def parse(argv=None):
"""Parse arguments."""
script_root = os.path.join(os.environ['PW_ROOT'], 'pw_env_setup', 'py',
'pw_env_setup', 'cipd_setup')
git_root = subprocess.check_output(
('git', 'rev-parse', '--show-toplevel'),
cwd=script_root,
).decode('utf-8').strip()
parser = argparse.ArgumentParser(description=__doc__.splitlines()[0])
parser.add_argument(
'--install-dir',
dest='root_install_dir',
default=os.path.join(git_root, '.cipd'),
)
parser.add_argument('--package-file',
dest='package_files',
metavar='PACKAGE_FILE',
action='append')
parser.add_argument('--cipd',
default=os.path.join(script_root, 'wrapper.py'))
parser.add_argument('--cache-dir',
default=os.environ.get(
'CIPD_CACHE_DIR',
os.path.expanduser('~/.cipd-cache-dir')))
return parser.parse_args(argv)
def check_auth(cipd, package_files):
"""Check have access to CIPD pigweed directory."""
paths = []
for package_file in package_files:
with open(package_file, 'r') as ins:
# This is an expensive RPC, so only check the first few entries
# in each file.
for i, entry in enumerate(json.load(ins)):
if i >= 3:
break
parts = entry['path'].split('/')
while '${' in parts[-1]:
parts.pop(-1)
paths.append('/'.join(parts))
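    # Illustrative sketch (hypothetical entry): a package path such as
    #   "fuchsia/third_party/cmake/${platform}"
    # has its templated tail stripped above, so the access checks below run
    # against "fuchsia/third_party/cmake".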
try:
output = subprocess.check_output([cipd, 'auth-info'],
stderr=subprocess.STDOUT).decode()
logged_in = True
username = None
match = re.search(r'Logged in as (\S*)\.', output)
if match:
username = match.group(1)
except subprocess.CalledProcessError:
logged_in = False
for path in paths:
# Not catching CalledProcessError because 'cipd ls' seems to never
# return an error code unless it can't reach the CIPD server.
output = subprocess.check_output([cipd, 'ls', path],
stderr=subprocess.STDOUT).decode()
if 'No matching packages' not in output:
continue
# 'cipd ls' only lists sub-packages but ignores any packages at the
# given path. 'cipd instances' will give versions of that package.
# 'cipd instances' does use an error code if there's no such package or
# that package is inaccessible.
try:
subprocess.check_output([cipd, 'instances', path],
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
stderr = lambda *args: print(*args, file=sys.stderr)
stderr()
stderr('=' * 60)
stderr('ERROR: no access to CIPD path "{}"'.format(path))
if logged_in:
username_part = ''
if username:
username_part = '({}) '.format(username)
stderr('Your account {}does not have access to this '
'path'.format(username_part))
else:
stderr('Try logging in with this command:')
stderr()
stderr(' {} auth-login'.format(cipd))
stderr('=' * 60)
return False
return True
def write_ensure_file(package_file, ensure_file):
with open(package_file, 'r') as ins:
data = json.load(ins)
# TODO(pwbug/103) Remove 30 days after bug fixed.
if os.path.isdir(ensure_file):
shutil.rmtree(ensure_file)
with open(ensure_file, 'w') as outs:
outs.write('$VerifiedPlatform linux-amd64\n'
'$VerifiedPlatform mac-amd64\n'
'$ParanoidMode CheckPresence\n')
for entry in data:
outs.write('@Subdir {}\n'.format(entry.get('subdir', '')))
outs.write('{} {}\n'.format(entry['path'],
' '.join(entry['tags'])))
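# A minimal sketch of the transformation above, with a hypothetical entry: a
# package-file item like
#   {"path": "tools/foo/${platform}", "tags": ["version:1.2.3"], "subdir": "foo"}
# is written into the ensure file, after the fixed header lines, as
#   @Subdir foo
#   tools/foo/${platform} version:1.2.3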
def update(
cipd,
package_files,
root_install_dir,
cache_dir,
env_vars=None,
):
"""Grab the tools listed in ensure_files."""
if not check_auth(cipd, package_files):
return False
# TODO(mohrr) use os.makedirs(..., exist_ok=True).
if not os.path.isdir(root_install_dir):
os.makedirs(root_install_dir)
if env_vars:
env_vars.prepend('PATH', root_install_dir)
env_vars.set('PW_CIPD_INSTALL_DIR', root_install_dir)
env_vars.set('CIPD_CACHE_DIR', cache_dir)
pw_root = None
if env_vars:
pw_root = env_vars.get('PW_ROOT', None)
if not pw_root:
pw_root = os.environ['PW_ROOT']
# Run cipd for each json file.
for package_file in package_files:
if os.path.splitext(package_file)[1] == '.ensure':
ensure_file = package_file
else:
ensure_file = os.path.join(
root_install_dir,
os.path.basename(
os.path.splitext(package_file)[0] + '.ensure'))
write_ensure_file(package_file, ensure_file)
install_dir = os.path.join(
root_install_dir,
os.path.basename(os.path.splitext(package_file)[0]))
cmd = [
cipd,
'ensure',
'-ensure-file', ensure_file,
'-root', install_dir,
'-log-level', 'warning',
'-max-threads', '0', # 0 means use CPU count.
] # yapf: disable
# TODO(pwbug/135) Use function from common utility module.
with tempfile.TemporaryFile(mode='w+') as temp:
print(*cmd, file=temp)
try:
subprocess.check_call(cmd,
stdout=temp,
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
temp.seek(0)
sys.stderr.write(temp.read())
raise
# Set environment variables so tools can later find things under, for
# example, 'share'.
name = os.path.basename(install_dir)
if env_vars:
# Some executables get installed at top-level and some get
# installed under 'bin'.
env_vars.prepend('PATH', install_dir)
env_vars.prepend('PATH', os.path.join(install_dir, 'bin'))
env_vars.set('PW_{}_CIPD_INSTALL_DIR'.format(name.upper()),
install_dir)
# Windows has its own special toolchain.
if os.name == 'nt':
env_vars.prepend('PATH',
os.path.join(install_dir, 'mingw64', 'bin'))
return True
if __name__ == '__main__':
update(**vars(parse()))
sys.exit(0)
|
Python
| 0.000004
|
@@ -6524,16 +6524,53 @@
rning',%0A
+ '-cache-dir', cache_dir,%0A
|
d5b7647f848e351cafa4082dde76c90c750fa0fe
|
Add trigger id and location to BigQueryFuture result so subsequent tasks can query job status and result.
|
py/gps_building_blocks/cloud/workflows/futures.py
|
py/gps_building_blocks/cloud/workflows/futures.py
|
# python3
# coding=utf-8
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Future: the return type for async tasks.
"""
import time
from typing import Any, Mapping, Optional
from googleapiclient import discovery
import google.auth
class Result:
"""Wrapper for results of async tasks."""
def __init__(self, trigger_id: str, is_success: bool,
result: Optional[Any] = None, error: Optional[Any] = None):
"""Initializes the Result object.
Args:
      trigger_id: The id associated with the async task. Needs to be unique
        across the whole workflow.
is_success: Is the task successfully finished.
result: The result of the task.
error: The error, typically a string message.
"""
self.trigger_id = trigger_id
self.is_success = is_success
self.result = result
self.error = error
class Future:
"""Return type for async tasks."""
all_futures = []
def __init_subclass__(cls, **kwargs):
"""Adds future subclass to the list of all available future classes.
Args:
**kwargs: other args
"""
super().__init_subclass__(**kwargs)
cls.all_futures.append(cls)
def __init__(self, trigger_id: str):
"""Initializes the Future object.
Args:
trigger_id: The trigger id to be associated with this async task. This id
is used to trigger the corresponding function flow task to be
marked as finished. For example, in a BigQuery job, the job id
can be used as a trigger id.
"""
self.trigger_id = trigger_id
@classmethod
def handle_message(cls, message: Mapping[str, Any]) -> Optional[Result]:
"""Handles the external message(event).
This method needs to be overwritten by subclasses.
Args:
message: The message dict to be handled.
Returns:
A Result object, if the message can be parsed and handled, or None if the
message is ignored.
"""
raise NotImplementedError('Please implement class method handle_message!')
class BigQueryFuture(Future):
"""Return type for async big query task."""
@classmethod
def handle_message(cls, message: Mapping[str, Any]) -> Optional[Result]:
"""Handles bigquery task finish messages.
If the message is a bigquery message, then parse it and return its status,
otherwise just return None.
Args:
message: The message JSON dictionary.
Returns:
Parsed task result from the message or None.
"""
if _get_value(message, 'resource.type') == 'bigquery_resource':
bq_job_id = _get_value(
message,
'protoPayload.serviceData.jobCompletedEvent.job.jobName.jobId')
code = _get_value(message, 'protoPayload.status.code')
if code:
# The current behavior of BQ job status logs is empty status dict when
# no errors (in this case code will be None), and all error codes are
# non-zero.
error = _get_value(message, 'protoPayload.status.message')
return Result(trigger_id=bq_job_id, is_success=False, error=error)
else:
return Result(trigger_id=bq_job_id, is_success=True)
else:
return None
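  # Sketch of the fields consulted above (hypothetical message contents):
  #   {"resource": {"type": "bigquery_resource"},
  #    "protoPayload": {
  #      "serviceData": {"jobCompletedEvent": {"job": {"jobName": {"jobId": "job_123"}}}},
  #      "status": {}}}
  # An empty status dict (code is None) is treated as success.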
class DataFlowFuture(Future):
r"""Return type for async DataFlow task.
To use this future, you need to set up a log router that routes DataFlow job
  complete logs into your PubSub topic for external messages. For example:
```
gcloud logging sinks create dataflow_complete_sink
pubsub.googleapis.com/projects/$PROJECT_ID/topics/$TOPIC_EXTERNAL \
--log-filter='resource.type="dataflow_step" AND textPayload="Worker pool
stopped."'
sink_service_account=$(gcloud logging sinks describe dataflow_complete_sink
|grep writerIdentity| sed 's/writerIdentity: //')
gcloud pubsub topics add-iam-policy-binding $TOPIC_EXTERNAL \
--member $sink_service_account --role roles/pubsub.publisher
```
  Upon being called, the future class extracts the DataFlow job id from the
  "Worker pool stopped." logs and checks the job status using the Google Cloud
  APIs.
"""
STATUS_CHECK_RETRY_TIMES = 10
STATUS_CHECK_SLEEP_SECS = 10
@classmethod
def handle_message(cls, message: Mapping[str, Any]) -> Optional[Result]:
"""Handles DataFlow task finish messages.
If the message is a DataFlow message, then parse it and return its status,
otherwise just return None.
Args:
message: The message JSON dictionary.
Returns:
Parsed task result from the message or None.
"""
if _get_value(message, 'resource.type') == 'dataflow_step':
labels = _get_value(message, 'resource.labels')
job_id = labels['job_id']
region = labels['region']
job_name = labels['job_name']
_, project = google.auth.default()
dataflow = discovery.build('dataflow', 'v1b3')
request = dataflow.projects().locations().jobs().get(
jobId=job_id,
location=region,
projectId=project)
retry = cls.STATUS_CHECK_RETRY_TIMES
while retry > 0:
retry -= 1
response = request.execute()
if response['currentState'] == 'JOB_STATE_DONE':
return Result(trigger_id=job_id, is_success=True)
elif response['currentState'] == 'JOB_STATE_RUNNING':
time.sleep(cls.STATUS_CHECK_SLEEP_SECS)
else:
error = {
'job_id': job_id,
'job_name': job_name,
'state': response['currentState']
}
return Result(trigger_id=job_id, is_success=False, error=error)
# Returns timeout error if running out of retries
error = {
'job_id': job_id,
'job_name': job_name,
'state': 'TIMEOUT CHECKING'
}
return Result(trigger_id=job_id, is_success=False, error=error)
else:
return None
def _get_value(obj: Mapping[str, Any], keypath: str):
"""Gets a value from a dictionary using dot-separated keys.
Args:
obj: A dictionary, which can be multi-level.
keypath: Keys separated by dot.
Returns:
Value from the dictionary using multiple keys in order, for example
`_get_value(d, 'a.b.c')` is equivalent to `d['a']['b']['c']`. If the key
does not exist at any level, return None.
"""
try:
for key in keypath.split('.'):
obj = obj[key]
except KeyError:
return None
return obj
|
Python
| 0.000003
|
@@ -3202,16 +3202,141 @@
jobId')%0A
+ location = _get_value(%0A message,%0A 'protoPayload.serviceData.jobCompletedEvent.job.jobName.location')%0A
co
@@ -3741,36 +3741,112 @@
:%0A re
-turn Result(
+sult = %7B'job_id': bq_job_id, 'location': location%7D%0A return Result(result=result,
trigger_id=b
|
a0afdc5f38c237918b2bb6906c977e83ba1574a0
|
allow defining a mandatory output extension
|
carpet.py
|
carpet.py
|
import tempfile
import os
class TempFileContext:
remove_at_exit = True
removable_files = []
"""
Base class to create 'with' contexts.
The __init__ method must define:
    - self.removable_files <list>. This list will hold a list of filenames which will
      be removed at the end of the context, or when calling self.delete().
    - self.tempfile <string>. Temporary file of interest, returned by the "with" statement.
"""
def __init__(self, file_extension=""):
self.tempfile = tempfile.mktemp() + file_extension
def __enter__(self):
return self.tempfile
def __exit__(self, exc_type, exc_val, exc_tb):
if self.remove_at_exit:
self.delete()
else:
self.remove_intermediate_files()
def delete(self):
self.remove_tempfile()
self.remove_intermediate_files()
def remove_tempfile(self):
os.remove(self.tempfile)
def remove_intermediate_files(self):
        for filename in self.removable_files:
            os.remove(filename)  # explicit loop: map() would be lazy on Python 3
def create_context_class(core_function):
"""
This function is used to create context classes using a function
(core_function) to provide some functionality.
By context class I mean classes that can be used like this:
with ContextClass(whatever) as something:
do whatever with "something"
where "something" is only available within the scope of the with block.
core_function must accept at least two arguments:
- input file -> input file to process
    - output file -> output file of the processing
A typical example of core_function will be a function that transforms
between two data formats. For example, say we have a function jpg2png.
Normally we would use it this way: jpg2png("photo.jpg", "photo.png"). Now
    suppose that we only need "photo.png" for a temporary step in a pipeline. In
    this case we would have to care about choosing a location for "photo.png",
    and about deleting it at the end.
    Context classes allow handling the temporary storage and removal, and would
be used like this:
with Jpg2Png("photo.jpg") as tmp_png_file:
do_whatever(tmp_png_file)...
    Here tmp_png_file would be a pathname referring to a .png file created from
our "photo.jpg". We can use this pathname to open it, copy it, process it,
etc. without taking care of where it is. Also, once we exit the 'with'
block it will be deleted and we won't have to care about it anymore.
"""
# This is our mold of Context Class :)
class GenericContextClass(TempFileContext):
def __init__(self, *args, **kwargs):
self.removable_files = []
self.tempfile = tempfile.mktemp()
core_function(args[0], self.tempfile, *args[1:], **kwargs)
self.remove_at_exit = True
return GenericContextClass
|
Python
| 0.000001
|
@@ -1060,16 +1060,37 @@
function
+, output_extension=%22%22
):%0A %22
@@ -2750,16 +2750,41 @@
mktemp()
+ + %22.%22 + output_extension
%0A
|
db67db3cea880e40d1982149fea86699c15b5f75
|
change append to add (for the set in part 1)
|
day3.py
|
day3.py
|
#!/usr/local/bin/python3
from collections import namedtuple
with open('day3_input.txt') as f:
instructions = f.read().rstrip()
Point = namedtuple('Point', ['x', 'y'])
location = Point(0, 0)
visited = {location}
def new_loc(current_loc, instruction):
if instruction == '^':
xy = current_loc.x, current_loc.y + 1
elif instruction == 'v':
xy = current_loc.x, current_loc.y - 1
elif instruction == '>':
xy = current_loc.x + 1, current_loc.y
elif instruction == '<':
xy = current_loc.x - 1, current_loc.y
return Point(*xy)
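# Quick sketch of new_loc: each instruction moves one grid step, e.g.
#   new_loc(Point(0, 0), '>')  # -> Point(x=1, y=0)
#   new_loc(Point(0, 0), 'v')  # -> Point(x=0, y=-1)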
for char in instructions:
location = new_loc(location, char)
visited.append(location)
print('At least one present:', len(visited))
# Part two
santa_loc = Point(0, 0)
robo_loc = Point(0, 0)
visited = {santa_loc, robo_loc}
for idx, char in enumerate(instructions):
if idx % 2 == 0: # Santa
santa_loc = new_loc(santa_loc, char)
visited.add(santa_loc)
else: # robot
robo_loc = new_loc(robo_loc, char)
visited.add(robo_loc)
print('At least one present with santa and robot:', len(visited))
|
Python
| 0
|
@@ -656,12 +656,9 @@
ed.a
-ppen
+d
d(lo
|
ea90bf0b009118c108f39a15f4eb64f32c4e1eee
|
Resolve #23: resize text boxes according to the number of lines (10 max).
|
days.py
|
days.py
|
from Tkinter import *
import ncore
class VerticalScrolledFrame(Frame):
"""A pure Tkinter scrollable frame that actually works!
* Use the 'interior' attribute to place widgets inside the scrollable frame
* Construct and pack/place/grid normally
* This frame only allows vertical scrolling
"""
def __init__(self, parent, *args, **kw):
Frame.__init__(self, parent, *args, **kw)
# create a canvas object and a vertical scrollbar for scrolling it
vscrollbar = Scrollbar(self, orient=VERTICAL)
vscrollbar.pack(fill=Y, side=RIGHT, expand=FALSE)
canvas = Canvas(self, bd=0, highlightthickness=0,
yscrollcommand=vscrollbar.set)
canvas.pack(side=LEFT, fill=BOTH, expand=TRUE)
vscrollbar.config(command=canvas.yview)
# reset the view
canvas.xview_moveto(0)
canvas.yview_moveto(0)
# create a frame inside the canvas which will be scrolled with it
self.interior = interior = Frame(canvas)
interior_id = canvas.create_window(0, 0, window=interior,
anchor=NW)
# track changes to the canvas and frame width and sync them,
# also updating the scrollbar
def _configure_interior(event):
# update the scrollbars to match the size of the inner frame
size = (interior.winfo_reqwidth(), interior.winfo_reqheight())
canvas.config(scrollregion="0 0 %s %s" % size)
if interior.winfo_reqwidth() != canvas.winfo_width():
# update the canvas's width to fit the inner frame
canvas.config(width=interior.winfo_reqwidth())
interior.bind('<Configure>', _configure_interior)
def _configure_canvas(event):
if interior.winfo_reqwidth() != canvas.winfo_width():
# update the inner frame's width to fill the canvas
canvas.itemconfigure(interior_id, width=canvas.winfo_width())
canvas.bind('<Configure>', _configure_canvas)
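# Usage sketch for the frame above (mirrors how Days uses it below): widgets
# must be placed on the .interior attribute, not on the frame itself:
#   frame = VerticalScrolledFrame(root)
#   frame.pack()
#   Button(frame.interior, text="More").pack()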
class Days(Frame):
def __init__(self, master=None, nc=None, project=None, limit=3):
self.m = master
self.nc = nc
self.project=project
self.gui_fill()
self.days = []
self.limit = limit
limit = (limit+1) *-1 #Gets the previous two days and this one.
l = sorted(self.nc.get_all_dates(project))[limit:]
for day in l:
self.days.append(Day(self, day))
self._repack()
def gui_fill(self):
self.f = Frame.__init__(self, self.m)
self.pack()
self.frame = VerticalScrolledFrame(self.f)
self.frame.pack()
Button(self.frame.interior, text="More", command=self._more).pack()
def _more(self, event=None):
"""Adds 3 more days to the beginning of the list."""
tlist = []
limit = (self.limit+4) *-1
oldlimit = (self.limit+1) * -1
self.limit += 3
l = sorted(self.nc.get_all_dates(self.project))[limit:oldlimit]
for day in self.days:
day.gui_forget()
for day in l:
tlist.append(Day(self, day))
self.days = tlist + self.days
self._repack()
def _repack(self):
for day in self.days:
day.gui_pack()
self.days[-1].text.yview(END)
class Day(object):
def __init__(self, parent=None, date=None):
self.date = date
self.p = parent
self.gui_fill()
def gui_fill(self):
"""Fills gui."""
self.text = Text(self.p.frame.interior, height=10, width=77,
wrap=WORD, bg='light blue', spacing1=5)
for row in self.p.nc.print_project_day(self.p.project, self.date):
s = str(row[0]) + ' ' + str(row[1]) + ' ' + str(row[3]) + '\n'
self.text.insert(END, s)
def gui_forget(self):
"""For pack forgetting"""
self.text.pack_forget()
def gui_pack(self):
"""For packing"""
self.text.pack()
if __name__=="__main__":
path = "/home/bgorges/Tools/noteTaker"
root = Tk()
nc = ncore.noteCore(dbpath=path)
app = Days(master=root, nc=nc, project="Other", limit=2)
app.mainloop()
|
Python
| 0
|
@@ -2763,32 +2763,65 @@
._more).pack()%0A%0A
+ def add(self):%0A pass%0A%0A
def _more(se
@@ -2829,32 +2829,32 @@
f, event=None):%0A
-
%22%22%22Adds
@@ -3382,18 +3382,136 @@
ext.
-yview(END)
+config(height=10) #For better entry.%0A self.days%5B-1%5D.text.yview(END) #maybe make these two statements%0A #a decorator
%0A%0A%0Ac
@@ -3833,16 +3833,34 @@
ing1=5)%0A
+ lines = 0%0A
@@ -4037,16 +4037,125 @@
(END, s)
+%0A lines += self.count_lines(s)%0A if lines %3C 10:%0A self.text.config(height=lines+1)
%0A%0A de
@@ -4287,24 +4287,24 @@
packing%22%22%22%0A
-
self
@@ -4317,16 +4317,273 @@
pack()%0A%0A
+ def count_lines(self, s):%0A lines = s.split(%22%5Cn%22)%0A count = len(lines) - 1%0A for row in lines:%0A l = len(row)/77%0A if l %3C 1.0:%0A continue%0A count += int(l)%0A return count%0A %0A%0A
if __nam
|
2bcdf5e6e2e7bbf7113f3cbb1fce18ed778b9b62
|
Update conductance calculation code.
|
reveal_user_classification/quality/conductance.py
|
reveal_user_classification/quality/conductance.py
|
__author__ = 'Georgios Rizos (georgerizos@iti.gr)'
import numpy as np
def conductance(adjacency_matrix, node_array):
number_of_nodes = adjacency_matrix.shape[0]
node_array_bar = np.setdiff1d(np.arange(number_of_nodes), node_array)
submatrix = adjacency_matrix[np.ix_(node_array, node_array)]
submatrix_bar = adjacency_matrix[np.ix_(node_array_bar, node_array_bar)]
submatrix_volume = submatrix.getnnz() # TODO: If empty?
submatrix_bar_volume = submatrix_bar.getnnz() # TODO: If empty?
matrix_volume = adjacency_matrix.getnnz()
cut_volume = (matrix_volume - submatrix_volume - submatrix_bar_volume)/2
try:
cut_conductance = cut_volume/min(submatrix_volume, submatrix_bar_volume)
except ZeroDivisionError:
cut_conductance = np.Inf
return cut_conductance
def conductance_and_clustering_coefficient(adjacency_matrix, node_array, seed_node):
number_of_nodes = adjacency_matrix.shape[0]
node_array_bar = np.setdiff1d(np.arange(number_of_nodes), node_array)
submatrix = adjacency_matrix[np.ix_(node_array, node_array)]
submatrix_bar = adjacency_matrix[np.ix_(node_array_bar, node_array_bar)]
submatrix_volume = submatrix.getnnz() # TODO: If empty?
submatrix_bar_volume = submatrix_bar.getnnz() # TODO: If empty?
matrix_volume = adjacency_matrix.getnnz()
cut_volume = (matrix_volume - submatrix_volume - submatrix_bar_volume)/2
cut_conductance = cut_volume/min(submatrix_volume, submatrix_bar_volume)
new_node_array = np.setdiff1d(node_array, seed_node)
clustering_coefficient = adjacency_matrix[np.ix_(new_node_array, new_node_array)]
clustering_coefficient = clustering_coefficient.getnnz()/(new_node_array.size*new_node_array.size)
return cut_conductance, clustering_coefficient
def fast_conductance(array_of_arrays, node_array, matrix_volume):
submatrix_volume = 0
cut_volume = 0
for node in node_array:
neighbors = array_of_arrays[node]
degree = neighbors.size
common = np.intersect1d(node_array, neighbors).size
submatrix_volume += common
cut_volume += degree - common
submatrix_bar_volume = matrix_volume - submatrix_volume - 2*cut_volume
try:
cut_conductance = cut_volume/min(submatrix_volume, submatrix_bar_volume)
except ZeroDivisionError:
cut_conductance = np.Inf
return cut_conductance, cut_volume, submatrix_volume
def incremental_conductance(array_of_arrays, node_array, new_node, cut_volume, submatrix_volume, matrix_volume):
# TODO: What if I have ones in the diagonal?
neighbors = array_of_arrays[new_node]
degree = neighbors.size
common = np.intersect1d(node_array, neighbors).size
submatrix_volume += common
cut_volume += degree - common
submatrix_bar_volume = matrix_volume - submatrix_volume - 2*cut_volume
try:
cut_conductance = cut_volume/min(submatrix_volume, submatrix_bar_volume)
except ZeroDivisionError:
cut_conductance = np.Inf
return cut_conductance, cut_volume, submatrix_volume
def decremental_conductance(array_of_arrays, node_array, new_node, cut_volume, submatrix_volume, matrix_volume):
# TODO: What if I have ones in the diagonal?
neighbors = array_of_arrays[new_node]
degree = neighbors.size
common = np.intersect1d(node_array, neighbors).size
submatrix_volume -= common
cut_volume += common - degree
submatrix_bar_volume = matrix_volume - submatrix_volume - 2*cut_volume
try:
cut_conductance = cut_volume/min(submatrix_volume, submatrix_bar_volume)
except ZeroDivisionError:
cut_conductance = np.Inf
return cut_conductance, cut_volume, submatrix_volume
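# A minimal sketch of the quantity computed above: for a node set S,
#   conductance(S) = cut(S, S_bar) / min(vol(S), vol(S_bar))
# The incremental/decremental variants reuse cut_volume and submatrix_volume
# from a previous call instead of recomputing them from scratch.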
|
Python
| 0
|
@@ -3703,28 +3703,29 @@
cut_volume, submatrix_volume
+%0A
|
40ea261dbb53524ace1b371510d9e4f04772d8b9
|
change the method called to confirm sale orders
|
sale_automatic_workflow/automatic_workflow_job.py
|
sale_automatic_workflow/automatic_workflow_job.py
|
# -*- coding: utf-8 -*-
###############################################################################
#
# sale_automatic_workflow for OpenERP
# Copyright (C) 2011 Akretion Sébastien BEAU <sebastien.beau@akretion.com>
# Copyright 2013 Camptocamp SA (Guewen Baconnier)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
"""
Some comments about the implementation
In order to validate the invoice and the picking, we have to use
scheduled actions, because if we jump directly through the various steps in the
workflow of the invoice and the picking, the sale order workflow will be
broken.
The explanation is 'simple'. Example with the invoice workflow: When we
are in the sale order at the workflow router, a transition like a signal
or condition will change the step of the workflow to the step 'invoice';
this step will launch the creation of the invoice. If the invoice is
directly validated and reconciled with the payment, the subworkflow will
end and send a signal to the sale order workflow. The problem is that
the sale order workflow has not yet finished applying the step 'invoice',
so the signal of the subworkflow will be lost because the step 'invoice'
is still not finished. The step 'invoice' should be finished before
receiving the signal. This means that we cannot directly validate every
step of the workflow in the same transaction.
If my explanation is not clear, contact me by email and I will improve
it: sebastien.beau@akretion.com
"""
import logging
from contextlib import contextmanager
from openerp import models, api
_logger = logging.getLogger(__name__)
@contextmanager
def commit(cr):
"""
Commit the cursor after the ``yield``, or rollback it if an
exception occurs.
Warning: using this method, the exceptions are logged then discarded.
"""
try:
yield
except Exception:
cr.rollback()
_logger.exception('Error during an automatic workflow action.')
else:
cr.commit()
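# Usage sketch (the pattern used by the methods below): wrap each unit of work
# so that one failing record does not roll back the whole batch:
#
#   with commit(self.env.cr):
#       record.signal_workflow('some_signal')  # 'some_signal' is illustrative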
class AutomaticWorkflowJob(models.Model):
""" Scheduler that will play automatically the validation of
invoices, pickings... """
_name = 'automatic.workflow.job'
@api.model
def _get_domain_for_sale_validation(self):
return [('state', '=', 'draft'),
('workflow_process_id.validate_order', '=', True)]
@api.model
def _validate_sale_orders(self):
sale_obj = self.env['sale.order']
sales = sale_obj.search(self._get_domain_for_sale_validation())
_logger.debug('Sale Orders to validate: %s', sales)
for sale in sales:
with commit(self.env.cr):
sale.signal_workflow('order_confirm')
@api.model
def _validate_invoices(self):
invoice_obj = self.env['account.invoice']
invoices = invoice_obj.search(
[('state', 'in', ['draft']),
('workflow_process_id.validate_invoice', '=', True)],
)
_logger.debug('Invoices to validate: %s', invoices)
for invoice in invoices:
with commit(self.env.cr):
invoice.signal_workflow('invoice_open')
@api.model
def _validate_pickings(self):
picking_obj = self.env['stock.picking']
pickings = picking_obj.search(
[('state', 'in', ['draft', 'confirmed', 'assigned']),
('workflow_process_id.validate_picking', '=', True)],
)
_logger.debug('Pickings to validate: %s', pickings)
if pickings:
with commit(self.env.cr):
pickings.validate_picking()
@api.model
def run(self):
""" Must be called from ir.cron """
self._validate_sale_orders()
self._validate_invoices()
self._validate_pickings()
return True
|
Python
| 0
|
@@ -3348,38 +3348,29 @@
ale.
-signal_workflow('order
+action_button
_confirm
')%0A%0A
@@ -3365,17 +3365,17 @@
_confirm
-'
+(
)%0A%0A @
|
ebcf06836d86bc6548531dcc4ac1cace018a3388
|
build index object
|
column.py
|
column.py
|
from __future__ import print_function
import os
import matplotlib as mpl
mpl.use("Agg")
from math import sqrt
import numpy as np
import math
from pandas.util.testing import DataFrame, Series
import matplotlib.pyplot as plt
import pandas as pd
import itertools
__author__ = 'dietz'
from argparse import ArgumentParser
# query metric value
# C09-1 ndcg 0.27478
# C09-1 ndcg5 0.47244
# C09-1 ndcg10 0.32972
# C09-1 ndcg20 0.25703
# C09-1 ERR 0.18652
# C09-1 ERR10 0.16907
# C09-1 ERR20 0.17581
# C09-1 P1 1.00000
def is_valid_file(parser, arg):
if not os.path.exists(arg):
parser.error("The file %s does not exist!" % arg)
else:
return arg
tooldescription = """
Classic bar chart indicating the mean of values for the given
metric across all queries with error bars indicating the standard
error.
"""
parser = ArgumentParser(description=tooldescription)
parser.add_argument('--out', help='outputfilename', metavar='FILE', required=True)
parser.add_argument('--metric', help='metric for comparison', required=True)
parser.add_argument('--format', help='trec_eval output or galago_eval output', default='trec_eval')
parser.add_argument('-c', help='instead of average, also count non-existing queries', default=False, action='store_true')
parser.add_argument('--sort', help='sort methods in plot', action='store_true', default=False)
parser.add_argument(dest='runs', nargs='+', type=lambda x: is_valid_file(parser, x))
args = parser.parse_args()
numQueries_key = "num_q"
print("column.py metric="+args.metric+" out="+args.out)
def read_ssv(fname):
lines = [line.split() for line in open(fname, 'r')]
if args.format.lower() == 'galago_eval':
return lines
elif args.format.lower() == 'trec_eval':
return [[line[1], line[0]] + line[2:] for line in lines]
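# Sketch of what read_ssv normalizes (values are illustrative):
#   trec_eval line:   "ndcg  C09-1  0.27478"  -> ["C09-1", "ndcg", "0.27478"]
#   galago_eval line: "C09-1  ndcg  0.27478"  -> unchanged
# so downstream code can always index rows as [query, metric, value].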
def readNumQueries(run):
tsv = read_ssv(run)
data = [int(row[2]) for row in tsv if row[0] == "all" and row[1] == numQueries_key]
return data[0]
def findQueriesWithNanValues(run):
tsv = read_ssv(run)
# print ("tsv,", tsv)
queriesWithNan = {row[0] for row in tsv if row[1] == 'num_rel' and (float(row[2]) == 0.0 or math.isnan(float(row[2])))}
return queriesWithNan
def fetchValues(run):
tsv = read_ssv(run)
data = {row[0]: float(row[2]) for row in tsv if row[1] == args.metric and not math.isnan(float(row[2]))}
return data
datas = {run: fetchValues(run) for run in args.runs}
# deal with nans
queriesWithNanValues = {'all'}.union(*[findQueriesWithNanValues(run) for run in args.runs])
basedata = datas[args.runs[0]]
queries = set(basedata.keys()).difference(queriesWithNanValues)
numQueries = readNumQueries(args.runs[0]) if args.c else len(queries)
seriesDict = {'mean':dict(), 'stderr':dict()}
for run in datas:
data = datas[run]
    if sum(key not in data for key in queries) > 0:
print("data for run "+run+" does not contain all queries "+" ".join(queries))
mean = np.sum([data.get(key, 0.0) for key in queries]) / numQueries
stderr = np.std([data.get(key, 0.0) for key in queries] + ([0.0]* (numQueries - len(queries)))) / sqrt(numQueries)
seriesDict['mean'][run]=mean
seriesDict['stderr'][run]=stderr
print( "dropping queries because of NaN values: "+ " ".join(queriesWithNanValues))
print ('\t'.join(['run', 'mean/stderr']))
for run in datas:
#if not run == args.runs[0]:
print ('\t'.join([run, str(seriesDict['mean'][run]), str(seriesDict['stderr'][run])]))
df1 = DataFrame(seriesDict, index=args.runs)
if args.sort:
df1.sort_values('mean',ascending=False,inplace=True)
df2 = df1['mean']
df2.index=[os.path.basename(label) for label in df1.index]
df1.index=[os.path.basename(label) for label in df1.index]
print(plt.rcParams.get('axes.prop_cycle'))
cs = {k:v for k,v in zip(set([label[0:3] for label in df1.index]), itertools.cycle(['#ff0000aa','#00ffffaa'])) }
df1['color']=[cs[label[0:3]] for label in df1.index]
print(df1['color'])
plt.tick_params(colors=df1.color)
fig, ax = plt.subplots()
#plt.figure()
#df2.plot(kind='bar', yerr = df1['stderr'], color=['b','y','g'], ax=ax )
df2.plot.bar(yerr = df1['stderr'], color=df1.color.values, ax=ax)
ax.grid()
plt.ylabel(args.metric, fontsize=20)
plt.tick_params(axis='both', which='major', labelsize=20)
plt.xticks(rotation=90)
plt.savefig(args.out, bbox_inches='tight')
# plt.show()
|
Python
| 0.000002
|
@@ -3496,16 +3496,25 @@
, index=
+pd.Index(
args.run
@@ -3515,16 +3515,17 @@
gs.runs)
+)
%0Aif args
|
db713e62eafb29c1a968e16b997a4e8f49156c78
|
Correct config for touchscreen
|
config.py
|
config.py
|
__author__ = 'Florian'
from util import get_lan_ip
#################
# CONFIGURATION #
#################
# CHANGE FROM HERE
#
UDP_PORT = 18877
IP = get_lan_ip()
BUF_SIZE = 4096
TIMEOUT_IN_SECONDS = 0.1
#
SCREEN_WIDTH = 320
SCREEN_HEIGHT = 240
SCREEN_DEEP = 32
#
LABEL_RIGHT = 0
LABEL_LEFT = 1
ALIGN_CENTER = 0
ALIGN_RIGHT = 1
ALIGN_LEFT = 2
VALIGN_CENTER = 0
VALIGN_TOP = 1
VALIGN_BOTTOM = 2
#
# Stop changing. Of course - you can do, but it should not be necessary
#
FONT = 'assets/DroidSansMono.ttf'
# set up the colors
BLACK = ( 0, 0, 0)
WHITE = (255, 255, 255)
RED = (255, 0, 0)
GREEN = ( 0, 255, 0)
BLUE = ( 0, 0, 255)
CYAN = ( 0, 255, 255)
MAGENTA= (255, 0, 255)
YELLOW = (255, 255, 0)
RPM_YELLOW = (230, 230, 40)
GREY = (214, 214, 214)
BACKGROUND_COLOR = BLACK
FOREGROUND_COLOR = WHITE
#
#
#
import os, sys
if sys.platform == 'darwin':
# Display on Laptop Screen on the left
os.environ['SDL_VIDEO_WINDOW_POS'] = "%d,%d" % (-400,100)
#from datastream import MockBaseDataStream
#datastream = MockBaseDataStream()
from datastream import PDU1800DataStream
datastream = PDU1800DataStream(ip=IP, port=UDP_PORT)
elif sys.platform == 'linux2':
from evdev import InputDevice, list_devices
devices = map(InputDevice, list_devices())
eventX=""
for dev in devices:
if dev.name == "ADS7846 Touchscreen":
eventX = dev.fn
os.environ["SDL_FBDEV"] = "/dev/fb1"
os.environ["SDL_MOUSEDRV"] = "TSLIB"
os.environ["SDL_MOUSEDEV"] = eventX
if os.path.isfile('/etc/pointercal'):
os.environ["TSLIB_CALIBFILE"] = '/etc/pointercal'
from datastream import PDU1800DataStream
datastream = PDU1800DataStream(ip=IP, port=UDP_PORT)
#
|
Python
| 0.000002
|
@@ -988,17 +988,16 @@
00)%0A
-#
from dat
@@ -1034,17 +1034,16 @@
eam%0A
-#
datastre
@@ -1068,24 +1068,25 @@
tream()%0A
+#
from datastr
@@ -1110,32 +1110,33 @@
0DataStream%0A
+#
datastream = PDU
@@ -1207,438 +1207,480 @@
2':%0A
-%0A from evdev import InputDevice, list_devices%0A%0A devices = map(InputDevice, list_devices())%0A eventX=%22%22%0A for dev in devices:%0A if dev.name == %22ADS7846 Touchscreen%22:%0A eventX = dev.fn%0A os.environ%5B%22SDL_FBDEV%22%5D = %22/dev/fb1%22%0A os.environ%5B%22SDL_MOUSEDRV%22%5D = %22TSLIB%22%0A os.environ%5B%22SDL_MOUSEDEV%22%5D = eventX%0A if os.path.isfile('/etc/pointercal'):%0A os.environ%5B%22TSLIB_CALIBFILE%22%5D = '/etc/pointercal'
+ if os.path.isfile('/etc/pointercal'):%0A os.environ%5B%22TSLIB_CALIBFILE%22%5D = '/etc/pointercal'%0A os.putenv('SDL_VIDEODRIVER', 'fbcon')%0A os.environ%5B%22SDL_FBDEV%22%5D = %22/dev/fb1%22%0A os.environ%5B%22SDL_MOUSEDRV%22%5D = %22TSLIB%22%0A from evdev import InputDevice, list_devices%0A%0A devices = map(InputDevice, list_devices())%0A eventX=%22%22%0A for dev in devices:%0A if dev.name == %22ADS7846 Touchscreen%22:%0A eventX = dev.fn%0A%0A os.environ%5B%22SDL_MOUSEDEV%22%5D = eventX
%0A
|
d5e8fe7e7dfe0c98e6c520e53032cbb67e1977e5
|
Update VAT value when set document_type = CUIT
|
config.py
|
config.py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2012 OpenERP - Team de Localización Argentina.
# https://launchpad.net/~openerp-l10n-ar-localization
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
import logging
import base64
from M2Crypto import X509
_logger = logging.getLogger(__name__)
_schema = logging.getLogger(__name__ + '.schema')
class l10n_ar_wsafip_fe_config(osv.osv_memory):
def _default_company(self, cr, uid, context=None):
return self.pool.get('res.users').browse(cr, uid, uid, context).company_id.id
def update_data(self, cr, uid, ids, company_id, context=None):
journal_obj = self.pool.get('account.journal')
v = { 'journal_ids': journal_obj.search(cr, uid, [('company_id','=',company_id),
('journal_class_id','!=',False)]) }
return {'value': v}
def _get_journals(self, cr, uid, ids, field_name, arg, context=None):
journal_obj = self.pool.get('account.journal')
result = dict( (id, self.items) for id in ids )
return result
def _get_pos(self, cr, uid, context=None):
cr.execute("""
SELECT point_of_sale
FROM account_journal
WHERE point_of_sale is not Null
GROUP BY point_of_sale
ORDER BY point_of_sale
""")
items = [ ("%i" % i, _("Point of sale %i") % i) for i in cr.fetchall() ]
return items
def _set_journals(self, cr, uid, ids, field_name, field_value, fnct_inv_arg, context=None):
journal_obj = self.pool.get('account.journal')
self.items = field_value[0][2]
return True
def execute(self, cr, uid, ids, context=None):
"""
"""
conn_obj = self.pool.get('wsafip.connection')
journal_obj = self.pool.get('account.journal')
afipserver_obj = self.pool.get('wsafip.server')
sequence_obj = self.pool.get('ir.sequence')
for ws in self.browse(cr, uid, ids):
            # Take the company
company = ws.company_id
conn_class = 'homologation' if ws.wsfe_for_homologation else 'production'
            # The authorization for the service must be created if it does not exist.
conn_ids = conn_obj.search(cr, uid, [('partner_id','=',company.partner_id.id)])
if len(conn_ids) == 0:
                # The batch-process sequence must be created if it does not exist.
seq_ids = sequence_obj.search(cr, uid, [('code','=','wsafip_fe_sequence')])
if seq_ids:
seq_id = seq_ids[0]
else:
seq_id = sequence_obj.create(cr, uid, {'name': 'Web Service AFIP Sequence for Invoices', 'code': 'ws_afip_sequence'})
                # Create the AFIP connector
conn_id = conn_obj.create(cr, uid, {
'name': 'AFIP Sequence Authorization Invoice: %s' % company.name,
'partner_id': company.partner_id.id,
'logging_id': afipserver_obj.search(cr, uid, [('code','=','wsaa'),('class','=',conn_class)])[0],
'server_id': afipserver_obj.search(cr, uid, [('code','=','wsfe'),('class','=',conn_class)])[0],
'certificate': ws.wsfe_certificate_id.id,
'batch_sequence_id': seq_id,
})
else:
conn_id = conn_ids[0]
            # Assign the AFIP connector
jou_ids = journal_obj.search(cr, uid, [('company_id','=',company.id),
('point_of_sale','=',ws.wsfe_point_of_sale),
('type','=','sale')])
journal_obj.write(cr, uid, jou_ids, { 'afip_connection_id': conn_id })
            # Synchronize the local invoice number with the remote one
for journal in journal_obj.browse(cr, uid, jou_ids):
remote_number = journal.afip_items_generated
seq_id = journal.sequence_id.id
if not type(remote_number) is bool:
_logger.info("Journal '%s' syncronized." % journal.name)
sequence_obj.write(cr, uid, seq_id, {'number_next': remote_number + 1})
else:
_logger.info("Journal '%s' cant be used." % journal.name)
return True
_name = 'l10n_ar_wsafip_fe.config'
_inherit = 'res.config'
_columns = {
'company_id': fields.many2one('res.company', 'Company', required=True),
'wsfe_for_homologation': fields.boolean('Is for homologation'),
'wsfe_certificate_id': fields.many2one('crypto.certificate', 'Certificate', required=True),
'wsfe_point_of_sale': fields.selection(_get_pos, 'Point of Sale', required=True),
}
_defaults= {
'company_id': _default_company,
'wsfe_for_homologation': False,
}
l10n_ar_wsafip_fe_config()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Python
| 0.000001
|
@@ -5396,80 +5396,8 @@
e),%0A
- 'wsfe_for_homologation': fields.boolean('Is for homologation'),%0A
@@ -5484,32 +5484,104 @@
required=True),%0A
+ 'wsfe_for_homologation': fields.boolean('Is for homologation'),%0A
'wsfe_po
|
68593e359d5bb79c096d584c83df1ff55262a686
|
use with
|
config.py
|
config.py
|
# coding=utf-8
from configparser import ConfigParser
import os
__author__ = 'Victor Häggqvist'
class Config:
confdir = os.path.dirname(os.path.realpath(__file__))
config_file = os.path.join(confdir, 'ledman.conf')
default = """
[gpio]
red=22
green=27
blue=17
[default_level]
red=0
green=0.3
blue=0.5
[server]
keys=testkeychangeme
"""
def __init__(self):
config = ConfigParser()
if not os.path.isfile(self.config_file):
self.init_config()
config.read(self.config_file)
self.GPIO_RED = config.get('gpio', 'red') # 22
self.GPIO_GREEN = config.get('gpio', 'green') # 27
self.GPIO_BLUE = config.get('gpio', 'blue') # 17
self.RED_DEFAULT = config.get('default_level', 'red') # 0
self.GREEN_DEFAULT = config.get('default_level', 'green') # 0.3
self.BLUE_DEFAULT = config.get('default_level', 'blue') # 0.5
keys = config.get('server', 'keys')
self.keys = []
for k in keys.split(','):
self.keys.append(k)
def init_config(self):
f = open(self.config_file, 'w+')
f.write(self.default)
f.close()
|
Python
| 0
|
@@ -1087,11 +1087,12 @@
-f =
+with
ope
@@ -1116,17 +1116,27 @@
e, 'w+')
-%0A
+ as f:%0A
@@ -1161,22 +1161,4 @@
lt)%0A
- f.close()%0A
|
e1f5744d9206a5a169f6997a6f3bb673ec8c6214
|
swap crappy googlemail for mailgun
|
config.py
|
config.py
|
import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
SECRET_KEY = 'dummy_secret_key'
MAIL_SUBJECT_PREFIX = 'Socialite'
MAIL_SENDER = 'Socialite Team <team.socialite.app@gmail.com>'
ADMIN = ['delita.makanda@gmail.com']
POSTS_PER_PAGE = 10
FOLLOWERS_PER_PAGE = 20
COMMENTS_PER_PAGE = 10
SLOW_DB_QUERY_TIME = 0.5
SSL_DISABLE = False
SQLALCHEMY_COMMIT_ON_TEARDOWN = True
SQLALCHEMY_TRACK_MODIFICATIONS = False
SQLALCHEMY_RECORD_QUERIES = True
MAIL_SERVER = 'smtp.googlemail.com'
MAIL_PORT = 587
MAIL_USE_TLS = True
MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
FLATPAGES_EXTENSION = '.md'
FLATPAGES_MARKDOWN_EXTENSIONS = ['codehilite', 'headerid']
ADMINS = ['delita.makanda@gmail.com', 'makanda.delita@orange.fr']
@staticmethod
def init_app(app):
pass
class DevelopmentConfig(Config):
"""docstring for DevelopmentConfig."""
DEBUG = True
FLATPAGES_AUTO_RELOAD = DEBUG
MAIL_SERVER = 'smtp.googlemail.com'
MAIL_PORT = 587
MAIL_USE_TLS = True
MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
SQLALCHEMY_DATABASE_URI = os.environ.get('DEV_DATABASE_URL') or 'sqlite:///' + os.path.join(basedir, 'data-dev.sqlite')
class TestingConfig(Config):
TESTING = True
WTF_CSRF_ENABLED = False
SQLALCHEMY_DATABASE_URI = os.environ.get('TEST_DATABASE_URL') or 'sqlite:///' + os.path.join(basedir, 'data-test.sqlite')
class ProductionConfig(Config):
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or 'sqlite:///' + os.path.join(basedir, 'data.sqlite')
@classmethod
def init_app(cls, app):
Config.init_app(app)
import logging
from logging.handlers import SMTPHandler
credentials = None
secure = None
if getattr(cls, 'MAIL_USERNAME', None) is not None:
credentials = (cls.MAIL_USERNAME, cls.MAIL_PASSWORD)
if getattr(cls, 'MAIL_USE_TLS', None):
secure = ()
mail_handler = SMTPHandler(
mailhost=(cls.MAIL_SERVER, cls.MAIL_PORT),
fromaddr=cls.MAIL_SENDER,
toaddrs=[cls.ADMIN],
subject=cls.MAIL_SUBJECT_PREFIX + ' Application error',
credentials=credentials,
secure=secure)
mail_handler.setLevel(logging.ERROR)
app.logger.addHandler(mail_handler)
class HerokuConfig(ProductionConfig):
SSL_DISABLE = bool(os.environ.get('SSL_DISABLE'))
@classmethod
def init_app(cls, app):
ProductionConfig.init_app(app)
import logging
from logging import StreamHandler
file_handler = StreamHandler()
file_handler.setLevel(logging.WARNING)
app.logger.addHandler(file_handler)
from werkzeug.contrib.fixers import ProxyFix
app.wsgi_app = ProxyFix(app.wsgi_app)
class UnixConfig(ProductionConfig):
@classmethod
def init_app(cls, app):
ProductionConfig.init_app(app)
import logging
from logging.handlers import SysLogHandler
syslog_handler = SysLogHandler()
syslog_handler.setLevel(logging.WARNING)
app.logger.addHandler(syslog_handler)
config = {
'development': DevelopmentConfig,
'testing': TestingConfig,
'production': ProductionConfig,
'heroku': HerokuConfig,
'unix': UnixConfig,
'default': DevelopmentConfig
}
|
Python
| 0
|
@@ -89,26 +89,36 @@
Y =
-'dummy_secret_key'
+os.environ.get('SECRET_KEY')
%0A%09MA
@@ -138,25 +138,25 @@
EFIX = '
-S
+s
ocialite
'%0A%09MAIL_
@@ -147,16 +147,20 @@
ocialite
+ app
'%0A%09MAIL_
@@ -187,39 +187,8 @@
Team
- %3Cteam.socialite.app@gmail.com%3E
'%0A%09A
@@ -469,38 +469,35 @@
VER = 'smtp.
-googlemail.com
+mailgun.org
'%0A%09MAIL_PORT
@@ -966,22 +966,19 @@
mtp.
-googlemail.com
+mailgun.org
'%0A%09M
|
0812ec319291b709613152e9e1d781671047a428
|
Make server ignore missing environment variables
|
config.py
|
config.py
|
import os
SQLALCHEMY_DATABASE_URI = os.environ['DATABASE_URL']
ACCESS_TOKEN = os.environ['ACCESS_TOKEN']
PAGE_ID = os.environ['PAGE_ID']
APP_ID = os.environ['APP_ID']
VERIFY_TOKEN = os.environ['VERIFY_TOKEN']
|
Python
| 0
|
@@ -40,17 +40,21 @@
.environ
-%5B
+.get(
'DATABAS
@@ -59,17 +59,30 @@
ASE_URL'
-%5D
+, 'sqlite://')
%0A%0AACCESS
@@ -100,17 +100,21 @@
.environ
-%5B
+.get(
'ACCESS_
@@ -119,17 +119,17 @@
S_TOKEN'
-%5D
+)
%0APAGE_ID
@@ -141,17 +141,21 @@
.environ
-%5B
+.get(
'PAGE_ID
@@ -155,17 +155,17 @@
PAGE_ID'
-%5D
+)
%0AAPP_ID
@@ -180,17 +180,21 @@
iron
-%5B
+.get(
'APP_ID'
%5D%0AVE
@@ -189,17 +189,17 @@
'APP_ID'
-%5D
+)
%0AVERIFY_
@@ -220,9 +220,13 @@
iron
-%5B
+.get(
'VER
@@ -239,6 +239,6 @@
KEN'
-%5D
+)
%0A
|
184cf0c253d8edf0eb9e19de54183086ca326951
|
Update sleepyoz_digitalclock3.py
|
mcpipy/sleepyoz_digitalclock3.py
|
mcpipy/sleepyoz_digitalclock3.py
|
#!/usr/bin/env python
# mcpipy.com retrieved from URL below, written by SleepyOz
# http://www.raspberrypi.org/phpBB3/viewtopic.php?f=32&t=33427
from .. import minecraft
from .. import block
import time
import server
"""
Dot matrix digits 5 wide by 8 high.
0 - voxel should be drawn
Anything else - voxel should be cleared
"""
digit_dots = {
'0':[
' 000',
'0 0',
'0 0',
'0 0',
'0 0',
'0 0',
'0 0',
' 000',
],
'1':[
' 0',
' 00',
' 0',
' 0',
' 0',
' 0',
' 0',
' 000',
],
'2':[
' 000',
'0 0',
' 0',
' 0',
' 0',
'0',
'0',
'00000',
],
'3':[
' 000',
'0 0',
' 0',
' 00',
' 0',
' 0',
'0 0',
' 000',
],
'4':[
' 0',
' 00',
' 0 0',
'0 0',
'00000',
' 0',
' 0',
' 0',
],
'5':[
'00000',
'0',
'0',
'0000',
' 0',
' 0',
'0 0',
' 000',
],
'6':[
' 000',
'0 0',
'0',
'0000',
'0 0',
'0 0',
'0 0',
' 000',
],
'7':[
'00000',
' 0',
' 0',
' 0',
' 0',
'0',
'0',
'0',
],
'8':[
' 000',
'0 0',
'0 0',
' 000',
'0 0',
'0 0',
'0 0',
' 000',
],
'9':[
' 000',
'0 0',
'0 0',
' 0000',
' 0',
' 0',
'0 0',
' 000',
],
':':[
'',
'',
' 00',
' 00',
'',
' 00',
' 00',
'',
],
}
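# Rendering sketch: buffer.render() walks these strings row by row; every '0'
# becomes a set voxel (snow block) and anything else an unset voxel
# (obsidian), padding each glyph to the full 6-voxel width.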
class buffer:
"""
Double-buffer a voxel message for Minecraft.
To improve performance, only changes are rendered.
"""
anchor_position = minecraft.Vec3(0,0,0)
last_message = ''
offscreen = []
onscreen = []
unset = block.OBSIDIAN
set = block.SNOW_BLOCK
def __init__(self, anchor_position):
"""
Set everything up to render messages into the world
at the given position.
"""
self.anchor_position = anchor_position
def draw_base(self, client):
"""
Build some foundations for the clock.
"""
# Foundations of stone.
for y in range(-5, -1): # Nice thick base.
for x in range(-1, 8*6): # 8 digits each 6 wide, plus a border, minus one to cut out the space beside the last digit.
for z in range(-1, 8+1): # Each digit is 8 high, plus a border.
client.setBlock(self.anchor_position.x+x, self.anchor_position.y+y, self.anchor_position.z+z, block.STONE)
# Shallow pool at the top.
for x in range(0, 8*6-1):
for z in range(0, 8):
client.setBlock(self.anchor_position.x+x, self.anchor_position.y-2, self.anchor_position.z+z, block.WATER_STATIONARY)
def render(self, message):
"""
Put message into the off-screen buffer.
"""
if message != self.last_message: # Do nothing if the message has not changed.
self.last_message = message # For next time.
self.offscreen = [] # Clear any previous use of the buffer.
letter_offset = 0
for letter in message:
rendition = digit_dots[letter]
line_offset = 0
for line in rendition:
if len(self.offscreen) <= line_offset:
# Make space to store the drawing.
self.offscreen.append([])
dot_offset = 0
for dot in line:
if dot == '0':
self.offscreen[line_offset].append(self.set)
else:
self.offscreen[line_offset].append(self.unset)
dot_offset += 1
for blank in range(dot_offset, 6):
# Expand short lines to the full width of 6 voxels.
self.offscreen[line_offset].append(self.unset)
line_offset += 1
letter_offset += 1
# Clear the onscreen buffer.
# Should only happen on the first call.
# Assumption: message will always be the same size.
# Assumption: render() is called before flip().
if self.onscreen == []:
# No onscreen copy yet - so make it the same size as the offscreen image. Fill with suitable voxels.
line_offset = 0
for line in self.offscreen:
self.onscreen.append([])
for dot in line:
self.onscreen[line_offset].append(block.DIRT)
line_offset += 1
def flip(self, client):
"""
Put the off-screen buffer onto the screen.
Only send the differences.
Remember the new screen for next flip.
        Draw the clock inverted so it reads properly from above.
"""
line_offset = 0
height = len(self.offscreen) - 1
for line in self.offscreen:
dot_offset = 0
length = len(line) - 2 # Fit into the border better.
for dot in line:
if self.onscreen[line_offset][dot_offset] != dot:
self.onscreen[line_offset][dot_offset] = dot
client.setBlock(self.anchor_position.x+length-dot_offset, self.anchor_position.y-3, self.anchor_position.z+height-line_offset, dot)
dot_offset += 1
line_offset += 1
client=minecraft.Minecraft.create(server.address) # Connect to Minecraft.
place=client.player.getPos() # Start near the player.
# place.y is just below the player, and we don't need to change it.
bitmapper = buffer(place)
bitmapper.draw_base(client)
while True:
timestr = time.strftime("%H:%M:%S") # Format time nicely.
bitmapper.render(timestr)
bitmapper.flip(client)
time.sleep(.1) # Rest a while before drawing again.
|
Python
| 0.000004
|
@@ -140,31 +140,28 @@
=33427%0A%0A
-from ..
import
+mcpi.
minecraf
@@ -165,23 +165,42 @@
raft
-%0Afrom .. import
+ as minecraft%0Aimport mcpi.block as
blo
@@ -6408,8 +6408,9 @@
g again.
+%0A
|
c5494d7ce4914932216c3de4a27dbcbb15b1c941
|
correct spelling mistake
|
measurement/measures/distance.py
|
measurement/measures/distance.py
|
# Copyright (c) 2007, Robert Coup <robert.coup@onetrackmind.co.nz>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Distance nor the names of its contributors may be used
# to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""
Distance and Area objects to allow for sensible and convienient calculation
and conversions.
Authors: Robert Coup, Justin Bronn, Riccardo Di Virgilio
Inspired by GeoPy (http://exogen.case.edu/projects/geopy/)
and Geoff Biggs' PhD work on dimensioned units for robotics.
"""
from measurement.base import MeasureBase, NUMERIC_TYPES, pretty_name
__all__ = [
'Distance',
'Area',
]
AREA_PREFIX = "sq_"
class Distance(MeasureBase):
STANDARD_UNIT = "m"
UNITS = {
'chain': 20.1168,
'chain_benoit': 20.116782,
'chain_sears': 20.1167645,
'british_chain_benoit': 20.1167824944,
'british_chain_sears': 20.1167651216,
'british_chain_sears_truncated': 20.116756,
'british_ft': 0.304799471539,
'british_yd': 0.914398414616,
'clarke_ft': 0.3047972654,
'clarke_link': 0.201166195164,
'fathom': 1.8288,
'ft': 0.3048,
'german_m': 1.0000135965,
'gold_coast_ft': 0.304799710181508,
'indian_yd': 0.914398530744,
'inch': 0.0254,
'link': 0.201168,
'link_benoit': 0.20116782,
'link_sears': 0.20116765,
'm': 1.0,
'mi': 1609.344,
'nm_uk': 1853.184,
'rod': 5.0292,
'sears_yd': 0.91439841,
'survey_ft': 0.304800609601,
'yd': 0.9144,
}
SI_UNITS = [
'm'
]
# Unit aliases for `UNIT` terms encountered in Spatial Reference WKT.
ALIAS = {
'foot': 'ft',
'inches': 'inch',
'meter': 'm',
'metre': 'm',
'mile': 'mi',
'yard': 'yd',
'British chain (Benoit 1895 B)': 'british_chain_benoit',
'British chain (Sears 1922)': 'british_chain_sears',
'British chain (Sears 1922 truncated)': (
'british_chain_sears_truncated'
),
'British foot (Sears 1922)': 'british_ft',
'British foot': 'british_ft',
'British yard (Sears 1922)': 'british_yd',
'British yard': 'british_yd',
"Clarke's Foot": 'clarke_ft',
"Clarke's link": 'clarke_link',
'Chain (Benoit)': 'chain_benoit',
'Chain (Sears)': 'chain_sears',
'Foot (International)': 'ft',
'German legal metre': 'german_m',
'Gold Coast foot': 'gold_coast_ft',
'Indian yard': 'indian_yd',
'Link (Benoit)': 'link_benoit',
'Link (Sears)': 'link_sears',
'Nautical Mile': 'nm',
'Nautical Mile (UK)': 'nm_uk',
'US survey foot': 'survey_ft',
'U.S. Foot': 'survey_ft',
'Yard (Indian)': 'indian_yd',
'Yard (Sears)': 'sears_yd'
}
def __mul__(self, other):
if isinstance(other, self.__class__):
return Area(
default_unit=AREA_PREFIX + self._default_unit,
**{
AREA_PREFIX + self.STANDARD_UNIT: (
self.standard * other.standard
)
}
)
elif isinstance(other, NUMERIC_TYPES):
return self.__class__(
default_unit=self._default_unit,
**{self.STANDARD_UNIT: (self.standard * other)}
)
else:
raise TypeError(
'%(dst)s must be multiplied with number or %(dst)s' % {
"dst": pretty_name(self.__class__),
}
)
class Area(MeasureBase):
STANDARD_UNIT = AREA_PREFIX + Distance.STANDARD_UNIT
# Getting the square units values and the alias dictionary.
UNITS = dict(
[
('%s%s' % (AREA_PREFIX, k), v ** 2)
for k, v in Distance.get_units().items()
]
)
ALIAS = dict(
[
(k, '%s%s' % (AREA_PREFIX, v))
for k, v in Distance.get_aliases().items()
]
)
def __truediv__(self, other):
if isinstance(other, NUMERIC_TYPES):
return self.__class__(
default_unit=self._default_unit,
**{self.STANDARD_UNIT: (self.standard / other)}
)
else:
raise TypeError(
'%(class)s must be divided by a number' % {
"class": pretty_name(self)
}
)
def __div__(self, other): # Python 2 compatibility
return type(self).__truediv__(self, other)
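# --- Hedged usage sketch (added for illustration; not part of the original
# module). Assumes MeasureBase accepts unit keyword arguments and resolves
# attribute access through the UNITS table, as the definitions above imply.
if __name__ == '__main__':
    one_mile = Distance(mi=1)
    print(one_mile.m)            # 1609.344, converted through Distance.UNITS
    plot = Distance(m=20) * Distance(m=30)
    print(plot.sq_m)             # 600.0 -- Distance * Distance yields an Area
    print((plot / 2).sq_m)       # 300.0 -- Area divided by a plain number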
|
Python
| 0.999884
|
@@ -1637,17 +1637,16 @@
and conv
-i
enient c
|
d7e03596f8bf1e886e984c0ea98334af878a15e2
|
Use __future__.print_function so syntax is valid on Python 3
|
meta/bytecodetools/print_code.py
|
meta/bytecodetools/print_code.py
|
'''
Created on May 10, 2012
@author: sean
'''
from .bytecode_consumer import ByteCodeConsumer
from argparse import ArgumentParser
class ByteCodePrinter(ByteCodeConsumer):
def generic_consume(self, instr):
print instr
def main():
parser = ArgumentParser()
parser.add_argument()
if __name__ == '__main__':
main()
|
Python
| 0.9985
|
@@ -40,16 +40,55 @@
ean%0A'''%0A
+from __future__ import print_function%0A%0A
from .by
@@ -261,22 +261,23 @@
print
-
+(
instr
+)
%0A%0Adef ma
@@ -372,12 +372,13 @@
:%0A main()
+%0A
|
1f343e52abb67ab2f85836b10dadb3cb34a95379
|
fix login issue with django 1.7: check_for_test_cookie is deprecated and removed in django 1.7.
|
xadmin/forms.py
|
xadmin/forms.py
|
from django import forms
from django.contrib.auth import authenticate
from django.contrib.auth.forms import AuthenticationForm
from django.utils.translation import ugettext_lazy, ugettext as _
from xadmin.util import User
ERROR_MESSAGE = ugettext_lazy("Please enter the correct username and password "
"for a staff account. Note that both fields are case-sensitive.")
class AdminAuthenticationForm(AuthenticationForm):
"""
A custom authentication form used in the admin app.
"""
this_is_the_login_form = forms.BooleanField(
widget=forms.HiddenInput, initial=1,
error_messages={'required': ugettext_lazy("Please log in again, because your session has expired.")})
def clean(self):
username = self.cleaned_data.get('username')
password = self.cleaned_data.get('password')
message = ERROR_MESSAGE
if username and password:
self.user_cache = authenticate(
username=username, password=password)
if self.user_cache is None:
if u'@' in username:
# Mistakenly entered e-mail address instead of username? Look it up.
try:
user = User.objects.get(email=username)
except (User.DoesNotExist, User.MultipleObjectsReturned):
# Nothing to do here, moving along.
pass
else:
if user.check_password(password):
message = _("Your e-mail address is not your username."
" Try '%s' instead.") % user.username
raise forms.ValidationError(message)
elif not self.user_cache.is_active or not self.user_cache.is_staff:
raise forms.ValidationError(message)
self.check_for_test_cookie()
return self.cleaned_data
|
Python
| 0
|
@@ -1876,45 +1876,8 @@
ge)%0A
- self.check_for_test_cookie()%0A
|
da1884711a7aa32b31f19104dc8513cba02c44ed
|
bump version number
|
cffi_magic/__init__.py
|
cffi_magic/__init__.py
|
"""
A module that defines a cffi magic for IPython
import to get `%%cffi` cell magic in current namespace
See `%%cffi?` for usage.
Mostly playing with CFFI, feel free to contact me if you want to take over.
"""
from __future__ import print_function
import re
from cffi import FFI
from hashlib import md5
import string
import logging
import subprocess
from IPython.core.magic import Magics, magics_class, cell_magic
hash_code = lambda s:md5(s.encode()).hexdigest()
import io
import os
__version__ ='0.0.8'
log = logging.getLogger(__name__)
cargotoml="""
[package]
name = "{name}"
version = "0.0.1"
authors = ["No-one InParticular <nobody@example.com>"]
[lib]
name = "{name}"
crate-type = ["dylib"]
[dependencies]
libc = "0.1"
"""
lib_pre = 'lib'
from sys import platform
if platform == 'darwin':
ext = 'dylib'
elif platform == 'win32':
lib_pre = ''
ext = 'dll'
else:
ext = 'so'
@magics_class
class CFFI(Magics):
@cell_magic
def cffi(self, line, cell):
"""
        Put the declaration on the first line, the implementation on the lines after.
        example:
        %%cffi int quad(int);
int quad(int n){
return 4*n;
}
inject `quad`, and `quad_ffi` in user namespace to be usable directly
"""
ffi = FFI()
rname = '_cffi_%s' % hash_code(line+cell)
ffi.cdef(line)
ffi.set_source(rname, cell)
ffi.compile()
mod = __import__(rname)
for attr in dir(mod.lib):
self.shell.user_ns[attr] = getattr(mod.lib, attr)
self.shell.user_ns['%s_ffi' % attr] = mod.ffi
@cell_magic
def rust(self, line, cell):
"""
Rust cffi magic, declaration on first line, rust on the rest.
Example:
```
%%rust int calculate(const char *script);
#![allow(dead_code)]
extern crate libc;
use libc::c_char;
use std::ffi::CStr;
use std::str;
// {{{ fn calculate(c_buf: *const c_char) -> i32 {...}
#[no_mangle]
pub extern fn calculate(c_buf: *const c_char) -> i32 {
let buf = unsafe { CStr::from_ptr(c_buf).to_bytes() };
let slice = str::from_utf8(buf).unwrap();
calc(slice)
} // }}}
fn calc(script: &str) -> i32 {
let mut accumulator = 0;
for c in script.chars() {
match c {
'+' => accumulator += 1,
'-' => accumulator -= 1,
'*' => accumulator *= 2,
'/' => accumulator /= 2,
_ => { /* ignore other characters */ }
}
}
accumulator
}
```
you can now
```
calculate('+ + + * - /')
# 2
```
        Example 2:
```
%%rust int double(int);
#[no_mangle]
pub extern fn double(x: i32) -> i32 {
x*2
}
```
now
```
double(3) # return 6
```
"""
ffi = FFI()
rname = '_cffi_%s' % hash_code(line+cell)
with io.open('Cargo.toml','wb') as f:
f.write(cargotoml.format(name=rname).encode('utf-8'))
try:
os.mkdir('src')
except OSError:
pass
with io.open('src/lib.rs', 'wb') as f:
f.write(cell.encode())
subprocess.call(["cargo", "build",'--release'])
ffi.cdef(line)
mod = ffi.dlopen("target/release/{pre}{name}.{ext}".format(name=rname, pre=lib_pre, ext=ext))
exports = re.findall('([a-zA-Z_]+)\(', line)
for attr in exports:
self.shell.user_ns[attr] = getattr(mod, attr)
print("injecting `%s` in user ns" % (attr,))
#self.shell.user_ns['%s_ffi'%attr] = mod.ffi
try:
ip = get_ipython()
ip.register_magics(CFFI)
except NameError:
log.debug('Not in IPython, cffi_magic will have no effect')
example = """
%%cffi int quint(int);
int quint(int n)
{
return 5*n;
}
# quint(9) # 45
"""
|
Python
| 0.000004
|
@@ -504,17 +504,17 @@
_ ='0.0.
-8
+9
'%0Alog =
|
de49b589cf950f76019dc9472b505800ea92a74c
|
Use list instead of deque for improved efficiency
|
yvs/copy_ref.py
|
yvs/copy_ref.py
|
# yvs.copy_ref
import collections
import yvs.shared as shared
from HTMLParser import HTMLParser
# Parser for reference HTML
class ReferenceParser(HTMLParser):
# Associates the given reference object with this parser instance
def __init__(self, ref):
HTMLParser.__init__(self)
if 'verse' in ref:
self.verse_start = ref['verse']
self.verse_end = ref.get('endverse', self.verse_start)
else:
self.verse_start = 1
self.verse_end = None
# Resets parser variables (implicitly called on instantiation)
def reset(self):
HTMLParser.reset(self)
self.depth = 0
self.in_block = None
self.in_verse = None
self.in_verse_content = None
self.block_depth = None
self.verse_depth = None
self.content_depth = None
self.verse_num = None
# Use a deque for efficient appends
self.content_parts = collections.deque()
# Determines if parser is currently within content of verse to include
def is_in_verse_content(self):
return (self.in_verse and self.in_verse_content and
(self.verse_num >= self.verse_start and
(not self.verse_end or self.verse_num <= self.verse_end)))
def handle_starttag(self, tag, attrs):
attr_dict = dict(attrs)
if tag == 'div' or tag == 'span':
self.depth += 1
if 'class' in attr_dict:
elem_class = attr_dict['class']
elem_class_names = elem_class.split(' ')
# Detect paragraph breaks between verses
if elem_class in {'b', 'p'}:
self.in_block = True
self.block_depth = self.depth
self.content_parts.append('\n\n')
# Detect line breaks within a single verse
if elem_class in {'li1', 'q1', 'q2'}:
self.content_parts.append('\n')
# Detect beginning of a single verse (may include footnotes)
if 'verse' in elem_class_names:
self.in_verse = True
self.verse_depth = self.depth
self.verse_num = int(elem_class_names[1][1:])
# Detect beginning of verse content (excludes footnotes)
if elem_class == 'content':
self.in_verse_content = True
self.content_depth = self.depth
def handle_endtag(self, tag):
if self.depth == self.block_depth and self.in_block:
self.in_block = False
self.content_parts.append('\n')
# Determine the end of a verse or its content
if self.depth == self.verse_depth and self.in_verse:
self.in_verse = False
if self.depth == self.content_depth and self.in_verse_content:
self.in_verse_content = False
if tag == 'div' or tag == 'span':
self.depth -= 1
# Handles verse content
def handle_data(self, content):
if self.is_in_verse_content():
self.content_parts.append(content)
# Handles all non-ASCII characters encoded as HTML entities
def handle_charref(self, name):
if self.is_in_verse_content():
char = shared.eval_charref(name)
self.content_parts.append(char)
# Retrieves HTML for reference with the given ID
def get_ref_html(ref):
url = 'https://www.bible.com/bible/{version}/{book}.{chapter}'.format(
version=ref['version_id'],
book=ref['book_id'],
chapter=ref['chapter'])
return shared.get_url_content(url)
# Parses actual reference content from reference HTML
def get_ref_content(ref):
html = get_ref_html(ref)
parser = ReferenceParser(ref)
parser.feed(html)
ref_content = shared.format_ref_content(''.join(parser.content_parts))
# Prepend reference header that identifies reference
ref_content = ''.join((shared.get_full_ref(ref), '\n\n', ref_content))
return ref_content
def main(ref_uid):
ref = shared.get_ref_object(ref_uid)
print(get_ref_content(ref).encode('utf-8'))
if __name__ == '__main__':
main('{query}')
|
Python
| 0
|
@@ -13,27 +13,8 @@
ef%0A%0A
-import collections%0A
impo
@@ -932,27 +932,10 @@
s =
-collections.deque()
+%5B%5D
%0A%0A
|
95a0bcd6bf0e65eb37fc8400db04f72557ba39c7
|
Allow multiple modes to be specified
|
charla/plugins/mode.py
|
charla/plugins/mode.py
|
from itertools import imap
from operator import attrgetter
from six import u
from funcy import take
from ..plugin import BasePlugin
from ..models import Channel, User
from ..commands import BaseCommands
from ..replies import MODE, RPL_UMODEIS, RPL_CHANNELMODEIS
from ..replies import ERR_NEEDMOREPARAMS, ERR_NOSUCHCHANNEL, ERR_NOSUCHNICK
from ..replies import ERR_CHANOPRIVSNEEDED, ERR_UNKNOWNMODE, ERR_USERNOTINCHANNEL
def process_channel_mode(user, channel, mode, *args, **kwargs):
op = kwargs.get("op", None)
if op is not None and user not in channel.operators:
yield False, ERR_CHANOPRIVSNEEDED(channel.name)
return
if op == u("+"):
if mode in channel.modes:
yield False, None
return
channel.modes += mode
else:
if mode not in channel.modes:
yield False, None
return
channel.modes = channel.modes.replace(mode, u(""))
channel.save()
yield True, MODE(channel.name, u("{0}{1}").format(op, mode), prefix=user.prefix)
def process_channel_mode_ov(user, channel, mode, *args, **kwargs):
op = kwargs.get("op", None)
if op is not None and user not in channel.operators:
yield False, ERR_CHANOPRIVSNEEDED(channel.name)
return
nick = args[0]
if nick not in imap(attrgetter("nick"), channel.users):
yield False, ERR_USERNOTINCHANNEL(nick, channel.name)
return
nick = User.objects.filter(nick=nick).first()
if mode == u("o"):
collection = channel.operators
elif mode == u("v"):
collection = channel.voiced
if op == u("+"):
collection.append(nick)
channel.save()
yield True, MODE(channel.name, u("{0}{1}").format(op, mode), [nick.nick], prefix=user.prefix)
elif op == u("-"):
collection.remove(nick)
channel.save()
yield True, MODE(channel.name, u("{0}{1}").format(op, mode), [nick.nick], prefix=user.prefix)
channel_modes = {
u("n"): (0, process_channel_mode),
u("t"): (0, process_channel_mode),
u("o"): (1, process_channel_mode_ov),
u("v"): (1, process_channel_mode_ov),
}
def process_channel_modes(user, channel, modes):
op = None
modes = iter(modes)
while True:
try:
mode = next(modes)
if mode and mode[0] == u("+"):
op = u("+")
mode = mode[1:]
elif mode and mode[0] == u("-"):
op = u("-")
mode = mode[1:]
if mode not in channel_modes:
yield False, ERR_UNKNOWNMODE(mode)
else:
nargs, f = channel_modes[mode]
for notify, message in f(user, channel, mode, *take(nargs, modes), op=op):
yield notify, message
except StopIteration:
break
def process_user_mode(user, mode, op=None):
if op == u("+"):
if mode in user.modes:
return
user.modes += mode
else:
if mode not in user.modes:
return
user.modes = user.modes.replace(mode, u(""))
user.save()
    return MODE(user.nick, u("{0}{1}").format(op, mode), prefix=user.nick)
user_modes = {
u("i"): (0, process_user_mode),
}
def process_user_modes(user, modes):
op = None
modes = iter(modes)
while True:
try:
mode = next(modes)
if mode and mode[0] == u("+"):
op = u("+")
mode = mode[1:]
elif mode and mode[0] == u("-"):
op = u("-")
mode = mode[1:]
if mode not in user_modes:
yield ERR_UNKNOWNMODE(mode)
else:
nargs, f = user_modes[mode]
yield f(user, mode, op=op)
except StopIteration:
break
class Commands(BaseCommands):
def _process_channel_modes(self, user, channel, modes):
for notify, message in process_channel_modes(user, channel, modes):
if notify:
self.notify(channel.users[:], message)
elif message is not None:
yield message
def mode(self, sock, source, *args):
"""MODE command
This command allows the user to display modes of another user
or channel and set modes of other users and channels.
"""
if not args:
return ERR_NEEDMOREPARAMS(u"MODE")
args = iter(args)
mask = next(args)
if mask.startswith(u("#")):
channel = Channel.objects.filter(name=mask).first()
if channel is None:
return ERR_NOSUCHCHANNEL(mask)
user = User.objects.filter(sock=sock).first()
mode = next(args, None)
if mode is None:
return RPL_CHANNELMODEIS(channel.name, u("+{0}").format(channel.modes))
return self._process_channel_modes(user, channel, [mode] + list(args))
else:
user = User.objects.filter(nick=mask).first()
if user is None:
return ERR_NOSUCHNICK(mask)
mode = next(args, None)
if mode is None:
return RPL_UMODEIS(u("+{0}").format(user.modes))
return process_user_modes(user, [mode] + list(args))
class Mode(BasePlugin):
def init(self, *args, **kwargs):
super(Mode, self).init(*args, **kwargs)
Commands(*args, **kwargs).register(self)
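# Hedged standalone sketch of the "+"/"-" parsing loop shared by
# process_channel_modes and process_user_modes above, with the models and
# reply machinery stripped out so it runs on its own (names illustrative):
def _parse_modes_sketch(modes, known):
    op, parsed = None, []
    for mode in modes:
        if mode and mode[0] in ("+", "-"):
            op, mode = mode[0], mode[1:]   # the op persists across items
        parsed.append((op, mode) if mode in known else ("?", mode))
    return parsed
# _parse_modes_sketch(["+o", "v"], {"o", "v"}) -> [("+", "o"), ("+", "v")]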
|
Python
| 0.000001
|
@@ -4943,29 +4943,33 @@
hannel,
-%5B
mode
-%5D
+
-list
+u(%22%22).join
(args))%0A
@@ -5293,21 +5293,25 @@
er,
-%5B
mode
-%5D
+
-list
+u(%22%22).join
(arg
|
19e59e90cd44f6375d81c971bb5005efc1165a08
|
Fix security issue in filter_non_video_iframes
|
website/utils/filters.py
|
website/utils/filters.py
|
def filter_non_video_iframes(html, testing = False):
"""
Given an HTML string, strips iframe tags that do not
(just) contain an embedded video.
Returns the remaining HTML string.
"""
from bs4 import BeautifulSoup
import re
# Tuple of regexes that define allowed URL patterns
matchers = ("^(https?:)?//www\.youtube\.com/embed/[a-zA-Z0-9-_]{8,15}$",)
# Tuple of allowed attributes in an iframe
allowed_attributes = ('height', 'src', 'width')
# Parse the input HTML into a DOM
dom = BeautifulSoup(html, "html.parser")
for iframe in dom.findAll("iframe"):
src = iframe.get("src", "")
matched = False
# Check whether any one matcher matches
for matcher in matchers:
exp = re.compile(matcher)
if exp.match(src):
matched = True
break
# If no matcher matched, remove the iframe
if not matched:
iframe.extract()
break
# If iframe tag contains something, remove the iframe
if len(iframe.contents) > 0:
iframe.extract()
break
# Check for illegal iframe attributes
for attr in iframe.attrs:
# If iframe contains illegal attribute, remove the iframe
if attr not in allowed_attributes:
iframe.extract()
break
return str(dom)
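# Hedged usage sketch (illustrative input; assumes bs4 is installed):
#   html = ('<iframe src="https://www.youtube.com/embed/dQw4w9WgXcQ"></iframe>'
#           '<iframe src="https://evil.example.com/x"></iframe>')
#   filter_non_video_iframes(html)
# keeps the YouTube embed and strips the second iframe. Note that the
# `break` after each extract() stops the scan, so a third offending iframe
# would survive this call -- the security issue addressed in the diff below.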
def obfuscate_email_addresses(html):
"""
Given an HTML string, will obfuscate e-mail addresses using HTML entities.
Works on mailto links and plain e-mail addresses.
Returns the HTML string with obfuscated e-mail addresses.
"""
from bs4 import BeautifulSoup
import re
# Parse the input HTML into a DOM
dom = BeautifulSoup(html, "html.parser")
# First, look for mailto: links and obfuscate them
for link in dom.findAll("a"):
href = link.get("href", "")
if href.startswith("mailto:"):
link['href'] = "".join(['&#%i;' % ord(char) for char in href])
# The intermediate HTML has all mailto: links obfuscated. Plaintext
# e-mail addresses are next.
intermediate_html = str(dom)
email_seeker = re.compile("([\w._%+-]+@[\w.-]+\.[A-Za-z]{2,4})")
resulting_html = ""
for index, fragment in enumerate(email_seeker.split(intermediate_html)):
if index % 2 != 0:
resulting_html += "".join(['&#%i;' % ord(char) for char in fragment])
else:
resulting_html += fragment
return resulting_html
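# Hedged illustration of the entity obfuscation above: every character of an
# address is replaced by its decimal HTML entity, e.g.
#   "".join(['&#%i;' % ord(char) for char in 'a@b.co'])
#   -> '&#97;&#64;&#98;&#46;&#99;&#111;'
# Browsers render the entities normally, but naive scrapers miss the address.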
def strip_scripts_not_in_whitelist(html):
"""
Given an HTML string, will strip all script tags that do not conform to
one of the whitelist patterns as defined in settings.py.
"""
from bs4 import BeautifulSoup
from mezzanine.conf import settings
import logging
logger = logging.getLogger(__name__)
# Parse the whitelist into a list of tags (to make sure format matches exactly)
allowed_tags = []
for allowed_tag_str in settings.RICHTEXT_SCRIPT_TAG_WHITELIST:
allowed_tags.append(str(BeautifulSoup(allowed_tag_str, "html.parser").find("script")))
# Parse the input HTML into a DOM
dom = BeautifulSoup(html, "html.parser")
# Look for all script tags and match them to the whitelist
for script_tag in dom.findAll("script"):
if str(script_tag) not in allowed_tags:
script_tag.extract()
logger.debug("Found non-whitelisted script tag. Stripped.")
logger.debug("CONF: stripped tag is "+str(script_tag))
else:
logger.debug("Found whitelisted script tag. Did not strip.")
return str(dom)
|
Python
| 0.000001
|
@@ -981,37 +981,40 @@
t()%0A
-break
+continue
%0A # If if
@@ -1130,37 +1130,40 @@
t()%0A
-break
+continue
%0A # Check
|
f99246cb8a41f9271d4d531c036975c9d105d973
|
Add ignored exceptions
|
polyaxon/polyaxon/config_settings/logging.py
|
polyaxon/polyaxon/config_settings/logging.py
|
import os
from polyaxon.config_manager import ROOT_DIR, config
LOG_DIRECTORY = ROOT_DIR.child('logs')
if not os.path.exists(LOG_DIRECTORY):
os.makedirs(LOG_DIRECTORY)
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': '[%(asctime)s] %(levelname)s %(message)s [%(name)s:%(lineno)s]',
'datefmt': '%d/%b/%Y %H:%M:%S'
},
'simple': {
'format': '%(levelname)8s %(message)s [%(name)s]'
},
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse',
},
},
'handlers': {
'logfile': {
'level': config.log_level,
'class': 'logging.handlers.RotatingFileHandler',
'filename': '{}/polyaxon_{}.log'.format(LOG_DIRECTORY, os.getpid()),
'maxBytes': 1024 * 1024 * 8, # 8 MByte
'backupCount': 5,
'formatter': 'standard',
},
'console': {
'level': config.log_level,
'class': 'logging.StreamHandler',
'formatter': 'verbose'
},
'sentry': {
'level': 'ERROR',
'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler',
},
},
'loggers': {
'polyaxon.streams': {
'handlers': ['console', ],
'propagate': True,
'level': config.log_level,
},
'polyaxon.monitors': {
'handlers': ['console', ],
'propagate': True,
'level': config.log_level,
},
'polyaxon.dockerizer': {
'handlers': ['console', ],
'propagate': True,
'level': config.log_level,
},
'django.request': {
'level': config.log_level,
'propagate': True,
'handlers': ['console', ],
},
},
}
RAVEN_CONFIG = {}
if not (config.is_testing_env or config.is_local_env) and config.platform_dsn:
RAVEN_CONFIG['dsn'] = config.platform_dsn
RAVEN_CONFIG['transport'] = "raven.transport.threaded_requests.ThreadedRequestsHTTPTransport"
RAVEN_CONFIG['release'] = config.get_string('POLYAXON_CHART_VERSION',
is_optional=True,
default='0.0.0')
RAVEN_CONFIG['environment'] = config.env
|
Python
| 0.000032
|
@@ -2500,16 +2500,227 @@
0.0.0')%0A
+ RAVEN_CONFIG%5B'IGNORE_EXCEPTIONS'%5D = %5B'django.db.ProgrammingError',%0A 'django.db.OperationalError',%0A 'django.db.InterfaceError'%5D%0A
RAVE
|
54282058900b473b1e1211f8e0b68c1d36280788
|
Fix investigations if no user input
|
core/web/frontend/investigations.py
|
core/web/frontend/investigations.py
|
from __future__ import unicode_literals
from flask_classy import route
from flask_login import current_user
from flask import render_template, request, flash, redirect, url_for
from mongoengine import DoesNotExist
from core.web.frontend.generic import GenericView
from core.investigation import Investigation, ImportMethod, ImportResults
from core.web.helpers import get_object_or_404
from core.web.helpers import requires_permissions
from core.database import AttachedFile
from core.entities import Entity
from core.indicators import Indicator
from core.observables import Observable
from core.web.api.api import bson_renderer
class InvestigationView(GenericView):
klass = Investigation
@route("/graph/<id>")
@requires_permissions("read", "investigation")
def graph(self, id):
investigation = get_object_or_404(Investigation, id=id)
return render_template(
"{}/graph.html".format(self.klass.__name__.lower()),
investigation=bson_renderer(investigation.info()))
@route("/graph/<klass>/<id>")
@requires_permissions("read", "investigation")
def graph_node(self, klass, id):
if klass == 'entity':
node = get_object_or_404(Entity, id=id)
elif klass == 'indicator':
node = get_object_or_404(Indicator, id=id)
else:
node = get_object_or_404(Observable, id=id)
investigation = Investigation(created_by=current_user.username).save()
investigation.add([], [node])
return render_template(
"{}/graph.html".format(self.klass.__name__.lower()),
investigation=bson_renderer(investigation.info()))
@route("/import/<id>", methods=['GET'])
@requires_permissions("write", "investigation")
def import_wait(self, id):
results = get_object_or_404(ImportResults, id=id)
return render_template(
"{}/import_wait.html".format(self.klass.__name__.lower()),
import_results=results)
@route("/import", methods=['GET', 'POST'])
@requires_permissions("write", "investigation")
def inv_import(self):
if request.method == "GET":
return render_template(
"{}/import.html".format(self.klass.__name__.lower()))
else:
text = request.form.get('text')
url = request.form.get('url')
if text:
investigation = Investigation(
created_by=current_user.username, import_text=text)
investigation.save()
return redirect(
url_for(
'frontend.InvestigationView:import_from',
id=investigation.id))
else:
try:
if url:
import_method = ImportMethod.objects.get(acts_on="url")
results = import_method.run(url)
else:
target = AttachedFile.from_upload(request.files['file'])
import_method = ImportMethod.objects.get(
acts_on=target.content_type)
results = import_method.run(target)
return redirect(
url_for(
'frontend.InvestigationView:import_wait',
id=results.id))
except DoesNotExist:
flash("This file type is not supported.", "danger")
return render_template(
"{}/import.html".format(self.klass.__name__.lower()))
@route("/<id>/import", methods=['GET'])
@requires_permissions("write", "investigation")
def import_from(self, id):
investigation = get_object_or_404(Investigation, id=id)
observables = Observable.from_string(investigation.import_text)
return render_template(
"{}/import_from.html".format(self.klass.__name__.lower()),
investigation=investigation,
observables=bson_renderer(observables))
def handle_form(self, *args, **kwargs):
kwargs['skip_validation'] = True
return super(InvestigationView, self).handle_form(*args, **kwargs)
|
Python
| 0.014118
|
@@ -2927,34 +2927,58 @@
el
+if %22file%22 in request.file
s
-e
:%0A
@@ -3226,16 +3226,172 @@
(target)
+%0A else:%0A flash(%22You need to provide an input%22, %22danger%22)%0A return redirect(request.referrer)
%0A%0A
|
c9c312555d5137daa6ce0525bcd7b4f160a0d133
|
load file in chunks
|
corehq/apps/dump_reload/sql/load.py
|
corehq/apps/dump_reload/sql/load.py
|
from __future__ import unicode_literals
import json
from collections import defaultdict
from django.apps import apps
from django.conf import settings
from django.core.management.color import no_style
from django.core.serializers.python import (
Deserializer as PythonDeserializer,
)
from django.db import (
DatabaseError, IntegrityError, connections, router,
transaction,
)
from django.utils.encoding import force_text
from corehq.form_processor.backends.sql.dbaccessors import ShardAccessor
from corehq.sql_db.config import partition_config
partitioned_model_id_fields = {
'form_processor.XFormInstanceSQL': 'form_id',
'form_processor.XFormAttachmentSQL': 'form',
'form_processor.XFormOperationSQL': 'form',
}
def load_sql_data(data_file):
"""
Loads data from a given file.
:return: tuple(total object count, loaded object count)
"""
# Keep a count of the installed objects
loaded_object_count = 0
total_object_count = 0
objects = json.load(data_file)
for db_alias, objects_for_db in _group_objects_by_db(objects):
with transaction.atomic(using=db_alias):
            loaded_objects, num_objects = load_data_for_db(db_alias, objects_for_db)
loaded_object_count += loaded_objects
total_object_count += num_objects
return total_object_count, loaded_object_count
def load_data_for_db(db_alias, objects):
connection = connections[db_alias]
objects_count = 0
loaded_object_count = 0
models = set()
with connection.constraint_checks_disabled():
for obj in PythonDeserializer(objects, using=db_alias):
objects_count += 1
if router.allow_migrate_model(db_alias, obj.object.__class__):
loaded_object_count += 1
models.add(obj.object.__class__)
try:
obj.save(using=db_alias)
except (DatabaseError, IntegrityError) as e:
e.args = ("Could not load %(app_label)s.%(object_name)s(pk=%(pk)s): %(error_msg)s" % {
'app_label': obj.object._meta.app_label,
'object_name': obj.object._meta.object_name,
'pk': obj.object.pk,
'error_msg': force_text(e)
},)
raise
# Since we disabled constraint checks, we must manually check for
# any invalid keys that might have been added
table_names = [model._meta.db_table for model in models]
try:
connection.check_constraints(table_names=table_names)
except Exception as e:
e.args = ("Problem loading data: %s" % e,)
raise
# If we found even one object, we need to reset the database sequences.
if loaded_object_count > 0:
sequence_sql = connection.ops.sequence_reset_sql(no_style(), models)
if sequence_sql:
with connection.cursor() as cursor:
for line in sequence_sql:
print line
cursor.execute(line)
return loaded_object_count, objects_count
def _group_objects_by_db(objects):
objects_by_db = defaultdict(list)
for obj in objects:
app_label = obj['model']
model = apps.get_model(app_label)
db_alias = router.db_for_write(model)
if settings.USE_PARTITIONED_DATABASE and db_alias == partition_config.get_proxy_db():
doc_id = _get_doc_id(app_label, obj)
db_alias = ShardAccessor.get_database_for_doc(doc_id)
objects_by_db[db_alias].append(obj)
return objects_by_db.items()
def _get_doc_id(app_label, model_json):
field = partitioned_model_id_fields[app_label]
return model_json[field]
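# Hedged sketch of the chunked JSON-lines reading this module moves to (see
# the diff below); the helper name and chunk size here are illustrative.
def _iter_json_chunks(lines, size=1000):
    chunk = []
    for line in lines:
        chunk.append(json.loads(line))  # one serialized object per line
        if len(chunk) >= size:
            yield chunk
            chunk = []
    if chunk:
        yield chunk  # flush the trailing partial chunk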
|
Python
| 0.000001
|
@@ -978,42 +978,559 @@
= 0%0A
+%0A
-objects = json.load(data_file)
+def _process_chunk(chunk):%0A global total_object_count, loaded_object_count%0A chunk_total, chunk_loaded = load_objects(chunk)%0A total_object_count += len(chunk)%0A loaded_object_count += chunk_loaded%0A%0A chunk = %5B%5D%0A for line in data_file:%0A chunk.append(json.load(line))%0A if len(chunk) %3E= 1000:%0A _process_chunk(chunk)%0A chunk = %5B%5D%0A%0A if chunk:%0A _process_chunk(chunk)%0A%0A return total_object_count, loaded_object_count%0A%0A%0Adef load_objects(objects):%0A loaded_object_count = 0%0A
%0A
@@ -1672,21 +1672,8 @@
ects
-, num_objects
= l
@@ -1757,81 +1757,19 @@
cts%0A
- total_object_count += num_objects%0A%0A return total_object_count,
+%0A return
loa
@@ -1872,30 +1872,8 @@
s%5D%0A%0A
- objects_count = 0%0A
@@ -2033,39 +2033,8 @@
s):%0A
- objects_count += 1%0A
@@ -3358,39 +3358,8 @@
ql:%0A
- print line%0A
@@ -3429,23 +3429,8 @@
ount
-, objects_count
%0A%0A%0Ad
|
e519e6367686b2439b07ecd01a2a28ea5eded64d
|
use json encoder that handles decimals
|
corehq/apps/tzmigration/planning.py
|
corehq/apps/tzmigration/planning.py
|
import json
import sqlite3
from sqlite3 import dbapi2 as sqlite
from sqlalchemy import create_engine, Column, Integer, ForeignKey, String, \
UnicodeText, Text
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm.exc import NoResultFound
Base = declarative_base()
class PlanningForm(Base):
__tablename__ = 'form'
id = Column(Integer, primary_key=True)
uuid = Column(String(50), nullable=False, unique=True)
form_json = Column(UnicodeText, nullable=False)
class PlanningCase(Base):
__tablename__ = 'case'
id = Column(Integer, primary_key=True)
uuid = Column(String(50), nullable=False, unique=True)
case_json = Column(UnicodeText, nullable=True)
doc_type = Column(String(50), nullable=True)
class PlanningCaseAction(Base):
__tablename__ = 'case_action'
id = Column(Integer, primary_key=True)
form = Column(Integer, ForeignKey('form.id'), nullable=False)
case = Column(Integer, ForeignKey('case.id'), nullable=False)
action_json = Column(UnicodeText, nullable=False)
class PlanningDiff(Base):
__tablename__ = 'diff'
id = Column(Integer, primary_key=True)
kind = Column(String(50), nullable=False)
doc_id = Column(String(50), nullable=False)
diff_type = Column(String(50), nullable=False)
path = Column(Text(), nullable=False)
old_value = Column(UnicodeText, nullable=True)
new_value = Column(UnicodeText, nullable=True)
@property
def json_diff(self):
from corehq.apps.tzmigration.timezonemigration import FormJsonDiff
def json_loads_or_ellipsis(val):
if val is None:
return Ellipsis
else:
return json.loads(val)
return FormJsonDiff(
self.diff_type, json.loads(self.path),
json_loads_or_ellipsis(self.old_value),
json_loads_or_ellipsis(self.new_value)
)
class PlanningStockReportHelper(Base):
__tablename__ = 'stock_report_helper'
id = Column(Integer, primary_key=True)
form = Column(Integer, ForeignKey('form.id'), nullable=False)
stock_report_helper_json = Column(UnicodeText, nullable=False)
class BaseDB(object):
def __init__(self, db_filepath):
self.db_filepath = db_filepath
self._connection = None
self.engine = create_engine(
'sqlite+pysqlite:///{}'.format(db_filepath), module=sqlite)
self.Session = sessionmaker(bind=self.engine)
@classmethod
def init(cls, db_filepath):
self = cls(db_filepath)
Base.metadata.create_all(self.engine)
return self
@classmethod
def open(cls, db_filepath):
return cls(db_filepath)
@property
def connection(self):
if not self._connection:
self._connection = sqlite3.connect(self.db_filepath)
return self._connection
class DiffDB(BaseDB):
def add_diffs(self, kind, doc_id, doc_diffs):
session = self.Session()
def json_dumps_or_none(val):
if val is Ellipsis:
return None
else:
return json.dumps(val)
for d in doc_diffs:
session.add(PlanningDiff(
kind=kind,
doc_id=doc_id, diff_type=d.diff_type, path=json.dumps(d.path),
old_value=json_dumps_or_none(d.old_value),
new_value=json_dumps_or_none(d.new_value)))
session.commit()
def get_diffs(self):
session = self.Session()
return session.query(PlanningDiff).all()
class PlanningDB(DiffDB):
def add_form(self, form_id, form_json):
session = self.Session()
session.add(PlanningForm(uuid=form_id, form_json=json.dumps(form_json)))
session.commit()
def ensure_case(self, case_id):
session = self.Session()
try:
(session.query(PlanningCase)
.filter(PlanningCase.uuid == case_id).one())
except NoResultFound:
session.add(PlanningCase(uuid=case_id))
session.commit()
def add_case_actions(self, case_id, case_actions):
session = self.Session()
for xform_id, case_action in case_actions:
session.add(PlanningCaseAction(
form=xform_id, case=case_id,
action_json=json.dumps(case_action)))
session.commit()
def add_stock_report_helpers(self, stock_report_helpers):
session = self.Session()
for stock_report_helper in stock_report_helpers:
session.add(
PlanningStockReportHelper(
form=stock_report_helper.form_id,
stock_report_helper_json=json.dumps(stock_report_helper)))
session.commit()
def get_all_form_ids(self):
session = self.Session()
form_ids = {
uuid for (uuid,) in
session.query(PlanningForm).with_entities(PlanningForm.uuid).all()
}
return form_ids
def get_all_case_ids(self, valid_only=True):
"""Exclude CommCareCare-Deleted"""
session = self.Session()
query = session.query(PlanningCase).with_entities(PlanningCase.uuid)
if valid_only:
query = query.filter(PlanningCase.doc_type == 'CommCareCase')
case_ids = {uuid for (uuid,) in query.all()}
return case_ids
def update_case_json(self, case_id, case_json):
session = self.Session()
(session.query(PlanningCase).filter(PlanningCase.uuid == case_id)
.update({'case_json': json.dumps(case_json)}))
session.commit()
def update_case_doc_type(self, case_id, doc_type):
session = self.Session()
(session.query(PlanningCase).filter(PlanningCase.uuid == case_id)
.update({'doc_type': doc_type}))
session.commit()
def get_actions_by_case(self, case_id):
session = self.Session()
result = (
session.query(PlanningCaseAction)
.filter(PlanningCaseAction.case == case_id)
# this should keep them in form insert order
.order_by(PlanningCaseAction.id)
.with_entities(PlanningCaseAction.action_json))
return [json.loads(action_json) for action_json, in result]
def get_forms(self):
session = self.Session()
return (json.loads(form_json) for form_json, in
session.query(PlanningForm).order_by(PlanningForm.id)
.with_entities(PlanningForm.form_json).all())
def get_cases(self):
session = self.Session()
return (json.loads(case_json) for case_json, in
session.query(PlanningCase).order_by(PlanningCase.id)
.with_entities(PlanningCase.case_json).all())
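# Hedged illustration of why a Decimal-aware encoder is needed (the real
# LazyEncoder lives in corehq.apps.hqwebapp.encoders; this class is only a
# sketch): the stdlib encoder raises TypeError on Decimal values.
import decimal
class _DecimalEncoderSketch(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, decimal.Decimal):
            return str(obj)  # serialize decimals as strings
        return super(_DecimalEncoderSketch, self).default(obj)
# json.dumps({'amount': decimal.Decimal('1.5')})                   # TypeError
# json.dumps({'amount': decimal.Decimal('1.5')}, cls=_DecimalEncoderSketch)
# -> '{"amount": "1.5"}'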
|
Python
| 0.000007
|
@@ -299,16 +299,71 @@
tFound%0A%0A
+from corehq.apps.hqwebapp.encoders import LazyEncoder%0A%0A
Base = d
@@ -3215,24 +3215,41 @@
on.dumps(val
+, cls=LazyEncoder
)%0A%0A f
|
f058477eb0320fb28a00626c246d6ad7b2d120a2
|
use new experiment_utils.{get_fs_writer,get_fs_reader_funcs}
|
crosscat/tests/geweke_on_schemas.py
|
crosscat/tests/geweke_on_schemas.py
|
import argparse
import itertools
import functools
#
import crosscat.utils.geweke_utils as geweke_utils
import crosscat.utils.experiment_utils as experiment_utils
from crosscat.utils.general_utils import MapperContext, NoDaemonPool, Timer
def generate_args_list(base_num_rows, num_iters):
num_iters_str = str(num_iters)
base_num_rows_str = str(base_num_rows)
col_type_list = ['continuous', 'multinomial']
#
base_args = ['--num_rows', base_num_rows_str, '--num_iters', num_iters_str]
col_type_pairs = sorted(itertools.combinations(col_type_list, 2))
args_list = []
# single datatype
num_cols_list = [1, 10]
iter_over = itertools.product(col_type_list, num_cols_list)
for col_type, num_cols in iter_over:
args = base_args + \
['--num_cols', str(num_cols), '--cctypes'] + \
[col_type] * num_cols
args_list.append(args)
pass
# pairs of datatypes
iter_over = itertools.product(col_type_pairs, num_cols_list)
for (col_type_a, col_type_b), num_cols in iter_over:
args = base_args + \
['--num_cols', str(2 * num_cols), '--cctypes'] + \
[col_type_a] * num_cols + \
[col_type_b] * num_cols
args_list.append(args)
pass
# # individual schemas
# num_cols = 100
# args = ['--num_rows', '100', '--num_iters', num_iters_str, '--num_cols',
# str(num_cols), '--cctypes'] + ['continuous'] * num_cols
# args_list.append(args)
# args = ['--num_rows', '100', '--num_iters', num_iters_str, '--num_cols',
# str(num_cols), '--num_multinomial_values', '128', '--cctypes'] + \
# ['multinomial'] * num_cols
# args_list.append(args)
# num_cols = 1000
# args = ['--num_rows', '100', '--num_iters', num_iters_str, '--num_cols',
# str(num_cols), '--num_multinomial_values', '2', '--cctypes'] + \
# ['multinomial'] * num_cols
# args_list.append(args)
return args_list
is_result_filepath = geweke_utils.is_summary_file
config_to_filepath = geweke_utils.config_to_filepath
runner = geweke_utils.run_geweke
do_experiments = experiment_utils.do_experiments
# use provided local file system writer
_writer = experiment_utils.fs_write_result
writer = functools.partial(_writer, config_to_filepath)
def print_all_summaries(filter_func=None):
# you could read results like this
_read_all_configs = experiment_utils.fs_read_all_configs
_reader = experiment_utils.fs_read_result
read_results = experiment_utils.read_results
read_all_configs = functools.partial(_read_all_configs, is_result_filepath)
reader = functools.partial(_reader, config_to_filepath)
config_list = read_all_configs(dirname)
config_list = filter(filter_func, config_list)
results = read_results(reader, config_list, dirname)
for result in results:
print
print result['config']
print result['summary']
print
pass
return
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--dirname', default='geweke_on_schemas', type=str)
parser.add_argument('--base_num_rows', default=40, type=int)
parser.add_argument('--num_iters', default=400, type=int)
args = parser.parse_args()
dirname = args.dirname
base_num_rows = args.base_num_rows
num_iters = args.num_iters
args_to_config = geweke_utils.args_to_config
args_list = generate_args_list(base_num_rows, num_iters)
config_list = map(args_to_config, args_list)
with Timer('experiments') as timer:
with MapperContext(Pool=NoDaemonPool) as mapper:
# use non-daemonic mapper since run_geweke spawns daemonic processes
do_experiments(config_list, runner, writer, dirname, mapper)
pass
pass
# print_all_summaries()
|
Python
| 0.000004
|
@@ -2021,422 +2021,163 @@
st%0A%0A
-%0Ais_result_filepath = geweke_utils.is_summary_file%0Aconfig_to_filepath = geweke_utils.config_to_filepath%0Arunner = geweke_utils.run_geweke%0Ado_experiments = experiment_utils.do_experiments%0A# use provided local file system writer%0A_writer = experiment_utils.fs_write_result%0Awriter = functools.partial(_writer, config_to_filepath)%0A%0A%0Adef print_all_summaries(filter_func=None):%0A # you could read results like this
+def plot_all_configs(dirname='./'):%0A pass%0A%0Adef print_all_summaries(is_result_filepath, config_to_filepath,%0A filter_func=None, dirname='./'):
%0A
-_
read
@@ -2192,261 +2192,101 @@
figs
- = experiment_utils.fs_read_all_configs%0A _reader = experiment_utils.fs_read_result%0A read_results = experiment_utils.read_results%0A read_all_configs = functools.partial(_read_all_configs, is_result_filepath)%0A reader = functools.partial(_reader
+, reader, read_results = experiment_utils.get_fs_reader_funcs(%0A is_result_filepath
, co
@@ -2425,24 +2425,16 @@
results(
-reader,
config_l
@@ -2856,24 +2856,89 @@
, type=int)%0A
+ parser.add_argument('--generate_plots', action='store_true')%0A
args = p
@@ -3057,58 +3057,372 @@
ers%0A
-%0A%0A args_to_config = geweke_utils.args_to_config
+ generate_plots = args.generate_plots%0A%0A%0A is_result_filepath = geweke_utils.is_summary_file%0A config_to_filepath = geweke_utils.config_to_filepath%0A runner = geweke_utils.run_geweke%0A args_to_config = geweke_utils.args_to_config%0A #%0A do_experiments = experiment_utils.do_experiments%0A writer = experiment_utils.get_fs_writer(config_to_filepath)%0A%0A
%0A
@@ -3528,17 +3528,16 @@
s_list)%0A
-%0A
with
@@ -3818,28 +3818,166 @@
-# print_all_summaries(
+if generate_plots:%0A print 'would have run plot_all_configs'%0A pass%0A%0A print_all_summaries(is_result_filepath, config_to_filepath, dirname=dirname
)%0A
|
7b6542d58bbe788587b47e282ef393eda461f267
|
add get method in UserAPI
|
api/route/user.py
|
api/route/user.py
|
from flask import request
from flask.ext import restful
from flask.ext.restful import marshal_with
from route.base import api
from flask.ext.bcrypt import generate_password_hash
from model.base import db
from model.user import User, user_marshaller
class UserAPI(restful.Resource):
@marshal_with(user_marshaller)
def post(self):
data = request.get_json()
hashed_password = generate_password_hash(data['password'])
user = User(data['first_name'], data['last_name'], data['email'], hashed_password, data['birthday'])
db.session.add(user)
db.session.commit()
return user
api.add_resource(UserAPI, "/user")
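# Hedged usage sketch (illustrative request against the route above; host and
# port depend on how the Flask app is run):
#   curl -X POST http://localhost:5000/user \
#        -H 'Content-Type: application/json' \
#        -d '{"first_name": "Ada", "last_name": "Lovelace",
#             "email": "ada@example.com", "password": "s3cret",
#             "birthday": "1815-12-10"}'
# The response is the created user serialized through user_marshaller.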
|
Python
| 0.000001
|
@@ -623,16 +623,109 @@
n user%0A%0A
+ @marshal_with(user_marshaller)%0A def get(self):%0A%09user = User.query.all()%0A%09return user%0A%0A
api.add_
@@ -750,8 +750,9 @@
%22/user%22)
+%0A
|
346a7d18ef6dc063e2802a0347709700a1543902
|
update show list
|
1/showics/models.py
|
1/showics/models.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Last modified: Wang Tai (i@wangtai.me)
"""docstring
"""
__revision__ = '0.1'
from django.db import models
class ShowTableIcs(models.Model):
# uid
uid = models.CharField(max_length=255, unique=True, primary_key=True)
# title
title = models.CharField(max_length=255, null=False)
# description
description = models.CharField(max_length=255)
# date
date = models.DateField()
class Meta(object):
db_table = 'show_table_ics'
|
Python
| 0
|
@@ -190,18 +190,8 @@
l):%0A
- # uid%0A
@@ -264,20 +264,8 @@
ue)%0A
- # title%0A
@@ -321,26 +321,8 @@
se)%0A
- # description%0A
@@ -372,19 +372,8 @@
55)%0A
- # date%0A
@@ -398,16 +398,17 @@
Field()%0A
+%0A
clas
@@ -423,16 +423,16 @@
bject):%0A
-
@@ -458,8 +458,231 @@
ble_ics'
+%0A%0A%0Aclass ShowList(models.Model):%0A show_id = models.CharField(max_length=255, primary_key=True)%0A title = models.CharField(max_length=255, unique=True, null=False)%0A%0A class Meta(object):%0A db_table = 'show_list'
|
736e850a8ee4df1efeae41407a83bcde356d28bd
|
Use werkzeug.urls instead of urlparse
|
addons/mass_mailing/models/mail_mail.py
|
addons/mass_mailing/models/mail_mail.py
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import re
import werkzeug.urls
from odoo import api, fields, models, tools
from openerp.addons.link_tracker.models.link_tracker import URL_REGEX
class MailMail(models.Model):
"""Add the mass mailing campaign data to mail"""
_inherit = ['mail.mail']
mailing_id = fields.Many2one('mail.mass_mailing', string='Mass Mailing')
statistics_ids = fields.One2many('mail.mail.statistics', 'mail_mail_id', string='Statistics')
@api.model
def create(self, values):
""" Override mail_mail creation to create an entry in mail.mail.statistics """
# TDE note: should be after 'all values computed', to have values (FIXME after merging other branch holding create refactoring)
mail = super(MailMail, self).create(values)
if values.get('statistics_ids'):
mail_sudo = mail.sudo()
mail_sudo.statistics_ids.write({'message_id': mail_sudo.message_id, 'state': 'outgoing'})
return mail
def _get_tracking_url(self, partner=None):
base_url = self.env['ir.config_parameter'].sudo().get_param('web.base.url')
track_url = werkzeug.urls.url_join(
base_url, 'mail/track/%(mail_id)s/blank.gif?%(params)s' % {
'mail_id': self.id,
'params': werkzeug.urls.url_encode({'db': self.env.cr.dbname})
}
)
return '<img src="%s" alt=""/>' % track_url
def _get_unsubscribe_url(self, email_to):
base_url = self.env['ir.config_parameter'].sudo().get_param('web.base.url')
url = werkzeug.urls.url_join(
base_url, 'mail/mailing/%(mailing_id)s/unsubscribe?%(params)s' % {
'mailing_id': self.mailing_id.id,
'params': werkzeug.urls.url_encode({
'db': self.env.cr.dbname,
'res_id': self.res_id,
'email': email_to,
'token': self.mailing_id._unsubscribe_token(
self.res_id, email_to),
}),
}
)
return url
@api.multi
def send_get_mail_body(self, partner=None):
""" Override to add the tracking URL to the body and to add
        Statistic_id in shortened urls """
# TDE: temporary addition (mail was parameter) due to semi-new-API
self.ensure_one()
body = super(MailMail, self).send_get_mail_body(partner=partner)
if self.mailing_id and body and self.statistics_ids:
for match in re.findall(URL_REGEX, self.body_html):
href = match[0]
url = match[1]
parsed = urlparse.urlparse(url, scheme='http')
if parsed.scheme.startswith('http') and parsed.path.startswith('/r/'):
new_href = href.replace(url, url + '/m/' + str(self.statistics_ids[0].id))
body = body.replace(href, new_href)
# prepend <base> tag for images using absolute urls
domain = self.env["ir.config_parameter"].sudo().get_param("web.base.url")
base = "<base href='%s'>" % domain
body = tools.append_content_to_html(base, body, plaintext=False, container_tag='div')
# resolve relative image url to absolute for outlook.com
def _sub_relative2absolute(match):
return match.group(1) + werkzeug.urls.url_join(domain, match.group(2))
body = re.sub('(<img(?=\s)[^>]*\ssrc=")(/[^/][^"]+)', _sub_relative2absolute, body)
body = re.sub(r'(<[^>]+\bstyle="[^"]+\burl\(\'?)(/[^/\'][^\'")]+)', _sub_relative2absolute, body)
# generate tracking URL
if self.statistics_ids:
tracking_url = self._get_tracking_url(partner)
if tracking_url:
body = tools.append_content_to_html(body, tracking_url, plaintext=False, container_tag='div')
return body
@api.multi
def send_get_email_dict(self, partner=None):
# TDE: temporary addition (mail was parameter) due to semi-new-API
res = super(MailMail, self).send_get_email_dict(partner)
base_url = self.env['ir.config_parameter'].sudo().get_param('web.base.url')
if self.mailing_id and res.get('body') and res.get('email_to'):
emails = tools.email_split(res.get('email_to')[0])
email_to = emails and emails[0] or False
unsubscribe_url = self._get_unsubscribe_url(email_to)
link_to_replace = base_url + '/unsubscribe_from_list'
if link_to_replace in res['body']:
res['body'] = res['body'].replace(link_to_replace, unsubscribe_url if unsubscribe_url else '#')
return res
@api.multi
def _postprocess_sent_message(self, mail_sent=True):
for mail in self:
if mail_sent is True and mail.statistics_ids:
mail.statistics_ids.write({'sent': fields.Datetime.now(), 'exception': False})
elif mail_sent is False and mail.statistics_ids:
mail.statistics_ids.write({'exception': fields.Datetime.now()})
return super(MailMail, self)._postprocess_sent_message(mail_sent=mail_sent)
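# Hedged standalone sketch of the relative-to-absolute image rewrite used in
# send_get_mail_body above (the domain value is hypothetical; assumes a
# werkzeug release that still ships werkzeug.urls.url_join):
def _demo_relative2absolute(body, domain='https://example.com'):
    fix = lambda match: match.group(1) + werkzeug.urls.url_join(domain, match.group(2))
    return re.sub('(<img(?=\s)[^>]*\ssrc=")(/[^/][^"]+)', fix, body)
# _demo_relative2absolute('<img src="/web/image/42"/>')
# -> '<img src="https://example.com/web/image/42"/>'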
|
Python
| 0.000002
|
@@ -2710,20 +2710,26 @@
d =
-urlparse
+werkzeug.urls
.url
+_
pars
|
5b2cc6ed06045bbe219f9cf81317c1c1a5bac714
|
add missing docstring in ttls
|
biggraphite/drivers/ttls.py
|
biggraphite/drivers/ttls.py
|
#!/usr/bin/env python
# Copyright 2016 Criteo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Time constants and functions used by accessors."""
import dateutil.parser
import time
MINUTE = 60
HOUR = 60 * MINUTE
DAY = 24 * HOUR
DEFAULT_UPDATED_ON_TTL_SEC = 3 * DAY
def str_to_datetime(str_repr):
if not str_repr:
return None
return dateutil.parser.parse(str_repr)
def str_to_timestamp(str_repr):
if not str_repr:
return None
datetime_tuple = str_to_datetime(str_repr)
ts = time.mktime(datetime_tuple.timetuple())
return ts
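# Hedged usage sketch (requires python-dateutil):
#   str_to_datetime('2016-07-01 12:00:00')   # datetime.datetime(2016, 7, 1, 12, 0)
#   str_to_timestamp('2016-07-01 12:00:00')  # epoch seconds as a float (local time)
#   str_to_timestamp(None)                   # None -- empty input passes through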
|
Python
| 0.000007
|
@@ -779,32 +779,76 @@
time(str_repr):%0A
+ %22%22%22Convert a string into a datetime.%22%22%22%0A
if not str_r
@@ -945,24 +945,69 @@
(str_repr):%0A
+ %22%22%22Convert a string into a timestamp.%22%22%22%0A
if not s
|
808a5b14fc0bfff8d8c23cb4e1f125ef84de6d91
|
Remove deprecated oslotest.mockpatch usage
|
bilean/tests/common/base.py
|
bilean/tests/common/base.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
import fixtures
from oslotest import mockpatch
import testscenarios
import testtools
from bilean.common import messaging
from bilean.tests.common import utils
TEST_DEFAULT_LOGLEVELS = {'migrate': logging.WARN,
'sqlalchemy': logging.WARN}
_LOG_FORMAT = "%(levelname)8s [%(name)s] %(message)s"
_TRUE_VALUES = ('True', 'true', '1', 'yes')
class FakeLogMixin(object):
def setup_logging(self):
# Assign default logs to self.LOG so we can still
# assert on bilean logs.
default_level = logging.INFO
if os.environ.get('OS_DEBUG') in _TRUE_VALUES:
default_level = logging.DEBUG
self.LOG = self.useFixture(
fixtures.FakeLogger(level=default_level, format=_LOG_FORMAT))
base_list = set([nlog.split('.')[0]
for nlog in logging.Logger.manager.loggerDict])
for base in base_list:
if base in TEST_DEFAULT_LOGLEVELS:
self.useFixture(fixtures.FakeLogger(
level=TEST_DEFAULT_LOGLEVELS[base],
name=base, format=_LOG_FORMAT))
elif base != 'bilean':
self.useFixture(fixtures.FakeLogger(
name=base, format=_LOG_FORMAT))
class BileanTestCase(testscenarios.WithScenarios,
testtools.TestCase, FakeLogMixin):
def setUp(self):
super(BileanTestCase, self).setUp()
self.setup_logging()
self.useFixture(fixtures.MonkeyPatch(
'bilean.common.exception._FATAL_EXCEPTION_FORMAT_ERRORS',
True))
messaging.setup("fake://", optional=True)
self.addCleanup(messaging.cleanup)
utils.setup_dummy_db()
self.addCleanup(utils.reset_dummy_db)
def patchobject(self, obj, attr, **kwargs):
mockfixture = self.useFixture(mockpatch.PatchObject(obj, attr,
**kwargs))
return mockfixture.mock
# NOTE(pshchelo): this overrides the testtools.TestCase.patch method
# that does simple monkey-patching in favor of mock's patching
def patch(self, target, **kwargs):
mockfixture = self.useFixture(mockpatch.Patch(target, **kwargs))
return mockfixture.mock
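# Hedged usage sketch inside a test method (the patch target is illustrative):
#   def test_messaging_setup(self):
#       fake_setup = self.patch('bilean.common.messaging.setup')
#       messaging.setup('fake://', optional=True)
#       fake_setup.assert_called_once_with('fake://', optional=True)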
|
Python
| 0.000006
|
@@ -588,39 +588,8 @@
res%0A
-from oslotest import mockpatch%0A
impo
@@ -2386,34 +2386,37 @@
.useFixture(
-mockpatch.
+fixtures.Mock
PatchObject(
@@ -2426,16 +2426,19 @@
, attr,%0A
+
@@ -2754,18 +2754,21 @@
ure(
-mockpatch.
+fixtures.Mock
Patc
|
4426526fa1b7e9dac86d4252a3b163dc2c6325cb
|
Fix APITaxi2 routing
|
APITaxi/__init__.py
|
APITaxi/__init__.py
|
# -*- coding: utf-8 -*-
import os
import re
from flask import Flask, g, request_started, request_finished
from flask_restplus import abort
from flask_uploads import configure_uploads
from APITaxi_models import db, security, HailLog
from APITaxi_utils.login_manager import init_app as init_login_manager
from APITaxi_utils.version import check_version, add_version_header
from . import api, tasks
from .api.extensions import documents
from .commands.warm_up_redis import warm_up_redis_func
from .extensions import redis_store, redis_store_saved, user_datastore
from APITaxi2 import create_app as create_new_app
__author__ = 'Vincent Lara'
__contact__ = "vincent.lara@data.gouv.fr"
__homepage__ = "https://github.com/"
__version__ = '0.1.0'
__doc__ = 'Flask application to serve APITaxi'
def load_configuration(app):
"""Load application configuration:
- from default_settings.py
- from the settings file set in the environment variable
APITAXI_CONFIG_FILE
- from variables set in environment
"""
app.config.from_object('APITaxi.default_settings')
if 'APITAXI_CONFIG_FILE' in os.environ:
app.config.from_envvar('APITAXI_CONFIG_FILE')
if 'ENV' not in app.config:
raise ValueError('Configuration variable ENV is required')
valid_env = ('PROD', 'STAGING', 'DEV')
if app.config['ENV'] not in valid_env:
raise ValueError('ENV {} invalid, must be any of {}'.format(
app.config['ENV'], valid_env
))
for param in app.config:
if param not in os.environ:
continue
app.config[param] = os.environ[param]
app.url_map.strict_slashes = False
def print_url_map(url_map):
for rule in sorted(url_map.iter_rules(), key=lambda r: r.rule):
methods = [m for m in rule.methods if m not in('OPTIONS', 'HEAD')]
print(('\t%-45s -> %s' % (rule.rule, ', '.join(methods))))
def unauthorized():
"""By default, @login_required returns HTTP/301 HTML response to redirect
the browser to the login form. Since we are in the context of an API,
clients are expecting a JSON response.
"""
abort(401, error='You are not logged in. Please provide a valid X-Api-Key header.')
def create_legacy_app():
app = Flask(__name__)
load_configuration(app)
db.init_app(app)
redis_store.init_app(app)
redis_store_saved.init_app(app)
api.init_app(app)
request_started.connect(check_version, app)
request_finished.connect(add_version_header, app)
configure_uploads(app, (documents,))
init_login_manager(app, user_datastore, None)
app.login_manager.unauthorized_handler(unauthorized)
tasks.init_app(app)
user_datastore.init_app(db, security.User, security.Role)
@app.before_first_request
def warm_up_redis():
warm_up_redis_func(app, db, security.User, redis_store)
def delete_redis_keys(response):
if not hasattr(g, 'keys_to_delete'):
return response
redis_store.delete(*g.keys_to_delete)
return response
app.after_request_funcs.setdefault(None, []).append(
HailLog.after_request(redis_store_saved)
)
app.after_request_funcs.setdefault(None, []).append(
delete_redis_keys
)
# Only display url_map if debug and from the worker thread.
if app.debug and os.environ.get('WERKZEUG_RUN_MAIN'):
print_url_map(app.url_map)
return app
class RegexpDispatcherMiddleware:
""" Forwards requests to applications depending on method and path.
"""
def __init__(self, app, overrides=None):
self.app = app
self.overrides = overrides or {}
def __call__(self, environ, start_response):
for override in self.overrides.values():
if (
re.match(override['regexp'], environ['PATH_INFO'])
and ('methods' not in override
or environ['REQUEST_METHOD'] in override['methods'])
):
return override['app'](environ, start_response)
return self.app(environ, start_response)
def create_app(proxy_v2=True):
"""Forward all requests to legacy application, except for routes that have
been updated on the new API."""
legacy_app = create_legacy_app()
if proxy_v2:
new_app = create_new_app()
legacy_app.wsgi_app = RegexpDispatcherMiddleware(legacy_app.wsgi_app, {
'Customers': {
'regexp': r'^/customers/.*$',
'app': new_app,
},
'Users': {
'regexp': r'^/users(/.*)?$',
'app': new_app,
},
'Drivers': {
'regexp': r'^/drivers(/.*)?$',
'app': new_app,
},
'ADS': {
'regexp': r'^/ads(/.*)?$',
'app': new_app,
},
'GET /taxis/:id': {
'regexp': r'^/taxis2/.+$',
'app': new_app,
'methods': ['GET']
},
})
return legacy_app
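# Hedged sketch of how the middleware above dispatches (the rules and apps are
# illustrative WSGI callables, not the real configuration):
#   wsgi = RegexpDispatcherMiddleware(legacy_wsgi, {
#       'Users': {'regexp': r'^/users(/.*)?$', 'app': new_wsgi},
#       'Taxis': {'regexp': r'^/taxis/.+$', 'app': new_wsgi,
#                 'methods': ['GET']},
#   })
#   GET /users/42       -> new_wsgi    (regexp matches, no method restriction)
#   POST /taxis/abc     -> legacy_wsgi (regexp matches, method does not)
#   GET /anything-else  -> legacy_wsgi (no override matches)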
|
Python
| 0.000014
|
@@ -4929,17 +4929,16 @@
'%5E/taxis
-2
/.+$',%0A
|
ca5c3648ad5f28090c09ecbbc0e008c51a4ce708
|
Add a new dev (optional) parameter and use it
|
bin/push/silent_ios_push.py
|
bin/push/silent_ios_push.py
|
import json
import logging
import argparse
import emission.net.ext_service.push.notify_usage as pnu
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
parser = argparse.ArgumentParser(prog="silent_ios_push")
parser.add_argument("interval",
help="specify the sync interval that the phones have subscribed to",
type=int)
args = parser.parse_args()
logging.debug("About to send notification to phones with interval %d" % args.interval)
response = pnu.send_silent_notification_to_ios_with_interval(args.interval, dev=True)
pnu.display_response(response)
|
Python
| 0.000001
|
@@ -358,16 +358,91 @@
ype=int)
+%0A parser.add_argument(%22-d%22, %22--dev%22, action=%22store_true%22, default=False)
%0A%0A ar
@@ -645,12 +645,16 @@
dev=
-True
+args.dev
)%0A
|
d37a1d333f00f7cd060734fce1c05f1434938bce
|
refactor connection and channel
|
kuyruk/__init__.py
|
kuyruk/__init__.py
|
from __future__ import absolute_import
import errno
import select
import logging
from contextlib import contextmanager
import pika
import pika.exceptions
from kuyruk import exceptions
from kuyruk.task import Task
from kuyruk.queue import Queue
from kuyruk.config import Config
from kuyruk.worker import Worker
from kuyruk.events import EventMixin
__version__ = '0.17.1'
try:
# not available in python 2.6
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
logger = logging.getLogger(__name__)
# Add NullHandler to prevent logging warnings on startup
null_handler = NullHandler()
logger.addHandler(null_handler)
# Pika should do this. Patch submitted to Pika.
logging.getLogger('pika').addHandler(null_handler)
# Monkey patch ReadPoller until pika 0.9.14 is released
def ready(self):
while True:
try:
return original_method(self)
break
except select.error as e:
if e[0] != errno.EINTR:
raise
from pika.adapters.blocking_connection import ReadPoller
original_method = ReadPoller.ready
ReadPoller.ready = ready
class Kuyruk(EventMixin):
"""
Main class for Kuyruk distributed task queue. It holds the configuration
values and provides a task decorator for user application
:param config: A module that contains configuration options.
See :ref:`configuration-options` for default values.
"""
Reject = exceptions.Reject # Shortcut for raising from tasks
def __init__(self, config=None, task_class=Task):
self.task_class = task_class
self.config = Config()
self._connection = None
self._channel = None
if config:
self.config.from_object(config)
def task(self, queue='kuyruk', eager=False, retry=0, task_class=None,
max_run_time=None, local=False, arg_class=None):
"""
Wrap functions with this decorator to convert them to background
tasks. After wrapping, calling the function will send a message to
the queue instead of running the function.
:param queue: Queue name for the tasks.
:param eager: Run task in process, do not use RabbitMQ.
:param retry: Retry this many times before giving up.
:param task_class: Custom task class.
Must be a subclass of :class:`~Task`.
If this is :const:`None` then :attr:`Task.task_class` will be used.
:param max_run_time: Maximum allowed time in seconds for task to
complete.
:param arg_class: Class of the first argument. If it is present,
the first argument will be converted to its ``id`` when sending the
task to the queue and it will be reloaded on worker when running
the task.
:return: Callable :class:`~Task` object wrapping the original function.
"""
def decorator():
def inner(f):
# Function may be wrapped with no-arg decorator
queue_ = 'kuyruk' if callable(queue) else queue
task_class_ = task_class or self.task_class
return task_class_(
f, self,
queue=queue_, eager=eager, local=local, retry=retry,
max_run_time=max_run_time, arg_class=arg_class)
return inner
if callable(queue):
logger.debug('task without args')
return decorator()(queue)
else:
logger.debug('task with args')
return decorator()
def connection(self):
"""
Returns the shared RabbitMQ connection.
Creates a new connection if it is not connected.
"""
if self._connection is None or not self._connection.is_open:
self._connection = self._connect()
return self._connection
def _connect(self):
"""Returns new connection object."""
parameters = pika.ConnectionParameters(
host=self.config.RABBIT_HOST,
port=self.config.RABBIT_PORT,
virtual_host=self.config.RABBIT_VIRTUAL_HOST,
credentials=pika.PlainCredentials(
self.config.RABBIT_USER,
self.config.RABBIT_PASSWORD),
heartbeat_interval=0, # We don't want heartbeats
socket_timeout=2,
connection_attempts=2)
connection = pika.BlockingConnection(parameters)
logger.info('Connected to RabbitMQ')
return connection
def channel(self):
"""
Returns the shared channel.
Creates a new channel if none is available.
"""
if self._channel is None or not self._channel.is_open:
self._channel = self._open_channel()
return self._channel
def _open_channel(self):
"""Returns a new channel."""
CLOSED = (pika.exceptions.ConnectionClosed,
pika.exceptions.ChannelClosed)
try:
return self.connection().channel()
except CLOSED:
logger.warning("Connection is closed. Reconnecting...")
try:
self._connection.close()
except CLOSED:
logger.debug("Connection is already closed.")
self._connection = self._connect()
return self._connection.channel()
def close(self):
if self._connection is not None:
if self._connection.is_open:
self._connection.close()
|
Python
| 0.000001
|
@@ -77,46 +77,8 @@
ging
-%0Afrom contextlib import contextmanager
%0A%0Aim
@@ -4408,32 +4408,81 @@
mpts=2)%0A
+from kuyruk.connection import Connection%0A
connection = pik
@@ -4482,21 +4482,8 @@
n =
-pika.Blocking
Conn
@@ -5045,30 +5045,33 @@
-return
+channel =
self.connec
@@ -5283,53 +5283,12 @@
-logger.debug(%22Connection is already closed.%22)
+pass
%0A%0A
@@ -5336,38 +5336,41 @@
t()%0A
-return
+channel =
self._connectio
@@ -5378,24 +5378,48 @@
.channel()%0A%0A
+ return channel%0A%0A
def clos
|
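A hedged sketch of the lazy "create if missing or closed" pattern that Kuyruk.connection() and channel() share; FakeConnection stands in for the pika connection object.

class FakeConnection:
    def __init__(self):
        self.is_open = True

    def close(self):
        self.is_open = False

class Client:
    def __init__(self):
        self._connection = None

    def connection(self):
        # Reuse the shared connection; reconnect only when needed.
        if self._connection is None or not self._connection.is_open:
            self._connection = FakeConnection()
        return self._connection

c = Client()
first = c.connection()
assert c.connection() is first       # still open, so it is reused
first.close()
assert c.connection() is not first   # closed connection is replaced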
a33b8222959cc14a4c89658e6d7aa6ff07f27c0c
|
remove commented code
|
ephypype/import_ctf.py
|
ephypype/import_ctf.py
|
"""Import ctf."""
# -------------------- nodes (Function)
def convert_ds_to_raw_fif(ds_file):
"""CTF .ds to .fif and save result in pipeline folder structure."""
import os
import os.path as op
from nipype.utils.filemanip import split_filename as split_f
from mne.io import read_raw_ctf
_, basename, ext = split_f(ds_file)
# print(subj_path, basename, ext)
raw = read_raw_ctf(ds_file)
# raw_fif_file = os.path.abspath(basename + "_raw.fif")
# raw.save(raw_fif_file)
# return raw_fif_file
raw_fif_file = os.path.abspath(basename + "_raw.fif")
if not op.isfile(raw_fif_file):
raw = read_raw_ctf(ds_file)
raw.save(raw_fif_file)
else:
print(('*** RAW FIF file %s exists!!!' % raw_fif_file))
return raw_fif_file
|
Python
| 0
|
@@ -416,124 +416,8 @@
ile)
-%0A # raw_fif_file = os.path.abspath(basename + %22_raw.fif%22)%0A%0A # raw.save(raw_fif_file)%0A # return raw_fif_file
%0A%0A
|
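A small sketch of the "skip work if the output already exists" pattern that convert_ds_to_raw_fif() keeps after the cleanup; the plain file write stands in for raw.save() and the paths are illustrative.

import os
import os.path as op
import tempfile

def convert(out_path):
    # Convert only when the target file is missing; otherwise reuse it.
    if not op.isfile(out_path):
        with open(out_path, "w") as f:  # stands in for raw.save(...)
            f.write("converted")
    else:
        print('*** RAW FIF file %s exists!!!' % out_path)
    return os.path.abspath(out_path)

out_path = op.join(tempfile.mkdtemp(), "example_raw.fif")
convert(out_path)  # performs the conversion
convert(out_path)  # prints the "exists" message and skips the work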
ae09f2e330e6c2241e79c0d62929791479c9dd03
|
Add method to save the team
|
packages/base/webpages/main_page.py
|
packages/base/webpages/main_page.py
|
#!/usr/bin/env
# -*- coding: utf-8 -*-
from gnr.core.gnrbag import Bag
from gnr.core.gnrdecorator import public_method
class GnrCustomWebPage(object):
css_requires = 'css/base'
js_requires = 'js/main_page'
def main(self, root, **kwargs):
frame = root.framePane()
sc = frame.center.stackContainer(selected='^page_selected')
self.team_page(sc.contentPane(title='!!Teams', datapath='teams'))
self.board_page(sc.contentPane(title='!!Board', datapath='board'))
frame.top.slotToolbar('*,stackButtons,*', _class='page_slotbar')
sc.data('^page_selected', 0)
def team_page(self, pane):
pane.attributes.update({'background_color': 'white'})
qs = self.get_teams_boards()
pane.data('^teams', qs)
for r in qs:
team_id = r.getLabel()
values = r.getValue()
team_div = pane.div(
'^.{0}?team_name'.format(team_id),
_class='team-title'
).ul(_class='board-list', nodeId=team_id, id=team_id)
if values:
for v in values:
board_id = v.getValue().getItem('pkey')
# Set the board_id as an attribute so it can be used for its list, etc.
team_div.li(
board_id=board_id,
_class='board-list-item',
connect_onclick="""
// call a function to generate the board page
generate_board_page(this);
""",
).div(
'^.{0}.{1}.name'.format(team_id, board_id),
_class='board-tile'
)
# Button to create a new board
team_div.li(
id='create_new_board_' + team_id,
_class='board-list-item',
connect_onclick="create_new_board(this);"
).div(
'!!+ Create new board...',
_class='board-tile create-new-board'
)
pane.div(
id='create_new_team', _class='team-title',
connect_onclick="create_new_team(this);"
).div('!!Add new team')
def board_page(self, pane):
# Entry point of the board page.
pane.div(
id='board_page', nodeId='board_page',
_class='board-page'
)
@public_method
def get_teams_boards(self):
"""Gets a bag with team and boards"""
user_id = self.dbCurrentEnv()['user_id']
tbl = self.db.table('base.team')
teams_qs = tbl.query(
where='$owner_user_id=:user_id',
user_id=user_id,
order_by='$__ins_ts'
).fetch()
tbl = self.db.table('base.board')
boards_qs = tbl.query(
'$name,$team_id',
where='$owner_user_id=:user_id',
user_id=user_id,
order_by='$position'
).fetch()
result = Bag()
for t in teams_qs:
result.setAttr(t['id'], team_name=t['name'])
for b in boards_qs:
result.setItem('{0}.{1}'.format(b['team_id'], b['pkey']), Bag(b))
return result
@public_method
def get_lists_cards(self, board_id):
"""Gets a bag with lists and cards"""
tbl = self.db.table('base.list')
lists_qs = tbl.query(
where='$board_id=:board_id',
board_id=board_id,
order_by='$position'
).fetch()
tbl = self.db.table('base.card')
cards_qs = tbl.query(
'$name,$description,$position,$list_name,$list_id',
where='$list_board_id=:board_id',
board_id=board_id,
order_by='$position'
).fetch()
result = Bag()
for lst in lists_qs:
result.setItem(lst['id'], Bag(), list_name=lst['name'])
for crd in cards_qs:
result.setItem('{0}.{1}'.format(crd['list_id'], crd['pkey']), Bag(crd))
return result
@public_method
def save_card(self, list_id, card_name):
"""Save the card """
tbl = self.db.table('base.card')
card = {
'name': card_name,
'list_id': list_id
}
tbl.insert(card)
tbl.db.commit()
return Bag(card)
@public_method
def edit_list_name(self, list_id, board_id, value):
"""Save name of a list after the field has been edited"""
value = value.strip()
if not value:
return False
tbl = self.db.table('base.list')
tbl.update({'id': list_id, 'name': value,
'board_id': board_id})
tbl.db.commit()
return True
@public_method
def add_new_list(self, board_id, value):
"""Add new list"""
value = value.strip()
if not value:
return False
tbl = self.db.table('base.list')
new_list = {'name': value,
'board_id': board_id}
tbl.insert(new_list)
tbl.db.commit()
return Bag(new_list)
@public_method
def add_board(self, name, description, position, team_id):
"""Add a new board"""
values = {
'name': name,
'description': description,
'position': position,
'team_id': team_id,
'owner_user_id': self.dbCurrentEnv()['user_id']
}
tbl = self.db.table('base.board')
tbl.insert(values)
tbl.db.commit()
return Bag(values)
|
Python
| 0
|
@@ -701,19 +701,126 @@
'white'
+,%0A 'nodeId': 'team_page',%0A 'id': 'team_page'
%7D)%0A
+%0A
@@ -5686,16 +5686,354 @@
urn Bag(values)%0A
+%0A @public_method%0A def add_team(self, name, description):%0A values = %7B%0A 'name': name,%0A 'description': description,%0A 'owner_user_id': self.dbCurrentEnv()%5B'user_id'%5D%0A %7D%0A tbl = self.db.table('base.team')%0A tbl.insert(values)%0A tbl.db.commit()%0A%0A return Bag(values)%0A
|
697d3c4c80574d82e8aa37e2a13cbaeefdad255c
|
bump version
|
kuyruk/__init__.py
|
kuyruk/__init__.py
|
from __future__ import absolute_import
import logging
from kuyruk.kuyruk import Kuyruk
from kuyruk.worker import Worker
from kuyruk.task import Task
from kuyruk.config import Config
__version__ = '0.13.1'
try:
# not available in python 2.6
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
# Add NullHandler to prevent logging warnings on startup
null_handler = NullHandler()
logging.getLogger('kuyruk').addHandler(null_handler)
logging.getLogger('pika').addHandler(null_handler)
|
Python
| 0
|
@@ -197,17 +197,17 @@
= '0.13.
-1
+2
'%0A%0Atry:%0A
|
50b189888a0ff68f1cc4db1615991d1afe364854
|
Update cigar_party.py
|
Python/Logic_1/cigar_party.py
|
Python/Logic_1/cigar_party.py
|
# When squirrels get together for a party, they like to have cigars. A squirrel
# party is successful when the number of cigars is between 40 and 60, inclusive.
# Unless it is the weekend, in which case there is no upper bound on the number
# of cigars. Return True if the party with the given values is successful, or
# False otherwise.
# cigar_party(30, False) → False
# cigar_party(50, False) → True
# cigar_party(70, True) → True
def cigar_party(cigars, is_weekend):
return (is_weekend and cigars >= 40) or (39 < cigars < 61)
print(cigar_party(30, False))
print(cigar_party(50, False))
print(cigar_party(70, True))
|
Python
| 0.000002
|
@@ -358,17 +358,19 @@
False)
-%E2%86%92
+--%3E
False%0A#
@@ -393,17 +393,19 @@
False)
-%E2%86%92
+--%3E
True%0A#
@@ -430,9 +430,11 @@
ue)
-%E2%86%92
+--%3E
Tru
|
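A quick check of the boolean logic in cigar_party(): weekends drop the upper bound, weekdays keep 40..60 inclusive.

def cigar_party(cigars, is_weekend):
    return (is_weekend and cigars >= 40) or (39 < cigars < 61)

assert cigar_party(40, False) and cigar_party(60, False)  # inclusive bounds
assert not cigar_party(61, False)                         # weekday cap
assert cigar_party(1000, True)                            # no weekend cap
assert not cigar_party(39, True)                          # lower bound stays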
ae3db425bbe4c82c95d71430a0fa9732f8e1e26f
|
fix for issue mentioned in previous commit.
|
labware/liquids.py
|
labware/liquids.py
|
class LiquidContainer():
max_volume = None
min_working_volume = None
max_working_volume = None
"""
A dict containing the liquid key (a user-specified name) along with
the amount of that particular liquid in this blend.
"""
_contents = None # {}
def __init__(self, max=None, min_working=None, max_working=None, ml=False):
""" Initialize and set working volumes. """
"""
I guess ideally, you'd have a subclass to define the working volumes
of specific containers, but that would get really silly really fast.
Better to just specify that on the container because that's how the
standards and defined and (as far as I know) they end up being uniform
for all wells.
In the rare situation where this isn't the case, you can just subclass
this.
"""
self._contents = {}
if max:
self.max_volume = self.convert_ml(max, ml)
if min_working:
self.min_working_volume = self.convert_ml(min_working, ml)
if max_working:
self.max_working_volume = self.convert_ml(max_working, ml)
def add_liquid(self, ml=False, **kwargs):
"""
You provide as keyword arguments liquid names and volumes in
microliters.
If you want it in milliliters, you can set ml=True in the
kwargs.
400ul of water:
container.add_liquid(water=400)
400ml of water:
container.add_liquid(water=400, ml=True)
TODO: Attach to a global ingredients list to ensure that all
specified liquids have been defined. For now, just
be careful.
"""
# Tally up the new liquids to make sure we can fit them into
# the container.
new_liquid = 0
for liquid in kwargs:
new_liquid = new_liquid + kwargs[liquid]
new_liquid = self.convert_ml(new_liquid, ml)
self.assert_capacity(new_liquid)
# assert_capacity will raise an error if the value is out of
# bounds, so now we can add our liquids.
for liquid in kwargs:
vol = self.convert_ml(kwargs[liquid])
if liquid in self._contents:
self._contents[liquid] = self._contents[liquid]+vol
else:
self._contents[liquid] = vol
def assert_capacity(self, new_amount, ml=False):
if not self.max_volume:
return
new_value = self.calculate_total_volume()+new_amount
if (new_value > self.max_volume):
raise VolumeError(
"Liquid amount ({}{}l) exceeds max volume ({}{}l)."\
.format(new_value, u'\u03BC', self.max_volume, u'\u03BC')
)
def calculate_total_volume(self, data=None):
total = 0
liqs = data or self._contents
for l in liqs:
total = total + liqs[l]
return total
def convert_ml(self, volume, ml=None):
"""
Simple utility method to allow input of ul volume and multiply by
a thousand if ml is set to True.
Python doesn't support passing by reference. ;_;
"""
if ml is None:
raise Exception("Keyword argument 'ml' is required.")
elif ml:
return volume*1000 # OMG metric <3 <3
else:
return volume
def get_volume(self):
return self.calculate_total_volume()
def get_proportion(self, key):
if key in self._contents:
return self._contents[key]/self.calculate_total_volume()
else:
raise KeyError(
"Liquid '{}' not found in this container."\
.format(key)
)
def transfer(self, amount, destination, ml=False):
amount = self.convert_ml(amount, ml)
# Ensure there's room in the destination well first.
destination.assert_capacity(amount)
# Ensure we have enough total volume to proceed with the
# request. (Don't worry about working volumes for now.)
total_volume = self.calculate_total_volume()
if (total_volume<amount):
raise ValueError(
"Not enough liquid ({}{}l) for transfer ({}{}l)."\
.format(total_volume, u'\u03BC', amount, u'\u03BC')
)
# Proportion math.
mix = {}
liq = self._contents
for l in liq:
proportion = liq[l]/total_volume
value = proportion*amount
# TODO: alternate add_liquid syntax for dynamic things like this.
kwargs = {}
kwargs[l] = value
self._contents[l] = self._contents[l]-value
destination.add_liquid(**kwargs)
class VolumeError(ValueError):
pass
|
Python
| 0
|
@@ -1903,16 +1903,20 @@
%5Bliquid%5D
+, ml
)%0A%09%09%09if
|
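A reduced sketch of the bug this diff fixes: convert_ml() insists on an explicit ml keyword, so the inner call in add_liquid() must forward the caller's flag; omitting it raised at runtime.

def convert_ml(volume, ml=None):
    if ml is None:
        raise Exception("Keyword argument 'ml' is required.")
    return volume * 1000 if ml else volume

assert convert_ml(2, ml=True) == 2000
assert convert_ml(2, ml=False) == 2
try:
    convert_ml(2)  # the pre-fix call site omitted ml and ended up here
except Exception as exc:
    print(exc)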
4d3d4e457c5886ace69250de1c5f4f696604d43b
|
Fix cal_seqs with no delay
|
QGL/BasicSequences/helpers.py
|
QGL/BasicSequences/helpers.py
|
# coding=utf-8
from itertools import product
import operator
from ..PulsePrimitives import Id, X, MEAS
from ..ControlFlow import qwait
from functools import reduce
def create_cal_seqs(qubits, numRepeats, measChans=None, waitcmp=False, delay=None):
"""
Helper function to create a set of calibration sequences.
Parameters
----------
qubits : logical channels, e.g. (q1,) or (q1,q2) (tuple)
numRepeats : number of times to repeat calibration sequences (int)
waitcmp : True if the sequence contains branching (bool)
delay : optional time between state preparation and measurement (s)
"""
if measChans is None:
measChans = qubits
calSet = [Id, X]
#Make all combination for qubit calibration states for n qubits and repeat
calSeqs = [reduce(operator.mul, [p(q) for p, q in zip(pulseSet, qubits)])
for pulseSet in product(calSet, repeat=len(qubits))
for _ in range(numRepeats)]
#Add on the measurement operator.
measBlock = reduce(operator.mul, [MEAS(q) for q in qubits])
return [[seq, Id(qubits[0], delay), measBlock, qwait('CMP')] if waitcmp else [seq, Id(qubits[0], delay), measBlock]
for seq in calSeqs]
def cal_descriptor(qubits, numRepeats):
states = ['0', '1']
# generate state set in same order as we do above in create_cal_seqs()
state_set = [reduce(operator.add, s) for s in product(states, repeat=len(qubits))]
descriptor = {
'name': 'calibration',
'unit': 'state',
'partition': 2,
'points': []
}
for state in state_set:
descriptor['points'] += [state] * numRepeats
return descriptor
def time_descriptor(times, desired_units="us"):
if desired_units == "s":
scale = 1
elif desired_units == "ms":
scale = 1e3
elif desired_units == "us" or desired_units == u"μs":
scale = 1e6
elif desired_units == "ns":
scale = 1e9
axis_descriptor = {
'name': 'time',
'unit': desired_units,
'points': list(scale * times),
'partition': 1
}
return axis_descriptor
|
Python
| 0.000002
|
@@ -749,17 +749,18 @@
%0A cal
-S
+_s
eqs = %5Br
@@ -1042,155 +1042,248 @@
-return %5B%5Bseq, Id(qubits%5B0%5D, delay), measBlock, qwait('CMP')%5D if waitcmp else %5Bseq, Id(qubits%5B0%5D, delay), measBlock%5D%0A for
+#Add optional delay%0A full_cal_seqs = %5B%5Bseq, Id(qubits%5B0%5D, delay), measBlock%5D if delay else %5Bseq, measBlock%5D for seq in cal_seqs%5D%0A if waitcmp:%0A %5Bcal_seq.append(qwait('CMP')) for cal_
seq in
+full_
cal
-S
+_s
eqs%5D
+%0A return full_cal_seqs
%0A%0Ade
|
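A reduced sketch of the restructured sequence assembly: the delay Id pulse is inserted only when a delay is given, and qwait('CMP') is appended only for branching sequences. Strings stand in for pulse objects.

def build(cal_seqs, delay=None, waitcmp=False):
    # Add the optional delay pulse, then optionally append the wait.
    full = [[seq, 'Id(delay)', 'MEAS'] if delay else [seq, 'MEAS']
            for seq in cal_seqs]
    if waitcmp:
        for seq in full:
            seq.append("qwait('CMP')")
    return full

assert build(['II']) == [['II', 'MEAS']]
assert build(['II'], delay=1e-6, waitcmp=True)[0][-1] == "qwait('CMP')"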
214e9bfd10c153b5ef27ff924da12a299b54874c
|
Deprecate gettattr req. in theories
|
lamana/theories.py
|
lamana/theories.py
|
# -----------------------------------------------------------------------------
'''An interface between the constructs and models modules.'''
# BaseModel(): A super class for user-customized models.
# flake8 constructs.py --ignore=E265,E501,N802,H806
import abc
import importlib
import logging
import six
# TODO: Replace with interactive way to import models
from lamana.models import *
##import lamana.models
from lamana.utils import tools as ut
@six.add_metaclass(abc.ABCMeta)
class BaseModel(object):
'''Provide attributes for sub-classing custom models.
Sub-class from here to make a model that interfaces with LamAna.
Notes
-----
This class helps centralize common attributes associated with a custom
model. A model is selected by the `Case.apply()` method and is applied in
Phase 3 of `constructs.Laminate()`. It is idiomatic to subclass `BaseModel`
when making custom models.
Uses an abstractmethod to enforce implementation of a hook method. It also
suggests that BaseModel can only be sub-classed, not instantiated.
See Also
--------
distributions.Case.apply(): model selection
constructs.Laminate(): creates LMFrame by merging LT data with LFrame
theories.handshake(): get updated DataFrame and FeatureInput
'''
def __init__(self):
super(BaseModel, self).__init__()
self.model_name = None
self.LaminateModel = None
self.FeatureInput = None
def __repr__(self):
return '<{} Model object>'.format(self.__class__.__name__)
# TODO: Find `_use_model_` more dynamically than forcing to search `Model._use_model`.
@abc.abstractmethod
def _use_model_():
'''Hook method.
Same name as config.HOOKNAME. This method must be implemented and
return the following.
Returns
-------
tuple
Updated (DataFrame, FeatureInput).
'''
pass
def handshake(Laminate, adjusted_z=False):
'''Return updated LaminateModel and FeatureInput objects.
This key method interfaces between Laminate class and a custom model module.
Model names are related to the laminate theory, e.g. Classical_LT, Wilson_LT.
This name is applied by the user upon calling `Case.apply(model='model_name')`
and is found in the FeatureInput dict.
Parameters
----------
Laminate : Laminate object
The entire pre-updated, `Laminate` object is passed in, giving access
to its methods. `Laminate.LFrame` only has ID and Dimensional columns,
so no laminate theory calculations or columns are populated yet.
adjusted_z : bool, optional; default False
This option forces the use of the z(m)* column values. A different
algorithm was used it calculate the internal values for this variable.
Notes
-----
This method searches for a special hook method named `_use_model_` in the
specified model module. This hook may be written as method within a class
or written as an independent function offering a choice for users to write
models in either class-style (recommended) or function-style. This hook
method simply returns an updated LaminateModel and FeatureInput.
`handshake()` determines either class- or function-style models by duck typing
a module name according to the model name provided in the `Case.apply()` method.
These modules are located in the `models` directory - a repository for all
package models. Assuming a function first, the hook method is sought by
calling `_use_model_()`; if none is found, the exception is caught and a hook
method named `<class>._use_model_` is sought next (as of 0.4.5b1). As of
0.4.11, hardcoded class names were removed here; any Pythonic name can be used
for the <class> parameter.
Here is the workflow of a model selected by the user and its call in `Laminate`.
This assumes the model is developed and located in the standard .\models directory.
1. User selects a model in `lamana.distributions.Case.apply(model='model_name')`;
Model name is stored in the FeatureInput object then passed into `Laminate`.
2. Inside `lamana.constructs.Laminate`, `_update_columns._update_calculations(FI)`
is called, which initiates LT calculations for the given model.
3. `theories.handshake(L)` is called and searches for the model name in models dir.
4. Inside the selected model, the hook method `models._use_model_` is called
(if function-style) or `models.<model_name>._use_model_` (if class-style)
'''
HOOKNAME = '_use_model_' # looks for this in custom models
# Find and Import the Model
model_name = Laminate.FeatureInput['Model']
modified_name = ''.join(['.', model_name]) # e.g '.Wilson_LT'
module = importlib.import_module(modified_name, package='lamana.models')
# Verify Laminate has an empty LMFrame attribute, otherwise Warn
if any(getattr(Laminate, 'LMFrame')):
logging.warn('A LaminateModel has been passed to `handshake()`.'
' Expected a pre-updated Laminate object. Results are unpredictable.')
try:
# Look for a hook function
hook = ut.get_hook_function(module, hookname=HOOKNAME)
logging.debug('Found a hook function: {}'.format(hook))
except(AttributeError):
# This code block obviates hardcoding a specific model class name.
# Look for a class containing the hook method
class_obj = ut.get_hook_class(module, hookname=HOOKNAME)
class_name = getattr(module, class_obj.__name__)
my_instance = class_name() # instantiate the class; important
hook = getattr(my_instance, HOOKNAME) # hook method
logging.debug('Found a hook method {} in {}'.format(hook, class_name))
# TODO: Add following with args
#LaminateModel, FeatureInput = hook(Laminate, *args, adjusted_z=False, **kwargs)
# eqiv. LM, FI = module._use_model_()
LaminateModel, FeatureInput = hook(Laminate, adjusted_z=False)
# Make sure the passed FeatureInput has Equal attributes
##assert FeatureInput['Parameters']['p'] == Laminate.p
return(LaminateModel, FeatureInput)
|
Python
| 0.999997
|
@@ -1933,16 +1933,81 @@
pass
+ # pragma: no cover
%0A%0A%0Adef h
@@ -5053,16 +5053,18 @@
Warn%0A
+ #
if any(
@@ -5093,24 +5093,26 @@
rame')):%0A
+ #
logging
@@ -5168,24 +5168,26 @@
ake()%60.'%0A
+ #
|
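A hedged sketch of the class-or-function hook discovery that handshake() performs, using a plain getattr fallback in place of the project's ut.get_hook_function/ut.get_hook_class helpers.

import types

HOOKNAME = '_use_model_'

class Model:
    def _use_model_(self):
        return 'from class'

def find_hook(module):
    hook = getattr(module, HOOKNAME, None)  # function-style model first
    if hook is None:
        instance = module.Model()           # then class-style: instantiate
        hook = getattr(instance, HOOKNAME)  # and bind the hook method
    return hook

mod = types.SimpleNamespace(Model=Model)
assert find_hook(mod)() == 'from class'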
3567348e84034dc21378ab2d657bcb7a54a49502
|
Terminate the server before tearing down the databases.
|
protractor/management/commands/protractor.py
|
protractor/management/commands/protractor.py
|
# -*- coding: utf-8 -*-
import os
import sys
from multiprocessing import Process
from optparse import make_option
import subprocess
from django.core.management import call_command
from django.core.management.base import BaseCommand
from django.db import connection
from django.test.runner import setup_databases
class Command(BaseCommand):
args = '[--protractor-conf] [--runserver-command] [--specs] [--suite] [--addrport]'
help = 'Run protractor tests with a test database'
option_list = BaseCommand.option_list + (
make_option('--protractor-conf',
action='store',
dest='protractor_conf',
default='protractor.conf.js',
help='Specify a destination for your protractor configuration'
),
make_option('--runserver-command',
action='store',
dest='run_server_command',
default='runserver',
help='Specify which command you want to run a server'
),
make_option('--specs',
action='store',
dest='specs',
help='Specify which specs to run'
),
make_option('--suite',
action='store',
dest='suite',
help='Specify which suite to run'
),
make_option('--fixture',
action='append',
dest='fixtures',
help='Specify fixture to load initial data to the database'
),
make_option('--addrport', action='store', dest='addrport',
type='string',
help='port number or ipaddr:port to run the server on'),
)
def handle(self, *args, **options):
options['verbosity'] = int(options.get('verbosity'))
if not os.path.exists(options['protractor_conf']):
raise IOError("Could not find '{}'"
.format(options['protractor_conf']))
self.run_webdriver()
old_config = self.setup_databases(options)
fixtures = options['fixtures']
if fixtures:
call_command('loaddata', *fixtures,
**{'verbosity': options['verbosity']})
if options['addrport'] is None:
options['addrport'] = os.environ.get(
'DJANGO_LIVE_TEST_SERVER_ADDRESS', '8081')
test_server_process = Process(target=self.runserver, args=(options,))
test_server_process.daemon = True
test_server_process.start()
authority = options['addrport']
if ':' not in authority:
authority = 'localhost:' + authority
live_server_url = 'http://%s' % authority
params = {
'live_server_url': live_server_url
}
protractor_command = 'protractor {}'.format(options['protractor_conf'])
protractor_command += ' --baseUrl {}'.format(live_server_url)
if options['specs']:
protractor_command += ' --specs {}'.format(options['specs'])
if options['suite']:
protractor_command += ' --suite {}'.format(options['suite'])
for key, value in params.items():
protractor_command += ' --params.{key}={value}'.format(
key=key, value=value
)
return_code = subprocess.call(protractor_command.split())
self.teardown_databases(old_config, options)
if return_code:
self.stdout.write('Failed')
sys.exit(1)
else:
self.stdout.write('Success')
def setup_databases(self, options):
return setup_databases(options['verbosity'], False)
def teardown_databases(self, old_config, options):
"""
Destroys all the non-mirror databases.
"""
if len(old_config) > 1:
old_names, mirrors = old_config
else:
old_names = old_config
for connection, old_name, destroy in old_names:
if destroy:
connection.creation.destroy_test_db(old_name, options['verbosity'])
def runserver(self, options):
use_threading = connection.features.test_db_allows_multiple_connections
self.stdout.write('Starting server...')
call_command(
options['run_server_command'],
addrport=options.get('addrport'),
shutdown_message='',
use_reloader=False,
use_ipv6=False,
verbosity=0,
use_threading=use_threading,
stdout=open(os.devnull, 'w')
)
def run_webdriver(self):
self.stdout.write('Starting webdriver...')
with open(os.devnull, 'w') as f:
subprocess.call(['webdriver-manager', 'update'], stdout=f, stderr=f)
subprocess.Popen(['webdriver-manager', 'start'], stdout=f, stderr=f)
|
Python
| 0
|
@@ -3270,16 +3270,253 @@
plit())%0A
+%0A # Terminate the live server process before tearing down the databases%0A # to prevent the error%0A # django.db.utils.OperationalError: database is being accessed by other users%0A test_server_process.terminate()%0A%0A
|
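A minimal sketch of the ordering this diff enforces: terminate the live-server process before tearing down the test databases, so no connection is still open. The sleep loop stands in for Django's runserver command.

import time
from multiprocessing import Process

def serve():
    while True:
        time.sleep(0.1)  # stands in for call_command('runserver', ...)

if __name__ == '__main__':
    server = Process(target=serve)
    server.daemon = True
    server.start()
    # ... run the protractor subprocess here ...
    server.terminate()  # first: stop the process holding DB connections
    server.join()
    # only now is teardown_databases(old_config, options) safe to run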
22f6ecc5b61dae0a638b4191eb2ae3ddf1b13895
|
fix organization events don't have repository/owner/login
|
bioconda_utils/bot/views.py
|
bioconda_utils/bot/views.py
|
"""
HTTP Views (pages)
"""
import logging
from aiohttp import web
from .events import event_routes
from ..githubhandler import Event
from .. import __version__ as VERSION
from .worker import celery
from .config import APP_SECRET
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
web_routes = web.RouteTableDef() # pylint: disable=invalid-name
@web_routes.post('/_gh')
async def webhook_dispatch(request):
"""Accepts webhooks from Github and dispatches them to event handlers"""
try:
body = await request.read()
secret = APP_SECRET
if secret == "IGNORE":
logger.error("IGNORING WEBHOOK SECRET (DEBUG MODE)")
secret = None
event = Event.from_http(request.headers, body, secret=secret)
# Respond to liveness check
if event.event == "ping":
return web.Response(status=200)
# Log Event
installation = event.get('installation/id')
to_user = event.get('repository/owner/login')
to_repo = event.get('repository/name')
action = event.get('action', None)
action_msg = '/' + action if action else ''
logger.info("Received GH Event '%s%s' (%s) for %s (%s/%s)",
event.event, action_msg,
event.delivery_id,
installation, to_user, to_repo)
# Get GithubAPI object for this installation
ghapi = await request.app['ghappapi'].get_github_api(
dry_run=False, installation=installation, to_user=to_user, to_repo=to_repo
)
# Dispatch the Event
try:
await event_routes.dispatch(event, ghapi)
logger.info("Event '%s%s' (%s) done", event.event, action_msg, event.delivery_id)
except Exception: # pylint: disable=broad-except
logger.exception("Failed to dispatch %s", event.delivery_id)
request.app['gh_rate_limit'] = ghapi.rate_limit
try:
events_remaining = ghapi.rate_limit.remaining
except AttributeError:
events_remaining = "Unknown"
logger.info('GH requests remaining: %s', events_remaining)
return web.Response(status=200)
except Exception: # pylint: disable=broad-except
logger.exception("Failure in webhook dispatch")
return web.Response(status=500)
@web_routes.get("/")
async def show_status(request):
"""Shows the index page
This is rendered at eg https://bioconda.herokuapps.com/
"""
try:
logger.info("Status: getting celery data")
msg = f"""
Running version {VERSION}
{request.app.get('gh_rate_limit')}
"""
worker_status = celery.control.inspect(timeout=0.1)
if not worker_status:
msg += """
no workers online
"""
else:
for worker in sorted(worker_status.ping().keys()):
active = worker_status.active(worker)
reserved = worker_status.reserved(worker)
msg += f"""
Worker: {worker}
active: {len(active[worker])}
queued: {len(reserved[worker])}
"""
return web.Response(text=msg)
except Exception: # pylint: disable=broad-except
logger.exception("Failure in show status")
return web.Response(status=500)
|
Python
| 0.000713
|
@@ -1005,16 +1005,22 @@
r/login'
+, None
)%0A
@@ -1058,16 +1058,22 @@
ry/name'
+, None
)%0A
|
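A toy re-implementation of the defensive lookup this diff adds; it is not the project's githubhandler.Event API, just the path-with-default idea for payloads that lack repository/owner/login.

organization_event = {'installation': {'id': 7}}  # no 'repository' key

def get(payload, path, default=None):
    # Walk a '/'-separated path, returning default on any missing key.
    node = payload
    for part in path.split('/'):
        if not isinstance(node, dict) or part not in node:
            return default
        node = node[part]
    return node

assert get(organization_event, 'repository/owner/login', None) is None
assert get(organization_event, 'installation/id') == 7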
667a87988d168a4dbd9b0d86267b445d91f1460b
|
Fix Daikin sensor temperature_unit & cleanup (#34116)
|
homeassistant/components/daikin/sensor.py
|
homeassistant/components/daikin/sensor.py
|
"""Support for Daikin AC sensors."""
import logging
from homeassistant.const import CONF_ICON, CONF_NAME, CONF_TYPE
from homeassistant.helpers.entity import Entity
from homeassistant.util.unit_system import UnitSystem
from . import DOMAIN as DAIKIN_DOMAIN
from .const import (
ATTR_INSIDE_TEMPERATURE,
ATTR_OUTSIDE_TEMPERATURE,
SENSOR_TYPE_TEMPERATURE,
SENSOR_TYPES,
)
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Old way of setting up the Daikin sensors.
Can only be called when a user accidentally mentions the platform in their
config. But even in that case it would have been ignored.
"""
pass
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up Daikin climate based on config_entry."""
daikin_api = hass.data[DAIKIN_DOMAIN].get(entry.entry_id)
sensors = [ATTR_INSIDE_TEMPERATURE]
if daikin_api.device.support_outside_temperature:
sensors.append(ATTR_OUTSIDE_TEMPERATURE)
async_add_entities(
[
DaikinClimateSensor(daikin_api, sensor, hass.config.units)
for sensor in sensors
]
)
class DaikinClimateSensor(Entity):
"""Representation of a Sensor."""
def __init__(self, api, monitored_state, units: UnitSystem, name=None) -> None:
"""Initialize the sensor."""
self._api = api
self._sensor = SENSOR_TYPES.get(monitored_state)
if name is None:
name = f"{self._sensor[CONF_NAME]} {api.name}"
self._name = f"{name} {monitored_state.replace('_', ' ')}"
self._device_attribute = monitored_state
if self._sensor[CONF_TYPE] == SENSOR_TYPE_TEMPERATURE:
self._unit_of_measurement = units.temperature_unit
@property
def unique_id(self):
"""Return a unique ID."""
return f"{self._api.mac}-{self._device_attribute}"
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return self._sensor[CONF_ICON]
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
if self._device_attribute == ATTR_INSIDE_TEMPERATURE:
return self._api.device.inside_temperature
if self._device_attribute == ATTR_OUTSIDE_TEMPERATURE:
return self._api.device.outside_temperature
return None
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return self._unit_of_measurement
async def async_update(self):
"""Retrieve latest state."""
await self._api.async_update()
@property
def device_info(self):
"""Return a device description for device registry."""
return self._api.device_info
|
Python
| 0
|
@@ -100,25 +100,28 @@
F_NAME,
-CONF_TYPE
+TEMP_CELSIUS
%0Afrom ho
@@ -164,62 +164,8 @@
tity
-%0Afrom homeassistant.util.unit_system import UnitSystem
%0A%0Afr
@@ -219,22 +219,16 @@
import
-(%0A
ATTR_INS
@@ -243,20 +243,16 @@
ERATURE,
-%0A
ATTR_OU
@@ -261,36 +261,32 @@
IDE_TEMPERATURE,
-%0A
SENSOR_TYPE_TEM
@@ -285,41 +285,9 @@
TYPE
-_TEMPERATURE,%0A SENSOR_TYPES,%0A)
+S
%0A%0A_L
@@ -970,31 +970,9 @@
ies(
-%0A %5B%0A
+%5B
Daik
@@ -1009,40 +1009,9 @@
nsor
-, hass.config.units)%0A
+)
for
@@ -1032,23 +1032,9 @@
sors
-%0A %5D%0A
+%5D
)%0A%0A%0A
@@ -1154,38 +1154,8 @@
tate
-, units: UnitSystem, name=None
) -%3E
@@ -1257,21 +1257,17 @@
OR_TYPES
-.get(
+%5B
monitore
@@ -1273,17 +1273,17 @@
ed_state
-)
+%5D
%0A
@@ -1287,327 +1287,109 @@
-if name is None:%0A name = f%22%7Bself._sensor%5BCONF_NAME%5D%7D %7Bapi.name%7D%22%0A%0A self._name = f%22%7Bname%7D %7Bmonitored_state.replace('_', ' ')%7D%22%0A self._device_attribute = monitored_state%0A%0A if self._sensor%5BCONF_TYPE%5D == SENSOR_TYPE_TEMPERATURE:%0A self._unit_of_measurement = units.temperature_unit
+self._name = f%22%7Bapi.name%7D %7Bself._sensor%5BCONF_NAME%5D%7D%22%0A self._device_attribute = monitored_state
%0A%0A
@@ -2202,33 +2202,20 @@
urn
-self._unit_of_measurement
+TEMP_CELSIUS
%0A%0A
|
a3e7da8c515e3d10217ebc5bf708da13479ab80c
|
Fix FLAGS.volumes_dir help message
|
cinder/volume/iscsi.py
|
cinder/volume/iscsi.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Helper code for the iSCSI volume driver.
"""
import os
from cinder import exception
from cinder import flags
from cinder.openstack.common import cfg
from cinder.openstack.common import log as logging
from cinder import utils
LOG = logging.getLogger(__name__)
iscsi_helper_opt = [
cfg.StrOpt('iscsi_helper',
default='tgtadm',
help='iscsi target user-land tool to use'),
cfg.StrOpt('volumes_dir',
default='$state_path/volumes',
help='Volume configfuration file storage directory'),
]
FLAGS = flags.FLAGS
FLAGS.register_opts(iscsi_helper_opt)
class TargetAdmin(object):
"""iSCSI target administration.
Base class for iSCSI target admin helpers.
"""
def __init__(self, cmd, execute):
self._cmd = cmd
self.set_execute(execute)
def set_execute(self, execute):
"""Set the function to be used to execute commands."""
self._execute = execute
def _run(self, *args, **kwargs):
self._execute(self._cmd, *args, run_as_root=True, **kwargs)
def create_iscsi_target(self, name, tid, lun, path, **kwargs):
"""Create a iSCSI target and logical unit"""
raise NotImplementedError()
def remove_iscsi_target(self, tid, lun, vol_id, **kwargs):
"""Remove a iSCSI target and logical unit"""
raise NotImplementedError()
def _new_target(self, name, tid, **kwargs):
"""Create a new iSCSI target."""
raise NotImplementedError()
def _delete_target(self, tid, **kwargs):
"""Delete a target."""
raise NotImplementedError()
def _show_target(self, tid, **kwargs):
"""Query the given target ID."""
raise NotImplementedError()
def _new_logicalunit(self, tid, lun, path, **kwargs):
"""Create a new LUN on a target using the supplied path."""
raise NotImplementedError()
def _delete_logicalunit(self, tid, lun, **kwargs):
"""Delete a logical unit from a target."""
raise NotImplementedError()
class TgtAdm(TargetAdmin):
"""iSCSI target administration using tgtadm."""
def __init__(self, execute=utils.execute):
super(TgtAdm, self).__init__('tgtadm', execute)
def _get_target(self, iqn):
(out, err) = self._execute('tgt-admin', '--show', run_as_root=True)
lines = out.split('\n')
for line in lines:
if iqn in line:
parsed = line.split()
tid = parsed[1]
return tid[:-1]
return None
def create_iscsi_target(self, name, tid, lun, path, **kwargs):
# Note(jdg) tid and lun aren't used by TgtAdm but remain for
# compatability
if not os.path.exists(FLAGS.volumes_dir):
os.makedirs(FLAGS.volumes_dir)
vol_id = name.split(':')[1]
volume_conf = """
<target %s>
backing-store %s
</target>
""" % (name, path)
LOG.info(_('Creating volume: %s') % vol_id)
volume_path = os.path.join(FLAGS.volumes_dir, vol_id)
f = open(volume_path, 'w+')
f.write(volume_conf)
f.close()
try:
(out, err) = self._execute('tgt-admin',
'--conf',
volume_path,
'--update',
name,
run_as_root=True)
except exception.ProcessExecutionError, e:
LOG.error(_("Failed to create iscsi target for volume "
"id:%(vol_id)s.") % locals())
#Dont forget to remove the persistent file we created
os.unlink(volume_path)
raise exception.ISCSITargetCreateFailed(volume_id=vol_id)
iqn = '%s%s' % (FLAGS.iscsi_target_prefix, vol_id)
tid = self._get_target(iqn)
if tid is None:
raise exception.NotFound()
return tid
def remove_iscsi_target(self, tid, lun, vol_id, **kwargs):
LOG.info(_('Removing volume: %s') % vol_id)
vol_uuid_file = 'volume-%s' % vol_id
volume_path = os.path.join(FLAGS.volumes_dir, vol_uuid_file)
if os.path.isfile(volume_path):
iqn = '%s%s' % (FLAGS.iscsi_target_prefix,
vol_uuid_file)
else:
raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)
try:
self._execute('tgt-admin',
'--delete',
iqn,
run_as_root=True)
except exception.ProcessExecutionError, e:
LOG.error(_("Failed to create iscsi target for volume "
"id:%(volume_id)s.") % locals())
raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)
os.unlink(volume_path)
def show_target(self, tid, **kwargs):
iqn = kwargs.get('iqn', None)
if iqn is None:
raise exception.InvalidParameterValue(
err=_('valid iqn needed for show_target'))
tid = self._get_target(iqn)
if tid is None:
raise exception.NotFound()
class IetAdm(TargetAdmin):
"""iSCSI target administration using ietadm."""
def __init__(self, execute=utils.execute):
super(IetAdm, self).__init__('ietadm', execute)
def create_iscsi_target(self, name, tid, lun, path, **kwargs):
self._new_target(name, tid, **kwargs)
self._new_logicalunit(tid, lun, path, **kwargs)
return tid
def remove_iscsi_target(self, tid, lun, vol_id, **kwargs):
LOG.info(_('Removing volume: %s') % vol_id)
self._delete_target(tid, **kwargs)
self._delete_logicalunit(tid, lun, **kwargs)
def _new_target(self, name, tid, **kwargs):
self._run('--op', 'new',
'--tid=%s' % tid,
'--params', 'Name=%s' % name,
**kwargs)
def _delete_target(self, tid, **kwargs):
self._run('--op', 'delete',
'--tid=%s' % tid,
**kwargs)
def show_target(self, tid, **kwargs):
self._run('--op', 'show',
'--tid=%s' % tid,
**kwargs)
def _new_logicalunit(self, tid, lun, path, **kwargs):
self._run('--op', 'new',
'--tid=%s' % tid,
'--lun=%d' % lun,
'--params', 'Path=%s,Type=fileio' % path,
**kwargs)
def _delete_logicalunit(self, tid, lun, **kwargs):
self._run('--op', 'delete',
'--tid=%s' % tid,
'--lun=%d' % lun,
**kwargs)
def get_target_admin():
if FLAGS.iscsi_helper == 'tgtadm':
return TgtAdm()
else:
return IetAdm()
|
Python
| 0.000002
|
@@ -1316,17 +1316,16 @@
e config
-f
uration
@@ -3500,17 +3500,17 @@
# compat
-a
+i
bility%0A%0A
@@ -4480,16 +4480,17 @@
#Don
+'
t forget
|
fc975bd573d439490a65bb72ff5f6c69b2b0a771
|
Update loudness_zwicker_lowpass_intp.py
|
mosqito/functions/loudness_zwicker/loudness_zwicker_lowpass_intp.py
|
mosqito/functions/loudness_zwicker/loudness_zwicker_lowpass_intp.py
|
# -*- coding: utf-8 -*-
"""
@date Created on Fri May 22 2020
@author martin_g for Eomys
"""
# Standard library imports
import math
import numpy as np
def loudness_zwicker_lowpass_intp(loudness, tau, sample_rate):
"""1st order low-pass with linear interpolation of signal for
increased precision
Parameters
----------
loudness : numpy.ndarray
Loudness vs. time
tau : float
Filter parameter
sample_rate : int
Loudness signal sampling frequency
Outputs
-------
filt_loudness : numpy.ndarray
Filtered loudness
"""
filt_loudness = np.zeros(np.shape(loudness))
# Factor for virtual upsampling/inner iterations
lp_iter = 24
num_samples = np.shape(loudness)[0]
a1 = math.exp(-1 / (sample_rate * lp_iter * tau))
b0 = 1 - a1
y1 = 0
for i in range(num_samples):
x0 = loudness[i]
y1 = b0 * x0 + a1 * y1
filt_loudness[i] = y1
# Linear interpolation steps between current and next sample
if i < num_samples - 1:
xd = (loudness[i + 1] - x0) / lp_iter
# Inner iterations/interpolation
for ii in range(lp_iter):
x0 += xd
y1 = b0 * x0 + a1 * y1
return filt_loudness
|
Python
| 0.014013
|
@@ -144,16 +144,98 @@
y as np%0A
+#Needed for the loudness_zwicker_lowpass_intp_ea function%0Afrom scipy import signal
%0A%0Adef lo
@@ -1222,16 +1222,130 @@
polation
+ %0A # Must add a -1 because is repeating the twice the first value at the initial of the first for loop.
%0A
@@ -1372,16 +1372,18 @@
(lp_iter
+-1
):%0A
@@ -1458,16 +1458,1736 @@
n filt_loudness%0A
+%0A%0Adef loudness_zwicker_lowpass_intp_ea(loudness, tau, sample_rate):%0A %22%22%221st order low-pass with linear interpolation of signal for%0A increased precision%0A%0A Parameters%0A ----------%0A loudness : numpy.ndarray%0A Loudness vs. time%0A tau : float%0A Filter parameter%0A sample_rate : int%0A Louness signal sampling frequency%0A%0A Outputs%0A -------%0A filt_loudness : numpy.ndarray%0A Filtered loudness%0A %22%22%22%0A filt_loudness = np.zeros(np.shape(loudness))%0A # Factor for virtual upsampling/inner iterations%0A lp_iter = 24%0A%0A num_samples = np.shape(loudness)%5B0%5D%0A a1 = math.exp(-1 / (sample_rate * lp_iter * tau))%0A b0 = 1 - a1%0A y1 = 0%0A%0A delta = np.copy(loudness)%0A delta = np.roll(delta,-1)%0A delta %5B-1%5D = 0%0A delta = (delta - loudness) / lp_iter%0A ui_delta = np.zeros(loudness.shape%5B0%5D*lp_iter).reshape(loudness.shape%5B0%5D,lp_iter)%0A ui_delta %5B:,0%5D = loudness %0A %0A #Create the array complete of deltas to apply the filter.%0A for i_in in np.arange(1, lp_iter):%0A ui_delta %5B:,i_in%5D = delta + ui_delta %5B:,i_in-1%5D %0A %0A # Rechape into a vector.%0A ui_delta = ui_delta.reshape(lp_iter*num_samples)%0A%0A # Sustituir este bucle for por scipy.signal.lfilter https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.lfilter.html%0A # ui_delta_filt = scipy.signal.lfilter (b0 , a0, ui_delta )%0A #filt_loudness = ui_delta_filt.reshape(loudness.shape%5B0%5D,lp_iter).T%5B:,0%5D%0A # Apply the filter.%0A ui_delta = signal.lfilter(%5Bb0%5D, %5B1,-a1%5D, ui_delta, axis=- 1, zi=None)%0A %0A # Reshape again to recover the first col.%0A ui_delta = ui_delta.reshape(loudness.shape%5B0%5D,lp_iter)%0A filt_loudness = ui_delta%5B:,0%5D%0A return filt_loudness%0A
|
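A sketch checking that the per-sample recursion y[n] = b0*x[n] + a1*y[n-1] from the original loop matches scipy.signal.lfilter([b0], [1, -a1], x), which is what the vectorised loudness_zwicker_lowpass_intp_ea variant added by this diff relies on.

import numpy as np
from scipy import signal

b0, a1 = 0.2, 0.8
x = np.random.rand(100)

# Reference: the explicit first-order recursion.
y_loop = np.zeros_like(x)
y1 = 0.0
for i, x0 in enumerate(x):
    y1 = b0 * x0 + a1 * y1
    y_loop[i] = y1

# Vectorised: same difference equation, denominator a = [1, -a1].
y_vec = signal.lfilter([b0], [1, -a1], x)
assert np.allclose(y_loop, y_vec)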
88e87a8b266059a74163e43286a7d34403d46be8
|
check for numpy first
|
bokeh/util/serialization.py
|
bokeh/util/serialization.py
|
""" Functions for helping with serialization and deserialization of
Bokeh objects.
"""
from __future__ import absolute_import
from six import iterkeys
try:
import numpy as np
is_numpy = True
except ImportError:
is_numpy = False
try:
import pandas as pd
is_pandas = True
except ImportError:
is_pandas = False
import logging
log = logging.getLogger(__name__)
_simple_id = 1000
def make_id():
""" Return a new unique ID for a Bokeh object.
Normally this function will return UUIDs to use for identifying Bokeh
objects. This is especially important for Bokeh objects stored on a
Bokeh server. However, it is convenient to have more human-readable
IDs during development, so this behavior can be overridden by
setting the environment variable ``BOKEH_SIMPLE_IDS=yes``.
"""
global _simple_id
import uuid
from ..settings import settings
if settings.simple_ids(False):
_simple_id += 1
new_id = _simple_id
else:
new_id = uuid.uuid4()
return str(new_id)
def urljoin(*args):
""" Construct an absolute URL from several URL components.
Args:
*args (str) : URL components to join
Returns:
str : joined URL
"""
from six.moves.urllib.parse import urljoin as sys_urljoin
from functools import reduce
return reduce(sys_urljoin, args)
def get_json(response):
""" Unify retrieving JSON responses from different sources.
Works correctly for HTTP responses from requests <=1.0, >1.0, and
the Flask test client.
Args:
response (Flask or requests response) : a response to process
Returns:
JSON
"""
import json
try:
import flask
except ImportError:
flask = None
if flask and isinstance(response, flask.Response):
# flask testing
return json.loads(response.data.decode('utf-8'))
else:
# requests
if hasattr(response.json, '__call__'):
return response.json()
else:
return response.json
def dump(objs, docid, changed_only=True):
""" Serialize a sequence of Bokeh objects into JSON
Args:
objs (seq[obj]) : a sequence of Bokeh object to dump
docid (str) : an ID for a Bokeh Document to dump relative to
changed_only (bool, optional) : whether to dump only attributes
that have had their values changed at some point (default: True)
Returns:
list[json]
"""
json_objs = []
for obj in objs:
ref = obj.ref
ref["attributes"] = obj.vm_serialize(changed_only=changed_only)
ref["attributes"].update({"id": ref["id"], "doc" : docid})
json_objs.append(ref)
return json_objs
def is_ref(frag):
""" Test whether a given Bokeh object graph fragment is a reference.
A Bokeh "reference" is a ``dict`` with ``"type"`` and ``"id"`` keys.
Args:
frag (dict) : a fragment of a Bokeh object graph
Returns:
True, if the fragment is a reference, otherwise False
"""
return isinstance(frag, dict) and \
frag.get('type') and \
frag.get('id')
def json_apply(fragment, check_func, func):
""" Apply a function to JSON fragments that match the given predicate
and return the collected results.
Recursively traverses a nested collection of ``dict`` and ``list``,
applying ``check_func`` to each fragment. If True, then collect
``func(fragment)`` in the final output
Args:
fragment (JSON-like) : the fragment to apply ``func`` to recursively
check_func (callable) : the predicate to test fragments with
func (callable) : the conversion function to apply
Returns:
converted fragments
"""
if check_func(fragment):
return func(fragment)
elif isinstance(fragment, list):
output = []
for val in fragment:
output.append(json_apply(val, check_func, func))
return output
elif isinstance(fragment, dict):
output = {}
for k, val in fragment.items():
output[k] = json_apply(val, check_func, func)
return output
else:
return fragment
def transform_series(obj):
"""transforms pandas series into array of values
"""
vals = obj.values
return transform_array(vals)
def transform_array(obj):
"""Transform arrays into lists of json safe types
also handles pandas series, and replacing
nans and infs with strings
"""
# Check for astype failures (putative Numpy < 1.7)
dt2001 = np.datetime64('2001')
legacy_datetime64 = (dt2001.astype('int64') ==
dt2001.astype('datetime64[ms]').astype('int64'))
## not quite correct, truncates to ms..
if obj.dtype.kind == 'M':
if legacy_datetime64:
if obj.dtype == np.dtype('datetime64[ns]'):
return (obj.astype('int64') / 10**6.0).tolist()
else:
return (obj.astype('datetime64[us]').astype('int64') / 1000.).tolist()
elif obj.dtype.kind in ('u', 'i', 'f'):
return transform_numerical_array(obj)
return obj.tolist()
def transform_numerical_array(obj):
"""handles nans/inf conversion
"""
if isinstance(obj, np.ma.MaskedArray):
obj = obj.filled(np.nan) # Set masked values to nan
if not np.isnan(obj).any() and not np.isinf(obj).any():
return obj.tolist()
else:
transformed = obj.astype('object')
transformed[np.isnan(obj)] = 'NaN'
transformed[np.isposinf(obj)] = 'Infinity'
transformed[np.isneginf(obj)] = '-Infinity'
return transformed.tolist()
def traverse_data(datum):
"""recursively dig until a flat list is found
if numpy is available convert the flat list to a numpy array
and send off to transform_array() to handle nan, inf, -inf
otherwise iterate through items in array converting non-json items
"""
if not any(isinstance(el, (list, tuple)) for el in datum) and is_numpy:
return transform_array(np.asarray(datum))
datum_copy = []
for item in datum:
if isinstance(item, (list, tuple)):
datum_copy.append(traverse_data(item))
elif isinstance(item, float):
if np.isnan(item):
item = 'NaN'
elif np.isposinf(item):
item = 'Infinity'
elif np.isneginf(item):
item = '-Infinity'
datum_copy.append(item)
else:
datum_copy.append(item)
return datum_copy
def transform_column_source_data(data):
"""iterate through the data of a ColumnSourceData object replacing
non-JSON-compliant objects with compliant ones
"""
data_copy = {}
for key in iterkeys(data):
if is_pandas and isinstance(data[key], (pd.Series, pd.Index)):
data_copy[key] = transform_series(data[key])
elif isinstance(data[key], np.ndarray):
data_copy[key] = transform_array(data[key])
else:
data_copy[key] = traverse_data(data[key])
return data_copy
|
Python
| 0
|
@@ -5986,24 +5986,37 @@
%22%22%22%0A if
+is_numpy and
not any(isin
@@ -6061,21 +6061,8 @@
tum)
- and is_numpy
:%0A
|
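A small illustration of why this diff reorders the guard: `and` evaluates left to right, so putting the cheap is_numpy flag first skips the more expensive any() scan entirely when numpy is absent.

calls = []

def expensive_scan():
    calls.append('scan')
    return True

is_numpy = False
if is_numpy and expensive_scan():
    pass
assert calls == []  # the scan never ran because is_numpy is False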
260f5ba0b74cfbad9ed13809c62a1a942cc2be7a
|
fix style
|
tests/links_tests/connection_tests/test_conv_2d_bn_activ.py
|
tests/links_tests/connection_tests/test_conv_2d_bn_activ.py
|
import unittest
import numpy as np
import chainer
from chainer import cuda
from chainer.functions import relu
from chainer import testing
from chainer.testing import attr
from chainercv.links import Conv2DBNActiv
def _add_one(x):
return x + 1
@testing.parameterize(*testing.product({
'args_style': ['explicit', 'None', 'omit'],
'activ': ['relu', 'add_one'],
}))
class TestConv2DBNActiv(unittest.TestCase):
in_channels = 1
out_channels = 1
ksize = 3
stride = 1
pad = 1
def setUp(self):
if self.activ == 'relu':
activ = relu
elif self.activ == 'add_one':
activ = _add_one
self.x = np.random.uniform(
-1, 1, (5, self.in_channels, 5, 5)).astype(np.float32)
self.gy = np.random.uniform(
-1, 1, (5, self.out_channels, 5, 5)).astype(np.float32)
# Convolution is the identity function.
initialW = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]],
dtype=np.float32).reshape(1, 1, 3, 3)
bn_kwargs = {'decay': 0.8}
initial_bias = 0
if self.args_style == 'explicit':
self.l = Conv2DBNActiv(
self.in_channels, self.out_channels, self.ksize,
self.stride, self.pad,
initialW=initialW, initial_bias=initial_bias,
activ=activ, bn_kwargs=bn_kwargs)
elif self.args_style == 'None':
self.l = Conv2DBNActiv(
None, self.out_channels, self.ksize, self.stride, self.pad,
initialW=initialW, initial_bias=initial_bias,
activ=activ, bn_kwargs=bn_kwargs)
elif self.args_style == 'omit':
self.l = Conv2DBNActiv(
self.out_channels, self.ksize, stride=self.stride,
pad=self.pad, initialW=initialW, initial_bias=initial_bias,
activ=activ, bn_kwargs=bn_kwargs)
def check_forward(self, x_data):
x = chainer.Variable(x_data)
# Make the batch normalization to be the identity function.
self.l.bn.avg_var[:] = 1
self.l.bn.avg_mean[:] = 0
with chainer.using_config('train', False):
y = self.l(x)
self.assertIsInstance(y, chainer.Variable)
self.assertIsInstance(y.data, self.l.xp.ndarray)
if self.activ == 'relu':
np.testing.assert_almost_equal(
cuda.to_cpu(y.data), np.maximum(cuda.to_cpu(x_data), 0),
decimal=4
)
elif self.activ == 'add_one':
np.testing.assert_almost_equal(
cuda.to_cpu(y.data), cuda.to_cpu(x_data) + 1,
decimal=4
)
def test_forward_cpu(self):
self.check_forward(self.x)
@attr.gpu
def test_forward_gpu(self):
self.l.to_gpu()
self.check_forward(cuda.to_gpu(self.x))
def check_backward(self, x_data, y_grad):
x = chainer.Variable(x_data)
y = self.l(x)
y.grad = y_grad
y.backward()
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.gpu
def test_backward_gpu(self):
self.l.to_gpu()
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
testing.run_module(__name__, __file__)
|
Python
| 0.000001
|
@@ -1022,16 +1022,17 @@
reshape(
+(
1, 1, 3,
@@ -1034,16 +1034,17 @@
1, 3, 3)
+)
%0A
|
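A note on the style fix: ndarray.reshape accepts either separate ints or a single tuple, so this diff only standardises on the tuple form without changing behaviour.

import numpy as np

a = np.arange(9)
assert np.array_equal(a.reshape(1, 1, 3, 3), a.reshape((1, 1, 3, 3)))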
6a58e96eaf1aa6455ed190d63c3be5e9521a73a0
|
add remove prefix helper
|
biothings_explorer/utils/common.py
|
biothings_explorer/utils/common.py
|
"""Some common utils functions for BioThings Explorer"""
from os.path import commonprefix
from ..config import id_ranks
def add_s(num: int) -> str:
"""Add 's' if num is more than one.
:param: num: An integer representing count
"""
if not isinstance(num, int):
return ''
if num <= 1:
return ''
return 's'
def dict2listoftuples(py_dict: dict) -> list:
"""Convert a single python dictionary into a list of tuples.
:param: py_dict: a single python dictionary
"""
return [(k, v) for k, v in py_dict.items()]
def listoftuples2dict(tuple_list: list) -> dict:
"""Convert a list of tuples back to a single dictionary.
:param: tuple_list: a list of tuples
"""
try:
return dict(tuple_list)
except ValueError:
raise ValueError("The input must a list of tuples \
with each tuple of length 2")
def unlist(d: dict) -> dict:
"""Find all appearance of single element list in a dictionary and unlist it.
:param: d: a python dictionary to be unlisted
"""
if isinstance(d, list):
if len(d) == 1:
return d[0]
return d
elif isinstance(d, dict):
for key, val in d.items():
if isinstance(val, list):
if len(val) == 1:
d[key] = unlist(val[0])
elif isinstance(val, dict):
unlist(val)
return d
return d
def find_longest_common_path(paths) -> str:
"""Find longest common path among a list of paths.
:param: paths: a list of paths, with '.' as the separator
"""
return commonprefix(paths).rsplit('.', 1)[0]
def get_dict_values(py_dict: dict,
excluded_keys: list = ["@type", "@input",
"$source"]) -> list:
"""Retrieve the values of a python dictionary.
:param: py_dict: a python dictionary
:param: excluded_keys: a list of keys to be excluded, \
meaning the values of these keys should be excluded
"""
return [v for k, v in py_dict.items() if k not in excluded_keys]
def get_primary_id_from_equivalent_ids(equivalent_ids: dict, _type: str):
"""Find primary id from equivalent id dict.
:param: equivalent_ids: a dictionary containing all equivalent ids of a bio-entity
:param: _type: the type of the bio-entity
"""
if not equivalent_ids:
return ''
if _type in id_ranks:
id_rank = id_ranks.get(_type)
# loop through id_rank, return the first found id
for _item in id_rank:
if equivalent_ids.get('bts:' + _item):
return (_item + ':' + str(equivalent_ids['bts:' + _item][0]))
# if no id from id_rank found, return a random one from equivalent ids
for k, v in equivalent_ids.items():
if v:
return (k[4:] + ':' + str(v[0]))
def get_name_from_equivalent_ids(equivalent_ids, input_label=None):
"""Find name from equivalent id dict.
:param: equivalent_ids: a dictionary containing all equivalent ids of a bio-entity.
:param: input_label: designated input_label
"""
if input_label:
return input_label
if not equivalent_ids:
return "unknown"
if equivalent_ids.get('bts:symbol'):
return equivalent_ids.get('bts:symbol')[0]
if equivalent_ids.get('bts:name'):
return equivalent_ids.get('bts:name')[0]
for v in equivalent_ids.values():
if v:
if isinstance(v, list):
return v[0]
else:
return v
return "unknown"
|
Python
| 0.000002
|
@@ -1164,26 +1164,24 @@
eturn d%0A
-el
if isinstanc
@@ -2881,16 +2881,30 @@
(v%5B0%5D))%0A
+ return ''%0A
%0A%0Adef ge
@@ -3551,30 +3551,8 @@
%5B0%5D%0A
- else:%0A
@@ -3569,28 +3569,824 @@
turn v%0A return %22unknown%22%0A
+%0Adef remove_prefix(_input, prefix):%0A %22%22%22Remove all prefixes in the input.%0A %0A :param: _input: the input%0A :param: prefix: the prefix%0A %22%22%22%0A if not prefix.endswith(%22:%22):%0A prefix += ':'%0A if not _input:%0A return _input%0A if isinstance(_input, str):%0A if _input.startswith(prefix):%0A return _input%5Blen(prefix):%5D%0A return _input%0A if isinstance(_input, dict):%0A new_result = %7B%7D%0A for k, v in _input.items():%0A if k.startswith(prefix):%0A new_result%5Bk%5Blen(prefix):%5D%5D = remove_prefix(v, prefix)%0A else:%0A new_result%5Bk%5D = remove_prefix(v, prefix)%0A return new_result%0A if isinstance(_input, list):%0A return %5Bremove_prefix(item, prefix) for item in _input%5D%0A return _input%0A
|
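A usage sketch for the remove_prefix helper this diff introduces, re-implemented here in a compact equivalent form for illustration.

def remove_prefix(_input, prefix):
    # Strip 'prefix:' from strings, recursing through dicts and lists.
    if not prefix.endswith(':'):
        prefix += ':'
    if isinstance(_input, str):
        return _input[len(prefix):] if _input.startswith(prefix) else _input
    if isinstance(_input, dict):
        return {remove_prefix(k, prefix): remove_prefix(v, prefix)
                for k, v in _input.items()}
    if isinstance(_input, list):
        return [remove_prefix(item, prefix) for item in _input]
    return _input

assert remove_prefix({'bts:symbol': ['bts:CDK2']}, 'bts') == \
    {'symbol': ['CDK2']}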
25010fb053a5a93d0706e1f8273f5f83cc5390ee
|
fix and add condor_gsi status to status poller
|
data_collectors/general/csstatus.py
|
data_collectors/general/csstatus.py
|
import multiprocessing
from multiprocessing import Process
import logging
import time
import sys
import os
import psutil
from subprocess import Popen, PIPE
from cloudscheduler.lib.db_config import *
from cloudscheduler.lib.ProcessMonitor import ProcessMonitor
from cloudscheduler.lib.poller_functions import \
start_cycle, \
wait_cycle
import htcondor
import classad
from sqlalchemy import create_engine
from sqlalchemy.orm import Session
from sqlalchemy.ext.automap import automap_base
def _service_msg(service_name):
    # special case: when service_name is "csv2-status", the os command used in the else branch won't strip the problem characters
if service_name == "csv2-status":
# handle it differently
file = os.popen("service "+service_name+" status | grep 'Active'")
status = file.read()
return status.splitlines()[0][11:]
else:
return os.popen("service "+service_name+" status | grep 'Active' | cut -c12-").read()
def status_poller():
multiprocessing.current_process().name = "Status Poller"
services = ["csv2-main", "csv2-openstack", "csv2-jobs", "csv2-machines", "csv2-status", "csv2-timeseries", "csv2-ec2", "csv2-htc-agent", "csv2-watch", "csv2-vm-data", "rabbitmq-server", "mariadb", "condor"]
db_service_names = {
"csv2-main": "csv2_main",
"csv2-openstack": "csv2_openstack",
"csv2-jobs": "csv2_jobs",
"csv2-machines": "csv2_machines",
"csv2-status": "csv2_status",
"csv2-timeseries": "csv2_timeseries",
"csv2-ec2": "csv2_ec2",
"csv2-htc-agent": "csv2_htc_agent",
"csv2-watch": "csv2_watch",
"csv2-vm-data": "csv2_vm_data",
"rabbitmq-server": "rabbitmq_server",
"mariadb": "mariadb",
"condor": "condor"
}
# Initialize database objects
#Base = automap_base()
#db_engine = create_engine(
# 'mysql://%s:%s@%s:%s/%s' % (
# config.db_config['db_user'],
# config.db_config['db_password'],
# config.db_config['db_host'],
# str(config.db_config['db_port']),
# config.db_config['db_name']
# )
# )
#Base.prepare(db_engine, reflect=True)
config = Config('/etc/cloudscheduler/cloudscheduler.yaml', os.path.basename(sys.argv[0]), refreshable=True)
STATUS = config.db_map.classes.csv2_system_status
config.db_open()
db_session = config.db_session
cycle_start_time = 0
new_poll_time = 0
poll_time_history = [0,0,0,0]
try:
while True:
config.refresh()
new_poll_time, cycle_start_time = start_cycle(new_poll_time, cycle_start_time)
# id will always be zero because we only ever want one row of these
system_dict = {'id': 0}
for service in services:
system_dict[db_service_names[service] + "_msg"] = _service_msg(service)
if "running" in system_dict[db_service_names[service] + "_msg"]:
system_dict[db_service_names[service] + "_status"] = 1
else:
system_dict[db_service_names[service] + "_status"] = 0
logging.error("Found service %s is dead...", service)
system_dict["load"] = round(100*( os.getloadavg()[0] / os.cpu_count() ),1)
system_dict["ram"] = psutil.virtual_memory().percent
system_dict["ram_size"] = round(psutil.virtual_memory().total/1000000000 , 1)
system_dict["ram_used"] = round(psutil.virtual_memory().used/1000000000 , 1)
system_dict["swap"] = psutil.swap_memory().percent
system_dict["swap_size"] = round(psutil.swap_memory().total/1000000000 , 1)
system_dict["swap_used"] = round(psutil.swap_memory().used/1000000000 , 1)
system_dict["disk"] = round(100*(psutil.disk_usage('/').used / psutil.disk_usage('/').total),1)
system_dict["disk_size"] = round(psutil.disk_usage('/').total/1000000000 , 1)
system_dict["disk_used"] = round(psutil.disk_usage('/').used/1000000000 , 1)
system_dict["last_updated"] = int(time.time())
new_status = STATUS(**system_dict)
try:
db_session.merge(new_status)
db_session.commit()
except Exception as exc:
logging.exception("Failed to merge and commit status update exiting")
config.db_close()
del db_session
exit(1)
wait_cycle(cycle_start_time, poll_time_history, config.categories["csstatus.py"]["sleep_interval_status"])
except Exception as exc:
logging.exception("Problem during general execution:")
logging.exception(exc)
logging.error("Exiting..")
config.db_close()
del db_session
exit(1)
if __name__ == '__main__':
process_ids = {
'status': status_poller,
}
procMon = ProcessMonitor(config_params=[os.path.basename(sys.argv[0]), "ProcessMonitor"], pool_size=8, orange_count_row='csv2_status_error_count', process_ids=process_ids)
config = procMon.get_config()
logging = procMon.get_logging()
logging.info("**************************** starting csstatus *********************************")
# Wait for keyboard input to exit
try:
#start processes
procMon.start_all()
while True:
config.refresh()
procMon.check_processes()
time.sleep(config.categories["ProcessMonitor"]["sleep_interval_main_long"])
except (SystemExit, KeyboardInterrupt):
logging.error("Caught KeyboardInterrupt, shutting down threads and exiting...")
except Exception as ex:
logging.exception("Process Died: %s", ex)
procMon.join_all()
|
Python
| 0
|
@@ -1133,24 +1133,43 @@
2-machines%22,
+ %22csv2-condor-gsi%22,
%22csv2-statu
@@ -1553,24 +1553,88 @@
machines%22, %0A
+ %22csv2-condor-gsi%22: %22csv2_condor_gsi%22, %0A
|
4748fd514fcafd9a0536b24069bf3365cb60a926
|
Bump development version number
|
debreach/__init__.py
|
debreach/__init__.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from distutils import version
__version__ = '1.3.0'
version_info = version.StrictVersion(__version__).version
default_app_config = 'debreach.apps.DebreachConfig'
|
Python
| 0
|
@@ -113,9 +113,9 @@
1.3.
-0
+1
'%0Ave
|
f4a010660aecaccf24fe2afdd5a568e06e65dd1e
|
Fix adding board_collaborator to invite
|
blimp_boards/boards/serializers.py
|
blimp_boards/boards/serializers.py
|
from django.core.exceptions import ValidationError
from rest_framework import serializers
from ..accounts.models import AccountCollaborator
from ..invitations.models import InvitedUser
from ..accounts.permissions import AccountPermission
from ..users.serializers import UserSimpleSerializer
from .models import Board, BoardCollaborator, BoardCollaboratorRequest
class BoardSerializer(serializers.ModelSerializer):
created_by = serializers.PrimaryKeyRelatedField(read_only=True)
class Meta:
model = Board
read_only_fields = ('slug', )
def validate_account(self, attrs, source):
account = attrs[source]
request = self.context['request']
view = self.context['view']
permission = AccountPermission()
has_object_permission = permission.has_object_permission(
request, view, account)
if not has_object_permission:
msg = 'You are not a collaborator in this account.'
raise serializers.ValidationError(msg)
return attrs
def save_object(self, obj, **kwargs):
obj.created_by = self.context['request'].user
return super(BoardSerializer, self).save_object(obj, **kwargs)
class BoardCollaboratorSimpleSerializer(serializers.ModelSerializer):
board = BoardSerializer()
class Meta:
model = BoardCollaborator
fields = ('id', 'board', 'user', 'invited_user', 'permission',
'date_created', 'date_modified',)
class BoardCollaboratorSerializer(serializers.ModelSerializer):
email = serializers.EmailField(write_only=True, required=False)
user_data = serializers.SerializerMethodField('get_user_data')
class Meta:
model = BoardCollaborator
read_only_fields = ('board', )
fields = ('id', 'board', 'user', 'invited_user', 'permission',
'email', 'user_data', 'date_created', 'date_modified',)
def get_user_data(self, obj):
from ..invitations.serializers import InvitedUserSimpleSerializer
if obj.invited_user:
serializer = InvitedUserSimpleSerializer(obj.invited_user)
else:
serializer = UserSimpleSerializer(obj.user)
return serializer.data
def validate_email(self, attrs, source):
email = attrs.get(source)
if not email:
return attrs
del attrs[source]
board = self.context['board']
account = board.account
try:
account_collaborator = AccountCollaborator.objects.get(
account=account, user__email=email)
attrs['user'] = account_collaborator.user
except AccountCollaborator.DoesNotExist:
invited_user_data = {
'email': email,
'account': account,
'created_by': self.context['request'].user,
}
self.invited_user, created = InvitedUser.objects.get_or_create(
email=email, account=account, defaults=invited_user_data)
attrs['invited_user'] = self.invited_user
return attrs
def validate_user(self, attrs, source):
user = attrs.get(source)
board = attrs.get('board')
if user and board and board.is_user_collaborator(user):
msg = 'User is already a collaborator in this board.'
raise ValidationError(msg)
return attrs
def save_object(self, obj, **kwargs):
created = bool(obj.pk)
board = self.context.get('board')
if not created and board:
obj.board = board
super(BoardCollaboratorSerializer, self).save_object(obj, **kwargs)
if not created and obj.invited_user:
self.invited_user.board_collaborator = obj
self.invited_user.send_invite()
class BoardCollaboratorPublicSerializer(BoardCollaboratorSerializer):
"""
BoardCollaborator serializer that removes email from
user and invited_user fields.
"""
class Meta:
model = BoardCollaborator
def get_user_data(self, obj):
data = super(BoardCollaboratorPublicSerializer,
self).get_user_data(obj)
data.pop('email', None)
return data
class BoardCollaboratorRequestSerializer(serializers.ModelSerializer):
email = serializers.EmailField()
class Meta:
model = BoardCollaboratorRequest
|
Python
| 0
|
@@ -3757,16 +3757,53 @@
r = obj%0A
+ self.invited_user.save()%0A
|
6e3cd31c7efbea71b5f731429c24e946ce6fc476
|
Bump version
|
debreach/__init__.py
|
debreach/__init__.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from distutils import version
__version__ = '0.1.1'
version_info = version.StrictVersion(__version__).version
|
Python
| 0
|
@@ -111,11 +111,11 @@
'0.
-1.1
+2.0
'%0Ave
|
21149eb8d128c405d0b69991d1855e99ced951c7
|
Test fixed: WorkbenchUser is auto created by signal, so creating it separately is not required
|
ExperimentsManager/tests.py
|
ExperimentsManager/tests.py
|
from django.test import TestCase
from .models import Experiment
from UserManager.models import WorkbenchUser
from django.contrib.auth.models import User
from django.test import Client
class ExperimentTestCase(TestCase):
def setUp(self):
self.user = User.objects.create_user('test', 'test@test.nl', 'test')
self.workbench_user = WorkbenchUser.objects.create(netid='jlmdegoede', user=self.user)
self.experiment = Experiment.objects.create(title='Experiment', description='test', version='1.0', owner=self.workbench_user)
def test_index_not_signed_in(self):
c = Client()
response = c.get('/experiments/')
self.assertEqual(response.status_code, 302)
def test_index_signed_in(self):
c = Client()
c.login(username='test', password='test')
response = c.get('/experiments/')
self.assertIsNotNone(response.context['table'])
|
Python
| 0
|
@@ -369,35 +369,12 @@
cts.
-create(netid='jlmdegoede',
+get(
user
@@ -880,8 +880,9 @@
table'%5D)
+%0A
|
bbf6df137f4b9b37e733750937e58131f36c6aa3
|
Clean old todos
|
guicore/guievents/alertevents.py
|
guicore/guievents/alertevents.py
|
from guicore.displayscreen import EventDispatch
import guicore.guiutils as guiutils
from controlevents import CEvent
import debug
import logsupport
from logsupport import ConsoleDetail, ConsoleWarning
from utils import timers
import alertsystem.alerttasks as alerttasks
import guicore.switcher as switcher
import screens.__screens as screens
TimerName = 0
def AlertEvents(event):
global TimerName
guiutils.HBEvents.Entry('Var or Alert' + repr(event))
evtype = 'variable' if event.type == CEvent.ISYVar else 'node'
debug.debugPrint('Dispatch', 'ISY ', evtype, ' change', event)
alert = event.alert
if alert.state in ('Armed', 'Init'):
if alert.trigger.IsTrue(): # alert condition holds
if alert.trigger.delay != 0: # delay invocation
alert.state = 'Delayed'
debug.debugPrint('Dispatch', "Post with delay:", alert.name, alert.trigger.delay)
TimerName += 1
alert.timer = timers.OnceTimer(alert.trigger.delay, start=True,
name='MainLoop' + str(TimerName),
proc=alerttasks.HandleDeferredAlert, param=alert)
else: # invoke now
alert.state = 'FiredNoDelay'
debug.debugPrint('Dispatch', "Invoke: ", alert.name)
alert.Invoke() # either calls a proc or enters a screen and adjusts alert state appropriately
else:
if alert.state == 'Armed':
# condition cleared after alert rearmed - timing in the queue?
				logsupport.Logs.Log('Anomalous Trigger clearing while armed: ', repr(alert),
severity=ConsoleDetail, hb=True)
else:
alert.state = 'Armed'
logsupport.Logs.Log('Initial var value for trigger is benign: ', repr(alert),
severity=ConsoleDetail)
elif alert.state == 'Active' and not alert.trigger.IsTrue(): # alert condition has cleared and screen is up
debug.debugPrint('Dispatch', 'Active alert cleared', alert.name)
alert.state = 'Armed' # just rearm the alert
switcher.SwitchScreen(screens.HomeScreen, 'Dim', 'Cleared alert', newstate='Home')
elif alert.state == 'Active' and alert.trigger.IsTrue(): # alert condition changed but is still true
pass
elif ((alert.state == 'Delayed') or (alert.state == 'Deferred')) and not alert.trigger.IsTrue():
# condition changed under a pending action (screen or proc) so just cancel and rearm
if alert.timer is not None:
alert.timer.cancel()
alert.timer = None
else:
logsupport.DevPrint('Clear with no timer?? {}'.format(repr(alert)))
debug.debugPrint('Dispatch', 'Delayed event cleared before invoke', alert.name)
alert.state = 'Armed'
else:
logsupport.Logs.Log("Anomolous change situation State: ", alert.state, " Alert: ", repr(alert),
" Trigger IsTue: ",
alert.trigger.IsTrue(), severity=ConsoleWarning, hb=True)
debug.debugPrint('Dispatch', 'ISYVar/ISYAlert passing: ', alert.state, alert.trigger.IsTrue(),
event,
alert)
# Armed and false: irrelevant report
# Active and true: extraneous report - can happen if value changes but still is in range of true
# Delayed or deferred and true: redundant report
EventDispatch[CEvent.ISYVar] = AlertEvents
EventDispatch[CEvent.ISYAlert] = AlertEvents
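# Rough lifecycle of an alert as handled above (a summary, not exhaustive):
#   Armed/Init --trigger true--> Delayed (timer) or FiredNoDelay --> Invoke/Active
#   Active --trigger clears--> Armed (home screen restored)
#   Delayed/Deferred --trigger clears--> timer cancelled, back to Armed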
|
Python
| 0.000002
|
@@ -2518,19 +2518,154 @@
'%0A%09else:
-%0A%09%09
+ # todo%0A%09%09# this is ok until maybe I figure out how to avoid. The change causes a firing and the the arming causes a second posting%0A%09%09#
logsuppo
@@ -2751,24 +2751,25 @@
r(alert),%0A%09%09
+#
%09%09%09%09%09%22 Trigg
@@ -2774,16 +2774,17 @@
gger IsT
+r
ue: %22,%0A%09
@@ -2780,24 +2780,25 @@
sTrue: %22,%0A%09%09
+#
%09%09%09%09%09alert.t
|
f149baa8ca7a401f8d2d390d84fc85960edd743d
|
Work in progress
|
dius.py
|
dius.py
|
#!python3
# Copyright (c) 2016 Petr Veprek
"""Disk Usage"""
import math
import operator
import os
import string
import sys
import time
TITLE = "Disk Usage"
VERSION = "0.0.0"
VERBOSE = False
def now(on="on", at="at"):
return "{}{} {}{}".format(on + " " if on != "" else "", time.strftime("%Y-%m-%d"), at + " " if at != "" else "", time.strftime("%H:%M:%S"))
def neat(str, max):
str = "".join([char if char in string.printable else "_" for char in str])
if len(str) > max: str = str[:max-3] + "..."
return str
def digits(max):
return math.ceil(math.log10(max))
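# Caveat: ceil(log10(m)) is one short when m is an exact power of 10
# (e.g. digits(100) == 2); harmless for the small counts used below.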
def main():
print("{} {}".format(TITLE, VERSION))
if VERBOSE:
print("\a", end="")
print("Python {}".format(sys.version))
print("Command '{}'".format(sys.argv[0]))
print("Arguments {}".format(sys.argv[1:]))
print("Executed {}".format(now()))
start = time.time()
top = os.getcwd()
top="./Petr/Docs/_Documents" #####################################################
print("Analyzing {}".format(top))
usage = {}
for path, dirs, files in os.walk(top):
print("\rScanning {: <80}".format(neat(path, 80)), end="")
usage[path] = sum(map(os.path.getsize, filter(os.path.isfile, map(lambda file: os.path.join(path, file), files))))
print("\r {: <80}\r".format(""), end="")
usage = sorted(usage.items(), key=operator.itemgetter(1), reverse=True)
for i, (path, size) in enumerate(usage[:20]):
print("{:{}}/{} {} {}".format(i+1, digits(20), len(usage), size, path))
if VERBOSE:
elapsed = time.time() - start
seconds = round(elapsed)
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
days, hours = divmod(hours, 24)
weeks, days = divmod(days, 7)
print("Completed {}".format(now()))
print("Elapsed {:d}w {:d}d {:d}h {:d}m {:d}s ({:,.3f}s)".format(weeks, days, hours, minutes, seconds, elapsed))
print("\a", end="")
if '__main__' == __name__:
main()
|
Python
| 0.000003
|
@@ -184,16 +184,38 @@
= False
+%0AWIDTH = 80%0ACOUNT = 20
%0A%0Adef no
@@ -948,95 +948,8 @@
d()%0A
- top=%22./Petr/Docs/_Documents%22 #####################################################%0A
@@ -1070,18 +1070,18 @@
ing %7B: %3C
-80
+%7B%7D
%7D%22.forma
@@ -1097,11 +1097,21 @@
th,
-80)
+WIDTH), WIDTH
), e
@@ -1266,18 +1266,18 @@
%7B: %3C
-80
+%7B%7D
%7D%5Cr%22.for
@@ -1282,16 +1282,23 @@
ormat(%22%22
+, WIDTH
), end=%22
@@ -1420,18 +1420,21 @@
(usage%5B:
-20
+COUNT
%5D):%0A
@@ -1454,16 +1454,19 @@
%7B%7D%7D/%7B%7D %7B
+:%7B%7D
%7D %7B%7D%22.fo
@@ -1482,18 +1482,21 @@
digits(
-20
+COUNT
), len(u
@@ -1507,16 +1507,37 @@
), size,
+ digits(usage%5B0%5D%5B1%5D),
path))%0A
|
9ec49083879831d7b2cfd863ea139e0e86d42c36
|
Bump release version
|
debreach/__init__.py
|
debreach/__init__.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from distutils import version
__version__ = '1.3.1'
version_info = version.StrictVersion(__version__).version
default_app_config = 'debreach.apps.DebreachConfig'
|
Python
| 0
|
@@ -111,11 +111,11 @@
'1.
-3.1
+4.0
'%0Ave
|
b386ee505976ffda9c9037af5cca6c76adf90fe1
|
Fix SDCard issue when no SDCard inserted in ECPIX5 board. Now enable to detect SDCard presence. https://github.com/litex-hub/linux-on-litex-vexriscv/issues/171
|
litex_boards/platforms/ecpix5.py
|
litex_boards/platforms/ecpix5.py
|
#
# This file is part of LiteX-Boards.
#
# Copyright (c) 2020 Florent Kermarrec <florent@enjoy-digital.fr>
# SPDX-License-Identifier: BSD-2-Clause
from litex.build.generic_platform import *
from litex.build.lattice import LatticePlatform
from litex.build.lattice.programmer import OpenOCDJTAGProgrammer
# IOs ----------------------------------------------------------------------------------------------
_io = [
# Clk / Rst
("clk100", 0, Pins("K23"), IOStandard("LVCMOS33")),
("rst_n", 0, Pins("N5"), IOStandard("LVCMOS33")),
# Leds
("rgb_led", 0,
Subsignal("r", Pins("U21")),
Subsignal("g", Pins("W21")),
Subsignal("b", Pins("T24")),
IOStandard("LVCMOS33"),
),
("rgb_led", 1,
Subsignal("r", Pins("T23")),
Subsignal("g", Pins("R21")),
Subsignal("b", Pins("T22")),
IOStandard("LVCMOS33"),
),
("rgb_led", 2,
Subsignal("r", Pins("P21")),
Subsignal("g", Pins("R23")),
Subsignal("b", Pins("P22")),
IOStandard("LVCMOS33"),
),
("rgb_led", 3,
Subsignal("r", Pins("K21")),
Subsignal("g", Pins("K24")),
Subsignal("b", Pins("M21")),
IOStandard("LVCMOS33"),
),
# Serial
("serial", 0,
Subsignal("rx", Pins("R26"), IOStandard("LVCMOS33")),
Subsignal("tx", Pins("R24"), IOStandard("LVCMOS33")),
),
# DDR3 SDRAM
("ddram", 0,
Subsignal("a", Pins(
"T5 M3 L3 V6 K2 W6 K3 L1",
"H2 L2 N1 J1 M1 K1"),
IOStandard("SSTL15_I")),
Subsignal("ba", Pins("U6 N3 N4"), IOStandard("SSTL15_I")),
Subsignal("ras_n", Pins("T3"), IOStandard("SSTL15_I")),
Subsignal("cas_n", Pins("P2"), IOStandard("SSTL15_I")),
Subsignal("we_n", Pins("R3"), IOStandard("SSTL15_I")),
Subsignal("dm", Pins("U4 U1"), IOStandard("SSTL15_I")),
Subsignal("dq", Pins(
"T4 W4 R4 W5 R6 P6 P5 P4",
"R1 W3 T2 V3 U3 W1 T1 W2",),
IOStandard("SSTL15_I"),
Misc("TERMINATION=75")),
Subsignal("dqs_p", Pins("V4 V1"), IOStandard("SSTL15D_I"),
Misc("TERMINATION=OFF"),
Misc("DIFFRESISTOR=100")),
Subsignal("clk_p", Pins("H3"), IOStandard("SSTL15D_I")),
Subsignal("cke", Pins("P1"), IOStandard("SSTL15_I")),
Subsignal("odt", Pins("P3"), IOStandard("SSTL15_I")),
Misc("SLEWRATE=FAST"),
),
    # RGMII Ethernet
("eth_clocks", 0,
Subsignal("tx", Pins("A12")),
Subsignal("rx", Pins("E11")),
IOStandard("LVCMOS33")
),
("eth", 0,
Subsignal("rst_n", Pins("C13")),
Subsignal("mdio", Pins("A13")),
Subsignal("mdc", Pins("C11")),
Subsignal("rx_ctl", Pins("A11")),
Subsignal("rx_data", Pins("B11 A10 B10 A9"), Misc("PULLMODE=UP")), # RGMII mode - Advertise all capabilities.
Subsignal("tx_ctl", Pins("C9")),
Subsignal("tx_data", Pins("D8 C8 B8 A8")),
IOStandard("LVCMOS33")
),
# SDCard
("sdcard", 0,
Subsignal("data", Pins("N26 N25 N23 N21"), Misc("PULLMODE=UP")),
Subsignal("cmd", Pins("M24"), Misc("PULLMODE=UP")),
Subsignal("clk", Pins("P24")),
Subsignal("cmd_dir", Pins("M23")),
Subsignal("dat0_dir", Pins("N24")),
Subsignal("dat13_dir", Pins("P26")),
IOStandard("LVCMOS33"),
),
]
# Connectors ---------------------------------------------------------------------------------------
_connectors = []
# Platform -----------------------------------------------------------------------------------------
class Platform(LatticePlatform):
default_clk_name = "clk100"
default_clk_period = 1e9/100e6
def __init__(self, toolchain="trellis", **kwargs):
LatticePlatform.__init__(self, "LFE5UM5G-85F-8BG554I", _io, _connectors, toolchain=toolchain, **kwargs)
def create_programmer(self):
return OpenOCDJTAGProgrammer("openocd_ecpix5.cfg")
def do_finalize(self, fragment):
LatticePlatform.do_finalize(self, fragment)
self.add_period_constraint(self.lookup_request("clk100", loose=True), 1e9/100e6)
self.add_period_constraint(self.lookup_request("eth_clocks:rx", 0, loose=True), 1e9/125e6)
|
Python
| 0
|
@@ -3268,24 +3268,69 @@
ns(%22P24%22)),%0A
+ Subsignal(%22cd%22, Pins(%22L22%22)),%0A
Subs
|
206e8c2da4677532add03deadac03e88a7cd0da8
|
update __init__
|
cleverhans/__init__.py
|
cleverhans/__init__.py
|
"""The CleverHans adversarial example library"""
from cleverhans.devtools.version import append_dev_version
# If possible attach a hex digest to the version string to keep track of
# changes in the development branch
__version__ = append_dev_version('2.0.0')
|
Python
| 0.000004
|
@@ -249,9 +249,9 @@
on('
-2
+3
.0.0
|
878db5485946935f8784c6c9f15decbe15c0dfbc
|
Remove catchall redirect
|
democracylab/urls.py
|
democracylab/urls.py
|
"""democracylab URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
from django.contrib.auth import views as auth_views
from django.views.generic.base import RedirectView
from . import views
urlpatterns = [
url(r'^signup/$', views.signup, name='signup'),
url(r'^login/$', views.login_view, name='login_view'),
url(r'^logout/$', auth_views.logout, {'next_page': '/'}, name='logout'),
url(
r'^password_reset/$',
views.password_reset,
name="password_reset",
),
url(
r'^change_password/$',
views.change_password,
name="change_password",
),
url(
r'^verify_user/(?P<user_id>[0-9]+)/(?P<token>[0-9a-z\-]+)$',
views.verify_user,
name="verify_user"
),
url(
r'^verify_user/$',
views.send_verification_email,
name="send_verification_email"
),
url(r'^', include('civictechprojects.urls')),
url(r'^$', RedirectView.as_view(url='/index/', permanent=False)),
url(r'^admin/', admin.site.urls),
url(r'^platform$', RedirectView.as_view(url='http://connect.democracylab.org/platform/', permanent=False)),
url(r'^.*$', RedirectView.as_view(url='/index/', permanent=False)),
# url(
# r'check_email/(?P<user_email>.*)$',
# views.check_email,
# name="check_email"
# )
]
|
Python
| 0
|
@@ -1785,32 +1785,34 @@
ent=False)),%0A
+ #
url(r'%5E.*$', Re
|
491a99aa56d27d5c37af4e0538e808d2ec47d8a6
|
update problem
|
hackerrank/016_lisas_workbook.py
|
hackerrank/016_lisas_workbook.py
|
#!/bin/python3
"""
https://www.hackerrank.com/challenges/bear-and-workbook?h_r=next-challenge&h_v=zen
Lisa just got a new math workbook. A workbook contains exercise problems, grouped into chapters.
* There are n chapters in Lisa's workbook, numbered from 1 to n.
* The i-th chapter has ti problems, numbered from 1 to ti.
* Each page can hold up to k problems. There are no empty pages or unnecessary spaces, so only the last page of a chapter may contain fewer than k problems.
* Each new chapter starts on a new page, so a page will never contain problems from more than one chapter.
* The page number indexing starts at 1.
Lisa believes a problem to be special if its index (within a chapter) is the same as the page number where it's located. Given the details for Lisa's workbook, can you count its number of special problems?
Note: See the diagram in the Explanation section for more details.
Input Format
The first line contains two integers n and k — the number of chapters and the maximum number of problems per page respectively.
The second line contains n integers t1, t2,...,tn, where ti denotes the number of problems in the i-th chapter.
Constraints
1 <= n,k,ti <= 100
Output Format
Print the number of special problems in Lisa's workbook.
Sample Input
5 3
4 2 6 1 10
Sample Output
4
Explanation
The diagram below depicts Lisa's workbook with n = 5 chapters and a maximum of k = 3 problems per page. Special problems are outlined in red, and page numbers are in yellow squares.
+-Chap1+--------+ +-Chap1+---+ +-Chap2+---+ +-Chap3+---+ +-Chap3+--------+
| | | | | | | | | |
| Problem1(Red) | | Problem4 | | Problem1 | | Problem1 | | Problem4 |
| Problem2 | | | | Problem2 | | Problem2 | | Problem5(RED) |
| Problem3 | | | | | | Problem3 | | Problem6 |
| | | | | | | | | |
+---------------+ +----------+ +----------+ +----------+ +---------------+
P1 P2 P3 P4 P5
+-Chap4----+ +-Chap5----+ +-Chap5----+ +-Chap5---------+ +-Chap5----------+
| | | | | | | | | |
| Problem1 | | Problem1 | | Problem4 | | Problem7 | | Problem10(RED) |
| | | Problem2 | | Problem5 | | Problem8 | | |
| | | Problem3 | | Problem6 | | Problem9(RED) | | |
| | | | | | | | | |
| | | | | | | | | |
+----------+ +----------+ +----------+ +---------------+ +----------------+
P6 P7 P8 P9 P10
There are 4 special problems and thus we print the number 4 on a new line.
"""
import sys
arr = [int(arr_temp) for arr_temp in input().strip().split(' ')]
n, k = arr[0], arr[1]
t_arr = [int(arr_temp) for arr_temp in input().strip().split(' ')]
page_number = 1
result = 0
for problems_count in t_arr:
pages_count = (problems_count - 1) // k + 1
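    # Assumed completion (the snippet above ends mid-loop): a problem is special
    # when its in-chapter index equals the global page number of its page.
    for problem in range(1, problems_count + 1):
        page = page_number + (problem - 1) // k
        if problem == page:
            result += 1
    page_number += pages_count
print(result)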
|
Python
| 0.000001
|
@@ -1725,18 +1725,18 @@
oblem1(R
-ed
+ED
) %7C %7C Pr
|
c2ac7f24d5263ed486fd4c50e3175fc4ea84a374
|
Remove unused forms
|
webserver/codemanagement/forms.py
|
webserver/codemanagement/forms.py
|
from django import forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Fieldset, Submit
from crispy_forms.bootstrap import FormActions
from .models import BaseClient, TeamClient, TeamSubmission
password_help = """
<p class="text-info">
Choose a password for cloning and submitting your code. <br>
<span class="text-error">It will be stored inplain text</span>
so that you and your team <br> members will be able
to see it. It can be changed at any time.
</p>
"""
class TeamRepoForm(forms.ModelForm):
base = forms.ModelChoiceField(queryset=BaseClient.objects.all(),
label="Client Language")
git_password = forms.CharField(help_text=password_help)
class Meta:
model = TeamClient
fields = ('base', 'git_password')
def __init__(self, *args, **kwargs):
# Crispy form styling stuff
self.helper = FormHelper()
self.helper.form_class = 'form-horizontal'
self.helper.label_class = 'col-lg-2'
self.helper.field_class = 'col-lg-8'
self.helper.layout = Layout(
Fieldset(
"Setup your team's code repository",
'base',
'git_password',
),
FormActions(
Submit('submit', 'Submit', css_class='button white')
)
)
# Limit base repo queryset
base_clients = kwargs.pop('base_clients', None)
super(TeamRepoForm, self).__init__(*args, **kwargs)
if base_clients:
self.fields['base'].queryset = base_clients
def clean_git_password(self):
password = self.cleaned_data['git_password']
if len(password) < 6:
msg = "Password should be at least 6 characters"
raise forms.ValidationError(msg)
return password
class TeamPasswordForm(forms.ModelForm):
git_password = forms.CharField(help_text=password_help)
class Meta:
model = TeamClient
fields = ('git_password',)
def __init__(self, *args, **kwargs):
# Crispy form styling stuff
self.helper = FormHelper()
self.helper.form_class = 'form-horizontal'
self.helper.label_class = 'col-lg-2'
self.helper.field_class = 'col-lg-8'
self.helper.layout = Layout(
Fieldset(
"Change your team's git password",
'git_password',
),
FormActions(
Submit('submit', 'Submit', css_class='button white')
)
)
super(TeamPasswordForm, self).__init__(*args, **kwargs)
def clean_git_password(self):
password = self.cleaned_data['git_password']
if len(password) < 6:
msg = "Password should be at least 6 characters"
raise forms.ValidationError(msg)
return password
class SubmitForm(forms.ModelForm):
class Meta:
model = TeamSubmission
fields = ('name',)
def __init__(self, *args, **kwargs):
# Crispy form styling stuff
self.helper = FormHelper()
self.helper.form_class = 'form-horizontal'
self.helper.label_class = 'col-lg-2'
self.helper.field_class = 'col-lg-8'
self.helper.layout = Layout(
Fieldset(
'Submit to the arena',
'name',
),
FormActions(
Submit('submit', 'Submit', css_class='button white')
)
)
super(SubmitForm, self).__init__(*args, **kwargs)
def validate_unique(self):
exclude = self._get_validation_exclusions()
exclude.remove('teamclient')
try:
self.instance.validate_unique(exclude=exclude)
except forms.ValidationError, e:
msg = 'Your team already has a submission with this name.'
e.message_dict['__all__'] = [msg]
self._update_errors(e.message_dict)
class AuthForm(forms.Form):
"""Used by the RepoAuthHandler to check data from API clients"""
teamid = forms.IntegerField(required=True)
password = forms.CharField(required=True)
class PathForm(forms.Form):
"""Used by the RepoPathHandler to check data from API clients"""
teamid = forms.IntegerField(required=True)
class TagListForm(forms.Form):
"""Used by the RepoTagListHandler to obtain a list of tags"""
competition = forms.CharField(required=True)
|
Python
| 0
|
@@ -4146,298 +4146,4 @@
ue)%0A
-%0A%0Aclass PathForm(forms.Form):%0A %22%22%22Used by the RepoPathHandler to check data from API clients%22%22%22%0A teamid = forms.IntegerField(required=True)%0A%0A%0Aclass TagListForm(forms.Form):%0A %22%22%22Used by the RepoTagListHandler to obtain a list of tags%22%22%22%0A competition = forms.CharField(required=True)%0A
|
acd888679ef3005f36b9c5381da2b25440098ff0
|
clean up
|
doom.py
|
doom.py
|
import ppaquette_gym_doom
import ppaquette_gym_doom.wrappers
import numpy.random as npr
import numpy as np
import gym
import tensorflow as tf
from PIL import Image
ENV = gym.make('ppaquette/DoomBasic-v0')
wrapper = ppaquette_gym_doom.wrappers.action_space.ToDiscrete('minimal')
ENV = wrapper(ENV)
X, Y = 320, 240
#X, Y = 160, 120
XY = X * Y
wrapper = ppaquette_gym_doom.wrappers.observation_space.SetResolution('%sx%s'%(X, Y))
ENV = wrapper(ENV)
#wrapper = ppaquette_gym_doom.wrappers.control.SetPlayingMode('human')
#ENV = wrapper(ENV)
class DoomAgent(object):
def __init__(self):
self.episode_states_rgb = tf.placeholder(tf.float32, [None, Y, X, 3])
#self.episode_state_grey = tf.image.rgb_to_grayscale(self.episode_states_rgb)
self.episode_state_grey = self.episode_states_rgb[:,:,:,1] - 128
batch_size = tf.shape(self.episode_state_grey)[0]
self.episode_state_as_vector = tf.reshape(self.episode_state_grey, [batch_size, XY])
#one fully connected layer so far
W1 = tf.Variable(tf.zeros([XY, 64]))
b1 = tf.Variable(tf.zeros([64]))
self.O1 = tf.nn.relu(tf.matmul(self.episode_state_as_vector, W1) + b1)
#output layer
W2 = tf.Variable(tf.zeros([64, 4]))
self.b2 = tf.Variable(tf.zeros([1, 4]))
self.O2 = tf.nn.softmax(tf.matmul(self.O1, W2) + self.b2)
#self.O2 = tf.nn.softmax(self.b2)
self.actions = tf.placeholder(tf.uint8, [None], 'actions')
self.actions_one_hot = tf.one_hot(self.actions, 4, on_value = 1.0, off_value = 0.0, axis = -1)
self.rewards = tf.placeholder(tf.float32, [None, 1], 'rewards')
self.per_action_log_prob = tf.reduce_sum(self.O2 * self.actions_one_hot, reduction_indices = [1])
#regularised_loss = -tf.reduce_sum((rewards - tau - tau * tf.log(action_probs)) * tf.log(action_probs * (1 - actions) + actions * (1 - action_probs)), reduction_indices=[0])
self.loss = -tf.reduce_sum(self.rewards * tf.log(self.per_action_log_prob))
self.train_step = tf.train.AdamOptimizer(0.01).minimize(self.loss)
init = tf.initialize_all_variables()
self.sess = tf.Session()
self.sess.run(init)
def start_episode(self):
self.episode_rewards = []
self.episode_observations = []
self.episode_actions = []
def feedback(self, reward):
self.episode_rewards.append(reward)
def end_episode(self):
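        # reversed cumulative sum: each entry becomes the total reward collected
        # from that step to the end of the episode (reward-to-go)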
rewards = np.cumsum(np.array(self.episode_rewards)[::-1])[::-1]
rewards = rewards.reshape((rewards.shape[0], 1))
observations = np.array(self.episode_observations)
actions = np.array(self.episode_actions)
#actions = actions.reshape((actions.shape[0], 1))
_, loss, o2, b2 = self.sess.run([self.train_step, self.loss, self.O2, self.b2], feed_dict={self.episode_states_rgb: observations, self.rewards: rewards, self.actions: actions})
#loss, o2, t = self.sess.run([self.loss, self.O2, self.per_action_log_prob], feed_dict={self.episode_states_rgb: observations, self.rewards: rewards, self.actions: actions})
print 'loss', loss
#print 'o2', o2
#print 'b2', b2
def act(self, ob):
ob_as_batch = ob.reshape((1, Y, X, 3))
[o2, grey_image] = self.sess.run([self.O2, self.episode_state_grey], feed_dict={self.episode_states_rgb: ob_as_batch})
grey_image = grey_image.reshape((Y, X))
#print ob.shape
#print grey_image.shape
#img = Image.fromarray(grey_image, "F")
#img.show()
#exit(0)
#save('my.png')
#print self.O2
action = npr.multinomial(1, o2[0, :]).argmax()
self.episode_actions.append(action)
self.episode_observations.append(ob)
return action
def learn(episodes, max_steps_per_episode, visualisation_freq):
outdir = './output/'
agent = DoomAgent()
for episode in xrange(episodes):
if (episode + 1) % visualisation_freq == 0:
ENV.monitor.start(outdir, force=True)
done = False
ob = ENV.reset()
rewards = []
agent.start_episode()
j = 0
while not done and j < 5 * 35: #5 sec
j += 1
action = agent.act(ob)
new_ob, reward, done, info = ENV.step(action)
agent.feedback(reward)
ob = new_ob
if done:
break
print info['TOTAL_REWARD']
agent.end_episode()
if (episode + 1) % visualisation_freq == 0:
ENV.monitor.close()
if __name__ == '__main__':
learn(500, None, 100)
|
Python
| 0.000001
|
@@ -1755,190 +1755,8 @@
1%5D)%0A
- #regularised_loss = -tf.reduce_sum((rewards - tau - tau * tf.log(action_probs)) * tf.log(action_probs * (1 - actions) + actions * (1 - action_probs)), reduction_indices=%5B0%5D)%0A
@@ -1771,17 +1771,16 @@
.loss =
--
tf.reduc
@@ -4411,16 +4411,18 @@
earn(500
+00
, None,
|
2ebe8c55a70d3066eacd7e71c32d6fd95d4d21fd
|
fix a typo and a bug in ImageList.from_tensors
|
detectron2/structures/image_list.py
|
detectron2/structures/image_list.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from __future__ import division
from typing import Any, List, Tuple
import torch
from torch import device
from torch.nn import functional as F
from detectron2.utils.env import TORCH_VERSION
def _as_tensor(x: Tuple[int, int]) -> torch.Tensor:
"""
An equivalent of `torch.as_tensor`, but works under tracing if input
    is a list of tensors. `torch.as_tensor` will record a constant in tracing,
but this function will use `torch.stack` instead.
"""
if torch.jit.is_scripting():
return torch.as_tensor(x)
if isinstance(x, (list, tuple)) and all([isinstance(t, torch.Tensor) for t in x]):
return torch.stack(x)
return torch.as_tensor(x)
class ImageList(object):
"""
Structure that holds a list of images (of possibly
varying sizes) as a single tensor.
This works by padding the images to the same size,
and storing in a field the original sizes of each image
Attributes:
image_sizes (list[tuple[int, int]]): each tuple is (h, w)
"""
def __init__(self, tensor: torch.Tensor, image_sizes: List[Tuple[int, int]]):
"""
Arguments:
tensor (Tensor): of shape (N, H, W) or (N, C_1, ..., C_K, H, W) where K >= 1
image_sizes (list[tuple[int, int]]): Each tuple is (h, w). It can
be smaller than (H, W) due to padding.
"""
self.tensor = tensor
self.image_sizes = image_sizes
def __len__(self) -> int:
return len(self.image_sizes)
def __getitem__(self, idx) -> torch.Tensor:
"""
Access the individual image in its original size.
Args:
idx: int or slice
Returns:
Tensor: an image of shape (H, W) or (C_1, ..., C_K, H, W) where K >= 1
"""
size = self.image_sizes[idx]
return self.tensor[idx, ..., : size[0], : size[1]]
@torch.jit.unused
def to(self, *args: Any, **kwargs: Any) -> "ImageList":
cast_tensor = self.tensor.to(*args, **kwargs)
return ImageList(cast_tensor, self.image_sizes)
@property
def device(self) -> device:
return self.tensor.device
@staticmethod
def from_tensors(
tensors: List[torch.Tensor], size_divisibility: int = 0, pad_value: float = 0.0
) -> "ImageList":
"""
Args:
tensors: a tuple or list of `torch.Tensors`, each of shape (Hi, Wi) or
(C_1, ..., C_K, Hi, Wi) where K >= 1. The Tensors will be padded
to the same shape with `pad_value`.
size_divisibility (int): If `size_divisibility > 0`, add padding to ensure
the common height and width is divisible by `size_divisibility`.
This depends on the model and many models need a divisibility of 32.
pad_value (float): value to pad
Returns:
an `ImageList`.
"""
assert len(tensors) > 0
assert isinstance(tensors, (tuple, list))
for t in tensors:
assert isinstance(t, torch.Tensor), type(t)
assert t.shape[1:-2] == tensors[0].shape[1:-2], t.shape
image_sizes = [(im.shape[-2], im.shape[-1]) for im in tensors]
image_sizes_tensor = [_as_tensor(x) for x in image_sizes]
max_size = torch.stack(image_sizes_tensor).max(0).values
if size_divisibility > 1:
stride = size_divisibility
# the last two dims are H,W, both subject to divisibility requirement
max_size = (max_size + (stride - 1)) // stride * stride
# handle weirdness of scripting and tracing ...
if torch.jit.is_scripting():
max_size: List[int] = max_size.to(dtype=torch.long).tolist()
else:
# https://github.com/pytorch/pytorch/issues/42448
if TORCH_VERSION >= (1, 7) and torch.jit.is_tracing():
image_sizes = image_sizes_tensor
if len(tensors) == 1:
# This seems slightly (2%) faster.
# TODO: check whether it's faster for multiple images as well
image_size = image_sizes[0]
padding_size = [0, max_size[-1] - image_size[1], 0, max_size[-2] - image_size[0]]
batched_imgs = F.pad(tensors[0], padding_size, value=pad_value).unsqueeze_(0)
else:
# max_size can be a tensor in tracing mode, therefore convert to list
batch_shape = [len(tensors)] + list(tensors[0].shape[:-2]) + list(max_size)
batched_imgs = tensors[0].new_full(batch_shape, pad_value)
for img, pad_img in zip(tensors, batched_imgs):
pad_img[..., : img.shape[-2], : img.shape[-1]].copy_(img)
return ImageList(batched_imgs.contiguous(), image_sizes)
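# A small usage sketch (shapes made up for illustration):
#   imgs = [torch.rand(3, 480, 640), torch.rand(3, 400, 500)]
#   il = ImageList.from_tensors(imgs, size_divisibility=32)
#   il.tensor.shape  ->  torch.Size([2, 3, 480, 640])   (padded to common size)
#   il[1].shape      ->  torch.Size([3, 400, 500])      (original size recovered)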
|
Python
| 0.998763
|
@@ -2420,17 +2420,16 @@
h.Tensor
-s
%60, each
@@ -3124,25 +3124,24 @@
ert t.shape%5B
-1
:-2%5D == tens
@@ -3153,17 +3153,16 @@
%5D.shape%5B
-1
:-2%5D, t.
|
4d6febe66a3db7f63f579620000899e175fd2204
|
verbose keyerror
|
client/python/spiff.py
|
client/python/spiff.py
|
import requests
import json
import logging
import datetime
log = logging.getLogger('spiff')
class ServerError(Exception):
def __init__(self, message):
super(ServerError, self).__init__()
self.__msg = message
def __str__(self):
return self.__msg
def __repr__(self):
return "ServerError(%r)"%(self.__msg)
class SpiffObjectEncoder(json.JSONEncoder):
def __init__(self, api, *args, **kwargs):
super(SpiffObjectEncoder, self).__init__(*args, **kwargs)
self.__api = api
def default(self, obj):
if isinstance(obj, SpiffObject):
return obj.resource_uri
if isinstance(obj, datetime.date):
return str(obj)
if isinstance(obj, ObjectList):
l = []
for o in obj:
l.append(o.resource_uri)
return self.default(l)
try:
iterable = iter(obj)
except TypeError:
pass
else:
return list(iterable)
return super(SpiffObjectEncoder, self).default(obj)
class API(object):
def __init__(self, uri, verify=True):
super(API, self).__init__()
self.__uri = uri
self.__token = None
self.__verify = verify
def __repr__(self):
return "API(%r)"%(self.__uri)
def __str__(self):
return repr(self)
def subUri(self, uri):
return '/'.join((self.__uri, uri))
def getRequestHeaders(self):
headers = {
'accept': 'application/json',
'content-type': 'application/json'
}
if self.__token:
headers['authorization'] = 'Bearer '+self.__token
return headers
def getRaw(self, uri, **kwargs):
log.debug("Requesting via GET %s: %r", uri, kwargs)
if uri[0] == '/':
uri = uri[1:]
return requests.get(
self.subUri(uri),
verify=self.__verify,
headers=self.getRequestHeaders(),
params=kwargs
)
def postRaw(self, uri, value):
data = SpiffObjectEncoder(self, indent=2).encode(value)
log.debug("Requesting via POST %s: %s", uri, data)
return requests.post(
self.subUri(uri),
data=data,
verify=self.__verify,
headers=self.getRequestHeaders()
)
def get(self, uri, status=200, **kwargs):
return self.processResponse(self.getRaw(uri, **kwargs), status)
def post(self, uri, status=201, **kwargs):
return self.processResponse(self.postRaw(uri, kwargs), status)
def patch(self, uri, status=200, **kwargs):
return self.processResponse(self.patchRaw(uri, kwargs), status)
def patchRaw(self, uri, value):
data = SpiffObjectEncoder(self, indent=2).encode(value)
log.debug("Requesting via PATCH %s: %s", uri, data)
return requests.patch(
self.subUri(uri),
data=data,
verify=self.__verify,
headers=self.getRequestHeaders()
)
def processResponse(self, response, status):
if response.status_code != status:
if len(response.content):
try:
errorMsg = response.json()
except ValueError:
raise ServerError(response.content)
if 'traceback' in errorMsg:
raise ServerError(errorMsg['traceback'])
else:
raise ServerError(str(errorMsg['error']))
else:
response.raise_for_status()
return response.json()
def login(self, username, password):
ret = self.post('v1/member/login/', username=username, password=password,
status=200)
if 'token' in ret:
self.__token = ret['token']
return True
return False
@property
def uri(self):
return self.__uri
def getOne(self, type, id=None):
if id is None:
if isinstance(type, SpiffObject):
return SpiffObject(self, self.get(type.resource_uri))
else:
return SpiffObject(self, self.get(type))
return SpiffObject(self, self.get('%s/%s'%(type, id)))
def getList(self, type, **kwargs):
return ObjectList(self, type, kwargs)
def create(self, type, **kwargs):
return SpiffObject(self, self.post('v1/%s/'%(type), **kwargs))
class ObjectList(object):
def __init__(self, api, type, filters={}):
self.__api = api
self.__type = type
self.__cache = {}
self.__count = 20
self.__max = -1
self.__filters = {}
for name, value in filters.iteritems():
if isinstance(value, SpiffObject):
self.__filters[name] = value.id
else:
self.__filters[name] = value
self.__loadSlice(0)
def __len__(self):
if self.__max == -1:
self.__loadSlice(0)
return self.__max
def __loadSlice(self, offset, count=None):
if count is None:
count = self.__count
params = {}
params.update(self.__filters)
params['offset'] = offset
params['count'] = count
data = self.__api.get('/'.join(('v1', self.__type)), **params)
self.__max = data['meta']['total_count']
for i in range(0, len(data['objects'])):
self.__cache[i+offset] = SpiffObject(self.__api,
data['objects'][i])
def __getitem__(self, offset):
if isinstance(offset, slice):
ret = []
step = offset.step
if step is None:
step = 1
for i in range(offset.start, offset.stop, step):
ret.append(self[i])
return ret
if self.__max >= 0 and offset >= self.__max:
raise KeyError
if offset not in self.__cache:
self.__loadSlice(offset)
return self.__cache[offset]
def __iter__(self):
return ObjectListIter(self)
def __str__(self):
return repr(self)
def __repr__(self):
total = len(self)
if total >= 5:
return repr(self[0:3]+['...'])
return repr(self[0:len(self)])
class ObjectListIter(object):
def __init__(self, objectList):
self.__list = objectList
self.__pos = -1
def __iter__(self):
return self
def next(self):
self.__pos += 1
if self.__pos >= len(self.__list):
raise StopIteration
return self.__list[self.__pos]
class SpiffObject(object):
def __init__(self, api, data, type=None):
self.__api = api
self.__data = data
assert(isinstance(self.__data, dict))
if type is None:
self.__type = self.__data['resource_uri'].split('/')[1]
else:
self.__type = type
@property
def type(self):
return self.__type
def update(self, **kwargs):
for k,v in kwargs.iteritems():
self.__data[k] = v
self.__api.post('/'.join(('v1', self.__type)), kwargs)
def __repr__(self):
return "SpiffObject(%r, %r, %r)"%(self.__api, self.__data,
self.__type)
def __unicode__(self):
return str(self)
def __str__(self):
return "%s(%r)"%(self.__type,
SpiffObjectEncoder(self.__api, indent=4).encode(self.__data))
def __getattr__(self, name):
return self.__data[name]
def __getitem__(self, key):
return self.__data[key]
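# A minimal usage sketch (hypothetical server URI and credentials):
#   api = API('https://example.com/api')
#   if api.login('alice', 'hunter2'):       # POSTs to v1/member/login/
#       members = api.getList('member')     # pages lazily through v1/member
#       print(members[0].resource_uri)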
|
Python
| 0.998939
|
@@ -5173,16 +5173,24 @@
KeyError
+(offset)
%0A if
|
83a74c7013bd48f2dd7b6a302571bc2caf54b0e9
|
drop redundant coersion helper for EX
|
diofant/domains/expressiondomain.py
|
diofant/domains/expressiondomain.py
|
"""Implementation of :class:`ExpressionDomain` class. """
from ..core import SympifyError, sympify
from .characteristiczero import CharacteristicZero
from .field import Field
from .simpledomain import SimpleDomain
__all__ = ('ExpressionDomain',)
class ExpressionDomain(Field, CharacteristicZero, SimpleDomain):
"""A class for arbitrary expressions. """
is_SymbolicDomain = is_EX = True
class Expression:
"""An arbitrary expression. """
def __init__(self, ex):
if not isinstance(ex, self.__class__):
self.ex = sympify(ex)
else:
self.ex = ex.ex
def __str__(self):
return 'EX(%s)' % str(self.ex)
def __hash__(self):
return hash((self.__class__.__name__, self.ex))
def as_expr(self):
return self.ex
@property
def numerator(self):
return self.__class__(self.ex.as_numer_denom()[0])
@property
def denominator(self):
return self.__class__(self.ex.as_numer_denom()[1])
def simplify(self, ex):
return self.__class__(ex.cancel())
def __abs__(self):
return self.__class__(abs(self.ex))
def __neg__(self):
return self.__class__(-self.ex)
def _to_ex(self, other):
try:
return self.__class__(other)
except SympifyError:
return
def __add__(self, other):
other = self._to_ex(other)
if other is not None:
return self.simplify(self.ex + other.ex)
else:
return NotImplemented
def __radd__(self, other):
return self.simplify(self.__class__(other).ex + self.ex)
def __sub__(self, other):
other = self._to_ex(other)
if other is not None:
return self.simplify(self.ex - other.ex)
else:
return NotImplemented
def __rsub__(self, other):
return self.simplify(self.__class__(other).ex - self.ex)
def __mul__(self, other):
other = self._to_ex(other)
if other is not None:
return self.simplify(self.ex*other.ex)
else:
return NotImplemented
def __rmul__(self, other):
return self.simplify(self.__class__(other).ex*self.ex)
def __pow__(self, n):
n = self._to_ex(n)
if n is not None:
return self.simplify(self.ex**n.ex)
else:
return NotImplemented
def __truediv__(self, other):
other = self._to_ex(other)
if other is not None:
return self.simplify(self.ex/other.ex)
else:
return NotImplemented
def __rtruediv__(self, other):
return self.simplify(self.__class__(other).ex/self.ex)
def __eq__(self, other):
return self.ex == self.__class__(other).ex
def __bool__(self):
return self.ex != 0
def gcd(self, other):
from ..polys import gcd
return self.__class__(gcd(self.ex, self.__class__(other).ex))
def lcm(self, other):
from ..polys import lcm
return self.__class__(lcm(self.ex, self.__class__(other).ex))
dtype = Expression
zero = Expression(0)
one = Expression(1)
rep = 'EX'
has_assoc_Ring = False
has_assoc_Field = True
def to_expr(self, a):
"""Convert ``a`` to a Diofant object. """
return a.as_expr()
def from_expr(self, a):
"""Convert Diofant's expression to ``dtype``. """
return self.dtype(a)
def _from_PythonIntegerRing(self, a, K0):
return self(K0.to_expr(a))
def _from_PythonRationalField(self, a, K0):
return self(K0.to_expr(a))
def _from_GMPYIntegerRing(self, a, K0):
return self(K0.to_expr(a))
def _from_GMPYRationalField(self, a, K0):
return self(K0.to_expr(a))
def _from_RealField(self, a, K0):
return self(K0.to_expr(a))
def _from_PolynomialRing(self, a, K0):
return self(K0.to_expr(a))
def _from_FractionField(self, a, K0):
return self(K0.to_expr(a))
def _from_ExpressionDomain(self, a, K0):
return a
def _from_AlgebraicField(self, a, K0):
return self(K0.to_expr(a))
@property
def ring(self):
"""Returns a ring associated with ``self``. """
return self # XXX: EX is not a ring but we don't have much choice here.
def is_positive(self, a):
"""Returns True if ``a`` is positive. """
return a.ex.as_coeff_mul()[0].is_positive
def is_negative(self, a):
"""Returns True if ``a`` is negative. """
return a.ex.as_coeff_mul()[0].is_negative
def is_nonpositive(self, a):
"""Returns True if ``a`` is non-positive. """
return a.ex.as_coeff_mul()[0].is_nonpositive
def is_nonnegative(self, a):
"""Returns True if ``a`` is non-negative. """
return a.ex.as_coeff_mul()[0].is_nonnegative
def gcd(self, a, b):
return a.gcd(b)
def lcm(self, a, b):
return a.lcm(b)
EX = ExpressionDomain()
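# Illustrative arithmetic on the wrapped expressions (a sketch):
#   a = ExpressionDomain.Expression('x')
#   b = ExpressionDomain.Expression(2)
#   a + b   ->  EX(x + 2)    (results pass through simplify()/cancel())
#   bool(EX.zero), bool(EX.one)  ->  (False, True)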
|
Python
| 0
|
@@ -4317,71 +4317,8 @@
))%0A%0A
- def _from_ExpressionDomain(self, a, K0):%0A return a%0A%0A
|
f14192b0e71e9bee1456bd240ba200920abc246b
|
add comment
|
direct/src/directnotify/Notifier.py
|
direct/src/directnotify/Notifier.py
|
"""Notifier module: contains methods for handling information output
for the programmer/user"""
from LoggerGlobal import *
import time
class Notifier:
def __init__(self, name, logger=None):
"""__init__(self, string, Logger=None)
Create a new instance of the Notifier class with a given name
and an optional Logger class for piping output to. If no logger
specified, use the global default"""
self.__name = name
if (logger==None):
self.__logger = defaultLogger
else:
self.__logger = logger
# Global default levels are initialized here
self.__info = 1
self.__warning = 1
self.__debug = 0
self.__logging = 0
def getTime(self):
"""
Return the time as a string suitable for printing at the
head of any notify message
"""
return time.strftime(":%m-%d-%Y %H:%M:%S ", time.localtime(time.time()))
def __str__(self):
"""__str__(self)
Print handling routine"""
return "%s: info = %d, warning = %d, debug = %d, logging = %d" % \
(self.__name, self.__info, self.__warning, self.__debug, self.__logging)
# Severity funcs
def setSeverity(self, severity):
from NotifySeverity import *
if severity >= NSError:
self.setWarning(0)
self.setInfo(0)
self.setDebug(0)
elif severity == NSWarning:
self.setWarning(1)
self.setInfo(0)
self.setDebug(0)
elif severity == NSInfo:
self.setWarning(1)
self.setInfo(1)
self.setDebug(0)
elif severity <= NSDebug:
self.setWarning(1)
self.setInfo(1)
self.setDebug(1)
def getSeverity(self):
from NotifySeverity import *
if self.getDebug():
return NSDebug
elif self.getInfo():
return NSInfo
elif self.getWarning():
return NSWarning
else:
return NSError
# error funcs
def error(self, errorString, exception=StandardError):
"""error(self, string, Exception=StandardError)
Raise an exception with given string and optional type:
Exception: error"""
string = (self.getTime() + str(exception) + ": " + self.__name + ": " + errorString)
self.__log(string)
raise exception(errorString)
# warning funcs
def warning(self, warningString):
"""warning(self, string)
Issue the warning message if warn flag is on"""
if (self.__warning):
string = (self.getTime() + self.__name + '(warning): ' + warningString)
self.__log(string)
print(string)
return 1 # to allow assert(myNotify.warning("blah"))
def setWarning(self, bool):
"""setWarning(self, int)
Enable/Disable the printing of warning messages"""
self.__warning = bool
def getWarning(self):
"""getWarning(self)
Return whether the printing of warning messages is on or off"""
return(self.__warning)
# debug funcs
def debug(self, debugString):
"""debug(self, string)
Issue the debug message if debug flag is on"""
if (self.__debug):
string = (self.getTime() + self.__name + '(debug): ' + debugString)
self.__log(string)
print(string)
return 1 # to allow assert(myNotify.debug("blah"))
def setDebug(self, bool):
"""setDebug(self, int)
Enable/Disable the printing of debug messages"""
self.__debug = bool
def getDebug(self):
"""getDebug(self)
Return whether the printing of debug messages is on or off"""
return(self.__debug)
# info funcs
def info(self, infoString):
"""info(self, string)
Print the given informational string, if info flag is on"""
if (self.__info):
string = (self.getTime() + self.__name + '(info): ' + infoString)
self.__log(string)
print(string)
return 1 # to allow assert(myNotify.info("blah"))
def getInfo(self):
"""getInfo(self)
Return whether the printing of info messages is on or off"""
return(self.__info)
def setInfo(self, bool):
"""setInfo(self, int)
Enable/Disable informational message printing"""
self.__info = bool
# log funcs
def __log(self, logEntry):
"""__log(self, string)
Determine whether to send informational message to the logger"""
if (self.__logging):
self.__logger.log(logEntry)
def getLogging(self):
"""getLogging(self)
Return 1 if logging enabled, 0 otherwise"""
return (self.__logging)
def setLogging(self, bool):
"""setLogging(self, int)
Set the logging flag to int (1=on, 0=off)"""
self.__logging = bool
|
Python
| 0
|
@@ -869,32 +869,187 @@
sage%0A %22%22%22
+%0A%0A # for some strange reason, time.time() updates only once/minute if%0A # the task is out of focus on win32. time.clock doesnt have this prob
%0A return
|
7be087d4bd62557acd64a9e703443a8df404e3e3
|
Make storage size an int
|
director/sessions_/providers/ec2.py
|
director/sessions_/providers/ec2.py
|
import time
from django.conf import settings
import boto.ec2
class EC2:
'''
Amazon EC2 provider for session Workers
'''
def connection(self):
'''
Get a EC2 connection to use in start() and stop()
'''
return boto.ec2.connect_to_region(
'us-west-2',
aws_access_key_id=settings.AWS_ACCESS_KEY_ID,
aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY
)
def ami(self):
'''
Get the id of the AMI (Amazon Machine Image)
for the worker. This changes everytime the worker
image is updated so needs to be fetched by name.
It is tempting to store the id but that means it could go stale when updates
are done. Since workers won't be started that often better just to suffer the
slight wait to get the image id.
'''
connection = self.connection()
image = connection.get_all_images(
filters={'name': 'stencila-worker-image'}
)[0]
return image.id
def launch(self, worker):
'''
Translates the worker's attributes into attributes of an EC2 instance and launches it
'''
connection = self.connection()
# Determine the instance type
# Currently a very simplistic choice of instance type
# until various optimisations are done.
# See https://aws.amazon.com/ec2/pricing/
instance_type = 't2.micro'
# Note these if statements act like a series of instance
# type upgrades, not a branching if/else.
        # Also, because only discrete combinations of CPU and memory are
        # available, there is no guarantee that your exact combination
        # will be met
if worker.cpus >= 1 and worker.memory >= 2:
instance_type = 't2.small'
if worker.cpus >= 2 and worker.memory >= 4:
instance_type = 't2.medium'
if worker.cpus >= 2 and worker.memory >= 8:
instance_type = 't2.large'
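        # e.g. a worker requesting cpus=2 and memory=8 passes through all three
        # upgrades above and lands on 't2.large'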
# Specify root storage device
dev_sda1 = boto.ec2.blockdevicemapping.EBSBlockDeviceType()
dev_sda1.size = worker.storage
        dev_sda1.volume_type = 'gp2' # General Purpose (SSD) instead of the default 'standard' (magnetic)
block_device_map = boto.ec2.blockdevicemapping.BlockDeviceMapping()
block_device_map['/dev/sda1'] = dev_sda1
reservation = connection.run_instances(
image_id=self.ami(),
min_count=1,
max_count=1,
key_name='stencila-aws-us-west-2-key-pair-1',
instance_type=instance_type,
# stencila-private-subnet-1
subnet_id='subnet-a0599cf9',
# When launching into a subnet apparently `security_group_ids` must
# be used instead of `security_groups` (names)
security_group_ids=[
# stencila-worker-sg
'sg-930401f6'
],
block_device_map=block_device_map
)
instance = reservation.instances[0]
        # Number of seconds before timing out while waiting for the server to launch
timeout = 120
start = time.time()
while True:
status = instance.update()
if status != 'pending':
break
if time.time()-start > timeout:
raise Exception('Timed out trying to start worker: %s' % worker)
time.sleep(1)
if status == 'running':
worker.provider_id = instance.id
worker.ip = instance.private_ip_address
instance.add_tag("Name", "stencila-worker")
else:
raise Exception('Failed to start worker: %s : %s' % (worker, status))
def terminate(self, worker):
connection = self.connection()
connection.terminate_instances(
instance_ids=[worker.provider_id]
)
|
Python
| 0.000123
|
@@ -2125,16 +2125,20 @@
.size =
+int(
worker.s
@@ -2143,16 +2143,17 @@
.storage
+)
%0A
|
a369a9648e1c0bc2cb2a4964fd2f5814d0950902
|
echo 4
|
echo.py
|
echo.py
|
import IPython
import warnings
# These are actually used for the application
import collections
from golix import Ghid
from hypergolix.service import HypergolixLink
hgxlink = HypergolixLink(threaded=True)
desktop = Ghid(algo=1, address=b'\xc0TZ\x15+\x9a\x8e\x01\xbbvw\x83\xc8%\xd5RG\x9c8<\xf7\x1f\xa4e\x08\xc4\x9a\xa0o\x15\x83f\xf2>P/\xc1\xfbj3\xd6\xa9M\x03z\x98\x1b\xa7U\xb9b\xf3 \xfd\x81T+\xb3\x14\xaa\xcf$s\xac')
razpi = Ghid(algo=1, address=b'D\xe90\x1bpr\xd3\xed\xdd\xac-,\xa9{i\xca{[\xa8\x9fy\xe4\xf2C\x0fv\x18\xa4}\xd9\xa9)=+\xe0F\xd8j~6\x07H\xadD\xb9\xa9x/\x9a\xab\x9e\x8e\xe6\x03\xe9\xaf\xd7\xbaH\x08"w\xa1>')
# Declare api
request_api = bytes(64) + b'\x01'
response_api = bytes(64) + b'\x02'
# Store objects
incoming_requests = collections.deque(maxlen=10)
incoming_responses = collections.deque(maxlen=10)
outgoing_responses = collections.deque(maxlen=10)
def request_handler(obj):
incoming_requests.appendleft(obj)
reply = hgxlink.new_object(
state = obj.state,
dynamic = True,
api_id = response_api
)
reply.share(recipient=obj.author)
outgoing_responses.appendleft(reply)
def response_handler(obj):
incoming_responses.appendleft(obj)
# register api
hgxlink.register_api(request_api, object_handler=request_handler)
hgxlink.register_api(response_api, object_handler=response_handler)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
IPython.embed()
# Not strictly necessary but suppresses warnings
hgxlink.halt()
|
Python
| 0.999988
|
@@ -89,16 +89,28 @@
lections
+%0Aimport time
%0A%0Afrom g
@@ -712,16 +712,599 @@
'%5Cx02'%0A%0A
+# Etc%0Atimer = collections.deque(%5B0,0%5D, maxlen=2)%0Arecipients = %7B razpi, desktop %7D - %7B hgxlink.whoami %7D%0A%0Adef make_request(msg):%0A obj = hgxlink.new_object(%0A state = msg,%0A dynamic = True,%0A api_id = request_api%0A )%0A %0A for recipient in recipients:%0A obj.share(recipient)%0A %0A return obj%0A %0Adef timed_update(obj, msg):%0A timer.appendleft(time.monotonic())%0A obj.update(msg)%0A %0Adef timed_update_callback(obj):%0A timer.appendleft(time.monotonic())%0A elapsed = timer%5B0%5D - timer%5B1%5D%0A print('Update mirrored in', elapsed, 'seconds.')%0A%0A
# Store
@@ -1732,34 +1732,196 @@
%0A
-def response_handler(obj):
+ def state_mirror(source_obj):%0A reply.update(source_obj.state)%0A %0A obj.add_callback(state_mirror)%0A %0Adef response_handler(obj):%0A obj.add_callback(timed_update_callback)
%0A
|
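The hunk is easier to read decoded: it adds import time, a two-slot deque used as a stopwatch, a make_request() helper, timed_update()/timed_update_callback(), and a state_mirror callback registered inside request_handler(). The timing core on its own, with the hypergolix objects stubbed out:

import collections
import time

# Two-slot stopwatch: index 0 holds the latest timestamp, index 1 the
# previous one, so their difference is the round-trip time.
timer = collections.deque([0, 0], maxlen=2)

def timed_update_callback():
    timer.appendleft(time.monotonic())
    elapsed = timer[0] - timer[1]
    print('Update mirrored in', elapsed, 'seconds.')

timer.appendleft(time.monotonic())  # stands in for timed_update(obj, msg)
timed_update_callback()             # stands in for the mirror callback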
4d86d5ffa27f5ee365bc832113cd6605a23d10d3
|
Update src/acquisition/covidcast_nowcast/load_sensors.py
|
src/acquisition/covidcast_nowcast/load_sensors.py
|
src/acquisition/covidcast_nowcast/load_sensors.py
|
from shutil import move
import time
import delphi.operations.secrets as secrets
import pandas as pd
import sqlalchemy
from delphi.epidata.acquisition.covidcast.csv_importer import CsvImporter
SENSOR_CSV_PATH = "/common/covidcast-nowcast/receiving/"
TABLE_NAME = "covidcast_nowcast"
DB_NAME = "epidata"
CSV_DTYPES = {"geo_value": str, "value": float}
def main(csv_path: str = SENSOR_CSV_PATH) -> None:
"""
Parse all files in a given directory and insert them into the sensor table in the database.
For all the files found recursively in csv_path that match the naming scheme specified by
CsvImporter.find_csv_files(), attempt to load the insert them into the database. Files which do
not match the naming scheme will be moved to an archive/failed folder and skipped, and files
which raise an error during loading/uploading will be moved to the archive/failed folder and
have the error raised.
Parameters
----------
csv_path
Path to folder containing files to load.
Returns
-------
None.
"""
user, pw = secrets.db.epi
engine = sqlalchemy.create_engine(f"mysql+pymysql://{user}:{pw}@{secrets.db.host}/{DB_NAME}")
for filepath, attributes in CsvImporter.find_csv_files(csv_path):
if attributes is None:
move(filepath, filepath.replace("receiving", "archive/failed"))
continue
try:
data = load_and_prepare_file(filepath, attributes)
data.to_sql(TABLE_NAME, engine, if_exists="append", index=False)
except Exception:
move(filepath, filepath.replace("receiving", "archive/failed"))
raise
move(filepath, filepath.replace("receiving", "archive/successful"))
def load_and_prepare_file(filepath: str, attributes: tuple) -> pd.DataFrame:
"""
Read CSV file into a DataFrame and add relevant attributes as new columns to match DB table.
Parameters
----------
filepath
Path to CSV file.
attributes
(source, signal, time_type, geo_type, time_value, issue, lag) tuple
returned by CsvImport.find_csv_files
Returns
-------
DataFrame with additional attributes added as columns based on filename and current date.
"""
source, signal, time_type, geo_type, time_value, issue_value, lag_value = attributes
data = pd.read_csv(filepath, dtype=CSV_DTYPES)
data["source"] = source
data["signal"] = signal
data["time_type"] = time_type
data["geo_type"] = geo_type
data["time_value"] = time_value
data["issue"] = issue_value
data["lag"] = lag_value
data["value_updated_timestamp"] = int(time.time())
return data
if __name__ == "__main__":
main()
|
Python
| 0
|
@@ -647,19 +647,19 @@
to load
-the
+and
insert
|
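Beyond the one-word docstring fix, the record shows a compact CSV-to-SQL loader. What load_and_prepare_file() produces can be sketched in memory, with an assumed attributes tuple in place of one from CsvImporter.find_csv_files():

import time

import pandas as pd

# Assumed stand-in for the (source, signal, time_type, geo_type,
# time_value, issue, lag) tuple that CsvImporter.find_csv_files() yields.
attributes = ('src', 'sig', 'day', 'county', 20201201, 20201202, 1)
source, signal, time_type, geo_type, time_value, issue_value, lag_value = attributes

# In-memory frame instead of pd.read_csv(filepath, dtype=CSV_DTYPES).
data = pd.DataFrame({'geo_value': ['06001'], 'value': [1.5]})
data['source'] = source
data['signal'] = signal
data['time_type'] = time_type
data['geo_type'] = geo_type
data['time_value'] = time_value
data['issue'] = issue_value
data['lag'] = lag_value
data['value_updated_timestamp'] = int(time.time())
print(data)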
4743932c54a7e3231857df022b328258121dfef1
|
Break for the case G.number_of_nodes() == 0
|
ComplexCiPython/ComplexCiPython.py
|
ComplexCiPython/ComplexCiPython.py
|
import networkx as nx
import csv
import time
import heapq
import os
import sys
from ci import collective_influence
def chunks(l, n, modelID):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
genL = l[i:i + n]
if len(genL)<n:
genL.extend(["" for j in range(n-len(genL))])
genL.insert(0, modelID)
yield genL
if __name__ == '__main__':
if len(sys.argv) < 7:
print("%s %s %s %s %s %s %s" % ("ComplexCiPython.py","[input file]","[output folder]","[ballRadius]","[batchNum]","[outputNum]","[methodCentrality]"))
input()
sys.exit(0);
ballRadius = int(sys.argv[3]);
batchNum = int(sys.argv[4]);
outputNum = int(sys.argv[5]);
methodCentrality = int(sys.argv[6]);
sourcePath = sys.argv[1]
modelID = os.path.basename(sourcePath).split('.')[0]
outputPath = os.path.join(sys.argv[2] , modelID +"_method_" + str(methodCentrality) + '.csv_out')
print("sourcePath: %s outputPath: %s ballRadius: %s batchNum: %s outputNum: %s methodCentrality: %s" % (sourcePath,outputPath,ballRadius,batchNum,outputNum, methodCentrality))
start_time = time.time()
G=nx.Graph();
with open(sourcePath) as f:
f_csv = csv.reader(f);
for row in f_csv:
G.add_edge(row[0],row[1]);
totalNode = G.number_of_nodes()
print("%s nodes are Loaded: " % totalNode)
print("--- %s seconds ---" % (time.time() - start_time))
finalResult = [];
while True:
if methodCentrality == 0:
ciMap = collective_influence(G,distance=ballRadius)
elif methodCentrality == 1:
ciMap = nx.degree_centrality(G)
elif methodCentrality == 2:
ciMap = nx.eigenvector_centrality(G)
elif methodCentrality == 3:
ciMap = nx.eigenvector_centrality_numpy(G)
elif methodCentrality == 4:
ciMap = nx.katz_centrality(G)
elif methodCentrality == 5:
ciMap = nx.katz_centrality_numpy(G)
elif methodCentrality == 6:
ciMap = nx.closeness_centrality(G)
elif methodCentrality == 7:
ciMap = nx.current_flow_closeness_centrality(G)
elif methodCentrality == 8:
ciMap = nx.betweenness_centrality(G)
elif methodCentrality == 9:
ciMap = nx.edge_betweenness_centrality(G)
elif methodCentrality == 10:
ciMap = nx.betweenness_centrality_subset(G)
elif methodCentrality == 11:
ciMap = nx.edge_betweenness_centrality_subset(G)
elif methodCentrality == 12:
ciMap = nx.current_flow_betweenness_centrality(G)
elif methodCentrality == 13:
ciMap = nx.edge_current_flow_betweenness_centrality(G)
elif methodCentrality == 14:
ciMap = nx.approximate_current_flow_betweenness_centrality(G)
elif methodCentrality == 15:
ciMap = nx.current_flow_betweenness_centrality_subset(G)
elif methodCentrality == 16:
ciMap = nx.edge_current_flow_betweenness_centrality_subset(G)
elif methodCentrality == 17:
ciMap = nx.communicability_betweenness_centrality(G)
elif methodCentrality == 18:
ciMap = nx.load_centrality(G)
elif methodCentrality == 19:
ciMap = nx.edge_load_centrality(G)
elif methodCentrality == 20:
ciMap = nx.subgraph_centrality(G)
elif methodCentrality == 21:
ciMap = nx.subgraph_centrality_exp(G)
elif methodCentrality == 22:
ciMap = nx.estrada_index(G)
elif methodCentrality == 23:
ciMap = nx.harmonic_centrality(G)
elif methodCentrality == 24:
ciMap = nx.local_reaching_centrality(G)
elif methodCentrality == 25:
ciMap = nx.global_reaching_centrality(G)
elif methodCentrality == 26:
ciMap = nx.pagerank(G)
elif methodCentrality == 27:
ciMap = nx.pagerank_numpy(G)
elif methodCentrality == 28:
ciMap = nx.pagerank_scipy(G)
else:
print("methodCentrality %s is not valid" % methodCentrality)
input()
sys.exit(0);
maxCiNodeIt = heapq.nlargest(batchNum, ciMap, key=ciMap.get)
G.remove_nodes_from(maxCiNodeIt);
finalResult.extend(maxCiNodeIt);
#print(maxCiNodeIt);
print("modelID: %s, Left nodes: %s, Total nodes: %s, MaxCiValue: %s, MaxCiNode: %s" % (modelID, G.number_of_nodes(), totalNode, ciMap[maxCiNodeIt[0]], maxCiNodeIt[0]))
if G.number_of_nodes()==1:
finalResult.extend(G.nodes());
break;
with open(outputPath, "w", newline='') as csv_file:
writer = csv.writer(csv_file, delimiter=',')
for line in chunks(finalResult,outputNum, modelID):
writer.writerow(line)
print("--- %s seconds ---" % (time.time() - start_time))
|
Python
| 0.999343
|
@@ -4715,16 +4715,71 @@
break;%0A%0A
+ if G.number_of_nodes()==0:%0A break;%0A%0A
with
|
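Decoded, the hunk adds a second guard next to the existing number_of_nodes() == 1 break: once removals empty the graph entirely, the while True loop could never reach that break. The new guard in isolation:

import networkx as nx

G = nx.Graph()
G.add_edge('a', 'b')
G.remove_nodes_from(['a', 'b'])

# The added guard: stop as soon as the graph is empty, since the
# number_of_nodes() == 1 break can no longer fire.
if G.number_of_nodes() == 0:
    print('graph exhausted, stopping')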
a82b3b5ba8d6fba12df1a3c1993325955da893b6
|
Fix a typo in comment. Thanks to tmm1 for watching after me.
|
lib/carbon/util.py
|
lib/carbon/util.py
|
import os
import pwd
from os.path import abspath, basename, dirname, join
from twisted.python.util import initgroups
from twisted.scripts.twistd import runApp
from twisted.scripts._twistd_unix import daemonize
daemonize = daemonize # Backwards compatibility
def dropprivs(user):
uid, gid = pwd.getpwnam(user)[2:4]
initgroups(uid, gid)
os.setregid(gid, gid)
os.setreuid(uid, uid)
return (uid, gid)
def run_twistd_plugin(filename):
from carbon.conf import get_parser
from twisted.scripts.twistd import ServerOptions
bin_dir = dirname(abspath(filename))
root_dir = dirname(bin_dir)
storage_dir = join(root_dir, 'storage')
os.environ.setdefault('GRAPHITE_ROOT', root_dir)
os.environ.setdefault('GRAPHITE_STORAGE_DIR', storage_dir)
program = basename(filename).split('.')[0]
# First, parse command line options as the legacy carbon scripts used to
# do.
parser = get_parser(program)
(options, args) = parser.parse_args()
if not args:
parser.print_usage()
return
# This isn't as evil as you might think
__builtins__["instance"] = options.instance
__builtins__["program"] = program
# Then forward applicable options to either twistd or to the plugin itself.
twistd_options = ["--no_save"]
# If no reactor was selected yet, try to use the epool reactor if
# available.
try:
from twisted.internet import epollreactor
twistd_options.append("--reactor=epoll")
except:
pass
if options.debug:
twistd_options.extend(["-n", "--logfile", "-"])
if options.profile:
twistd_options.append("--profile")
if options.pidfile:
twistd_options.extend(["--pidfile", options.pidfile])
# Now for the plugin-specific options.
twistd_options.append(program)
if options.debug:
twistd_options.append("--debug")
for option_name, option_value in vars(options).items():
if (option_value is not None and
option_name not in ("debug", "profile", "pidfile")):
twistd_options.extend(["--%s" % option_name.replace("_", "-"),
option_value])
# Finally, append extra args so that twistd has a chance to process them.
twistd_options.extend(args)
config = ServerOptions()
config.parseOptions(twistd_options)
runApp(config)
|
Python
| 0.000001
|
@@ -1340,17 +1340,17 @@
the epo
-o
+l
l reacto
|
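The typo lives in the comment above this selection logic: twistd should prefer the epoll reactor whenever Twisted provides one. The pattern in isolation, with the bare except narrowed to ImportError so the sketch runs even without Twisted installed:

twistd_options = ['--no_save']

# Prefer the epoll reactor when Twisted exposes it; fall back silently
# to the default reactor otherwise.
try:
    from twisted.internet import epollreactor  # noqa: F401
    twistd_options.append('--reactor=epoll')
except ImportError:
    pass

print(twistd_options)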
e611e9518945fa38165e8adf7103561f438b70b1
|
Add subcommand to process directory
|
interdiagram/bin/interdiagram.py
|
interdiagram/bin/interdiagram.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from typing import List
import click
import yaml
from ..models import Diagram
click.disable_unicode_literals_warning = True
# TODO: Correct documentation schema once it's frozen
@click.command()
@click.argument('yaml-file', nargs=-1, type=click.File())
@click.argument('output-file', type=click.Path(resolve_path=True))
def cli(
yaml_file: List,
output_file: str
) -> None:
"""Generate interaction/sitemap diagram.
Example: interdiagram data1.yaml data2.yaml output.pdf
The YAML spec is in the following format:
\b
sections: # App sections (pages)
Home: # Unique key for section
actions: # List of call to actions
- Sign up: # Action name
- Sign Up # Reference to another section or component
- Login:
- Login
- Search for registry: # Could be empty
components: # List of components in this section
- Experience cards:
- Experience Card
components: # Reusable components
Experience Card:
actions:
- Go to detail:
- Add to registry:
"""
diagram = Diagram()
for f in yaml_file:
# TODO: Validate against schema
diagram.process_spec(yaml.load(f))
diagram.draw(output_file)
if __name__ == '__main__':
cli()
|
Python
| 0
|
@@ -49,26 +49,95 @@
rom
-typing import List
+pathlib import Path%0Afrom typing import Iterable, List, TypeVar%0Afrom typing.io import IO
%0A%0Aim
@@ -241,69 +241,1072 @@
ue%0A%0A
-%0A# TODO: Correct documentation schema once it's frozen%0A@click
+FileType = TypeVar('FileType', IO, Path)%0A%0A%0Adef _is_file_obj(%0A f: FileType%0A) -%3E bool:%0A read_attr = getattr(f, 'read', None)%0A has_read_method = callable(read_attr)%0A return has_read_method%0A%0A%0Adef _draw_files(%0A files: Iterable%5BFileType%5D,%0A output_file: str%0A) -%3E None:%0A diagram = Diagram()%0A for f in files:%0A # TODO: Validate against schema%0A if not _is_file_obj(f):%0A f = f.open() # type: ignore%0A diagram.process_spec(yaml.load(f))%0A diagram.draw(output_file)%0A%0A%0A# TODO: Correct documentation schema once it's frozen%0A@click.group()%0Adef cli():%0A %22%22%22Generate interaction/sitemap diagram.%22%22%22%0A%0A%0A@cli.command('dir')%0A@click.argument(%0A 'directory',%0A type=click.Path(exists=True, file_okay=False, resolve_path=True)%0A)%0A@click.argument('output-file', type=click.Path(resolve_path=True))%0Adef directory(%0A directory: str,%0A output_file: str%0A) -%3E None:%0A %22%22%22Specify a directory where YAML files reside.%22%22%22%0A files = Path(directory).glob('**/*.y*ml')%0A _draw_files(files, output_file)%0A%0A%0A@cli
.com
@@ -1441,19 +1441,21 @@
e))%0Adef
-cli
+files
(%0A
@@ -1471,16 +1471,20 @@
le: List
+%5BIO%5D
,%0A
@@ -1524,44 +1524,37 @@
%22%22%22
-Generate interaction/sitemap diagram
+Specify individual YAML files
.%0A%0A
@@ -2247,152 +2247,31 @@
-diagram = Diagram()%0A for f in yaml_file:%0A # TODO: Validate against schema%0A diagram.process_spec(yaml.load(f))%0A diagram.draw(
+_draw_files(yaml_file,
outp
|
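The large hunk mostly introduces the new dir subcommand; its small helper _is_file_obj() duck-types "file-ness" by probing for a callable read attribute. That helper, decoded into runnable form:

import io
from pathlib import Path

def _is_file_obj(f):
    # Duck typing: anything with a callable .read() is an open file;
    # Path objects lack one and get .open()ed by the caller instead.
    read_attr = getattr(f, 'read', None)
    return callable(read_attr)

assert _is_file_obj(io.StringIO('sections: {}'))
assert not _is_file_obj(Path('.'))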
93defcbe389ff885da95dae69a0494e8806055b9
|
Remove alreadyExist()
|
lib/core/engine.py
|
lib/core/engine.py
|
#!/usr/bin/env python
'''
Copyright (c) 2016 anti-XSS developers
'''
import os
import urllib
import urllib2
from lib.core.urlfun import *
from script import Script
from lib.core.link import Link
from lib.var.links import Links
from lib.var.scripts import Scripts
from lib.var.countpage import CountPage
from lib.var.xssscripts import XssScripts
from lib.generator.report import gnrReport
from lib.generator.scripttag import ScriptTag
from lib.generator.linkfilter import LinkFilter
from lib.generator.xsspayload import XssPayload
from lib.generator.pdfgenerator import PdfGnerator
from lib.structure.reporttext import ReportText
def alreadyExist(link):
'''
Judge if the link is already exist in links[]
'''
for iLink in Links().getContent():
if link.getUrl() == iLink.getUrl():
return True
return False
def getFatherUrl(url):
# TODO: change the silly name of this function LE.WANG
'''
Return the upper link of url
'''
fatherUrl = url[::-1]
pos = len(fatherUrl) - fatherUrl.find('/') + -1
fatherUrl = fatherUrl[::-1][:pos]
return fatherUrl
def isLink(link):
# TODO: There may be a problem when the same keyword appears more than once in the url;
'''
Judge if this link is a 'legal' link in the scanner
'''
filterList = LinkFilter().getLinkFilter()
for filterString in filterList:
if link.find(filterString) != -1:
return False
return True
def getRoot(url):
'''
Return the domain
'''
pos = url[8:].find('/')
if pos == -1:
return url
rootUrl = url[:pos + 8]
return rootUrl
def completeLink(link, hostUrl, domain):
'''
Complete the link url
'''
completedLink = link
if completedLink == '':
return hostUrl
if completedLink.find('[[site]]') != -1:
completedLink = completedLink.replace('[[site]]',domain)
elif (completedLink.find('http') == -1):
if (completedLink[0] == '?'):
completedLink = hostUrl + completedLink
elif (completedLink[0] != '/'):
completedLink = hostUrl + '/' + completedLink
else:
completedLink = getRoot(hostUrl) + completedLink
return completedLink
def getPage(rootLink, depth):
'''
Get the source code of pages and get the links on them
'''
# Init the global var CountPage().number with 0
CountPage(0)
Links().addText(rootLink)
# Download the source file of root link and set it as the root in BFS queue
for link in Links().getContent():
CountPage().incNumber()
if CountPage().getNumber() == depth:
return
urlRequest = urllib2.Request(link.getUrl())
urlResponse = urllib2.urlopen(urlRequest)
link.setPage(urlResponse.read())
# A humble way to insert links into queue
# TODO: Threads mode
htmlSource = link.getPage().lower()
pointer = 0
pageLength = len(htmlSource)
isAnyScript = True
while (isAnyScript) and (pointer < pageLength):
flag = False
headPos = htmlSource[pointer:].find('href="') + pointer
tailPos = htmlSource[headPos + 7:].find('"') + headPos + 7
if (headPos >= pointer) and (tailPos >= pointer):
isAnyScript = True
newUrl = htmlSource[headPos + 6:tailPos]
# Formalize the origin link
newUrl = formalize(newUrl)
# Complete it with domain
newUrl = completeLink(newUrl, link.getUrl(), link.getDomain())
# Reconstruct link
newLink = Link(newUrl, link.getDomain())
if isLink(newLink.getUrl()) and (not alreadyExist(newLink)):
Links().addText(newLink)
pointer = tailPos + 1
def getScript():
'''
Store the JavaScript
'''
scriptTags = ScriptTag().getScriptTag()
for link in Links().getContent():
for scriptTag in scriptTags:
headTag = scriptTag.replace('\n','').split('|')[0]
tailTag = scriptTag.replace('\n','').split('|')[1]
source = link.getPage()
head = 0
length = len(source)
flag = True
while ((flag) and (head < length)):
flag = False
pos1 = source[head:].find(headTag) + head
pos2 = source[head:].find(tailTag) + head
if (pos1 >= head)and(pos2 >= head):
flag = True
tempString = source[pos1:pos2 + 9]
tempString = tempString.replace('\t','')
tempString = tempString.replace('\n','')
tempString = tempString.replace(' ','')
script = Script(tempString, link.getUrl())
Scripts().addText(script)
head = pos2 + 10
def xssScanner():
'''
Store the XSS Script
'''
xssPayloads = XssPayload().getXssPayload()
for script in Scripts().getContent():
for xssPayload in xssPayloads:
if (script.getScript().find(xssPayload.replace('\n','')) > -1):
script.setDanger(True)
XssScripts().addText(script.getScript() + '\t' + script.getFromDomain())
break
gnrReport(XssScripts().getContent())
PdfGnerator(ReportText().getText())
|
Python
| 0.000001
|
@@ -631,225 +631,8 @@
xt%0A%0A
-def alreadyExist(link):%0A '''%0A Judge if the link is already exist in links%5B%5D%0A '''%0A%0A for iLink in Links().getContent():%0A if link.getUrl() == iLink.getUrl():%0A return True%0A%0A return False%0A%0A
def
@@ -3520,15 +3520,10 @@
not
-already
+is
Exis
|
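The deleted helper is short enough to restate; note the call site is rewired to an isExist() check defined elsewhere, so this sketch covers only the removed behaviour:

def already_exist(url, seen_urls):
    # Linear scan over the collected links, comparing URLs.
    return any(url == seen for seen in seen_urls)

seen = ['http://a.example/', 'http://b.example/']
assert already_exist('http://a.example/', seen)
assert not already_exist('http://c.example/', seen)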
2163478d2d927c4e50fcef65a88ca9c81b9d245b
|
Remove print from tests
|
menpodetect/tests/opencv_test.py
|
menpodetect/tests/opencv_test.py
|
from menpodetect.opencv import (load_opencv_frontal_face_detector,
load_opencv_eye_detector)
import menpo.io as mio
takeo = mio.import_builtin_asset.takeo_ppm()
def test_frontal_face_detector():
takeo_copy = takeo.copy()
opencv_detector = load_opencv_frontal_face_detector()
pcs = opencv_detector(takeo_copy)
assert len(pcs) == 1
assert takeo_copy.n_channels == 3
assert takeo_copy.landmarks['object_0'][None].n_points == 4
def test_frontal_face_detector_min_neighbors():
takeo_copy = takeo.copy()
opencv_detector = load_opencv_frontal_face_detector()
pcs = opencv_detector(takeo_copy, min_neighbours=100)
assert len(pcs) == 0
assert takeo_copy.n_channels == 3
def test_eye_detector():
takeo_copy = takeo.copy()
opencv_detector = load_opencv_eye_detector()
pcs = opencv_detector(takeo_copy, min_size=(5, 5))
print takeo_copy.landmarks
assert len(pcs) == 1
assert takeo_copy.n_channels == 3
assert takeo_copy.landmarks['object_0'][None].n_points == 4
|
Python
| 0.000002
|
@@ -900,39 +900,8 @@
5))%0A
- print takeo_copy.landmarks%0A
|
475552061da6a58c6953c387a639d5b4e941600b
|
Fix shell completion
|
metakernel/magics/shell_magic.py
|
metakernel/magics/shell_magic.py
|
# Copyright (c) Metakernel Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
from metakernel import Magic, pexpect
from metakernel.replwrap import cmd, bash
import os
import sys
class ShellMagic(Magic):
def __init__(self, kernel):
super(ShellMagic, self).__init__(kernel)
self.repl = None
self.cmd = None
self.start_process()
def line_shell(self, *args):
"""
%shell COMMAND - run the line as a shell command
This line command will run the COMMAND in the bash shell.
Examples:
%shell ls -al
%shell cd
Note: this is a persistent connection to a shell.
The working directory is synchronized to that of the notebook
before and after each call.
You can also use "!" instead of "%shell".
"""
# get in sync with the cwd
self.eval('cd %s' % os.getcwd())
command = " ".join(args)
resp = self.eval(command)
if self.cmd == 'cmd':
cwd = self.eval('cd')
else:
cwd = self.eval('pwd')
if os.path.exists(cwd):
os.chdir(cwd)
if resp:
self.kernel.Print(resp)
def eval(self, cmd):
return self.repl.run_command(cmd, timeout=None,
stream_handler=self.kernel.Print)
def start_process(self):
if self.repl is not None:
self.repl.child.terminate()
if not self.cmd:
if pexpect.which('bash'):
self.cmd = 'bash'
self.repl = bash()
elif pexpect.which('sh'):
self.cmd = 'sh'
self.repl = bash(command='sh')
elif os.name == 'nt':
self.cmd = 'cmd'
self.repl = cmd()
else:
msg = "The command was not found or was not executable: sh"
raise Exception(msg)
def cell_shell(self):
"""
%%shell - run the contents of the cell as shell commands
This shell command will run the cell contents in the bash shell.
Example:
%%shell
cd ..
ls -al
Note: this is a persistent connection to a shell.
The working directory is synchronized to that of the notebook
before and after each call.
You can also use "!!" instead of "%%shell".
"""
self.line_shell(self.code)
self.evaluate = False
def get_completions(self, info):
if self.cmd == 'cmd':
return []
command = 'compgen -cdfa "%s"' % info['code']
completion_text = self.eval(command)
return completion_text.split()
def get_help_on(self, info, level=0):
expr = info['code'].rstrip()
if self.cmd == 'cmd':
resp = self.eval('help %s' % expr)
elif level == 0:
resp = self.eval('%s --help' % expr)
else:
resp = self.eval('man %s' % expr)
if resp and not ': command not found' in resp:
return resp
else:
return "Sorry, no help is available on '%s'." % expr
def register_magics(kernel):
kernel.register_magics(ShellMagic)
|
Python
| 0.000002
|
@@ -2733,28 +2733,54 @@
= self.
-eval(command
+repl.run_command(command, timeout=None
)%0A
|
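Decoded, the fix swaps self.eval(command) for a direct self.repl.run_command(command, timeout=None) in get_completions(). Since eval() always passes stream_handler=self.kernel.Print, completion candidates were presumably being streamed to the frontend rather than just returned. A toy reproduction with a stand-in repl (not the real metakernel API):

class FakeRepl(object):
    # Stand-in only; not the real metakernel replwrap object.
    def run_command(self, cmd, timeout=None, stream_handler=None):
        out = 'ls lsblk lscpu'
        if stream_handler is not None:
            stream_handler(out)  # pushed to the frontend as it arrives
        return out

repl = FakeRepl()
# The fixed call: no stream handler, so the candidates come back as text.
completion_text = repl.run_command('compgen -cdfa "ls"', timeout=None)
print(completion_text.split())  # ['ls', 'lsblk', 'lscpu']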
e8ad2ca0fc2ddec71645bef31686d9de2001dd88
|
add translate type
|
modularodm/fields/StringField.py
|
modularodm/fields/StringField.py
|
from . import Field
from ..validators import StringValidator
class StringField(Field):
# default = ''
validate = StringValidator()
def __init__(self, *args, **kwargs):
super(StringField, self).__init__(*args, **kwargs)
|
Python
| 0.000017
|
@@ -101,16 +101,40 @@
lt = ''%0A
+ translate_type = str
%0A val
|
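The one-line hunk decodes to a class attribute placed next to the commented-out default. With minimal stand-ins for the Field base (the real one lives elsewhere in modular-odm):

class Field(object):
    # Stand-in for modularodm's Field base class.
    pass

class StringField(Field):
    # default = ''
    translate_type = str  # the attribute added by this commit

print(StringField.translate_type(42))  # '42'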
df5ac0a7f2246e5fbbb5f7d87903a5232e94fe87
|
Test deprecation.
|
morepath/tests/test_autosetup.py
|
morepath/tests/test_autosetup.py
|
from collections import namedtuple
from morepath.autosetup import (
caller_module, caller_package, autoscan,
morepath_packages, import_package)
from base.m import App
import morepath
import pytest
def setup_module(module):
morepath.disable_implicit()
def test_import():
import base
import sub
import entrypoint
from ns import real
from ns import real2
import under_score
# Packages to be ignored
import no_mp
from ns import nomp
import no_mp_sub
found = set(morepath_packages())
assert {base, entrypoint, real, real2, sub, under_score} <= found
assert {no_mp, nomp, no_mp_sub}.isdisjoint(found)
def test_load_distribution():
Distribution = namedtuple('Distribution', ['project_name'])
assert import_package(Distribution('base')).m.App is App
with pytest.raises(morepath.error.AutoImportError):
import_package(Distribution('inexistant-package'))
def invoke(callable):
"Add one frame to stack, no other purpose."
return callable()
def test_caller_module():
import sys
assert caller_module(1) == sys.modules[__name__]
assert invoke(caller_module) == sys.modules[__name__]
def test_caller_package():
import sys
assert caller_package(1) == sys.modules[__package__]
assert invoke(caller_package) == sys.modules[__package__]
def test_autoscan(monkeypatch):
import sys
for k in 'base.m', 'entrypoint.app', 'under_score.m':
monkeypatch.delitem(sys.modules, k, raising=False)
autoscan()
assert 'base.m' in sys.modules
assert 'entrypoint.app' in sys.modules
assert 'under_score.m' in sys.modules
|
Python
| 0.000001
|
@@ -222,24 +222,63 @@
le(module):%0A
+ with pytest.deprecated_call():%0A
morepath
|
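The hunk wraps the morepath.disable_implicit() call in pytest.deprecated_call(), which asserts that the call emits a DeprecationWarning. The same pattern on a toy function:

import warnings

import pytest

def old_api():
    warnings.warn('old_api is deprecated', DeprecationWarning)

# Passes only if the wrapped call actually emits a DeprecationWarning.
with pytest.deprecated_call():
    old_api()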
e1184f70abd477ae2d0c304321231c908c67882b
|
add comment to authorize() that uname and pw are saved in plain text
|
msl/package_manager/authorize.py
|
msl/package_manager/authorize.py
|
"""
Create the GitHub authorization file.
"""
import getpass
from .utils import log, get_username, _get_input, _GITHUB_AUTH_PATH
WARNING_MESSAGE = """
Your username and password are saved in plain text in the file that
is created. You should set the file permissions provided by your
operating system to ensure that your GitHub credentials are safe.
"""
def authorize(username=None, password=None):
"""
Create the GitHub authorization file.
When requesting information about the MSL repositories_ that are
available on GitHub there is a limit to how often you can send
requests to the GitHub API. If you have a GitHub account and
include your username and password with each request then this
limit is increased.
Calling this function will create a file that contains your GitHub
username and password so that GitHub requests are authorized.
.. versionadded:: 2.3.0
.. _repositories: https://github.com/MSLNZ
Parameters
----------
username : :class:`str`, optional
The GitHub username. If :data:`None` then you will be
asked for the `username`.
password : :class:`str`, optional
The GitHub password. If :data:`None` then you will be
asked for the `password`.
"""
if username is None:
default = get_username()
try:
username = _get_input('Enter your GitHub username [default: {}]: '.format(default))
except KeyboardInterrupt:
log.warning('\nDid not create GitHub authorization file.')
return
else:
if not username:
username = default
if password is None:
try:
password = getpass.getpass('Enter your GitHub password: ')
except KeyboardInterrupt:
log.warning('\nDid not create GitHub authorization file.')
return
if not username:
log.warning('You must enter a username. Did not create GitHub authorization file.')
return
if not password:
log.warning('You must enter a password. Did not create GitHub authorization file.')
return
with open(_GITHUB_AUTH_PATH, 'w') as fp:
fp.write(username + ':' + password)
log.warning(WARNING_MESSAGE)
log.info('GitHub credentials saved to ' + _GITHUB_AUTH_PATH)
|
Python
| 0
|
@@ -737,24 +737,47 @@
increased.%0A%0A
+ .. important::%0A%0A
Calling
@@ -835,24 +835,27 @@
GitHub%0A
+
+
username and
@@ -903,16 +903,236 @@
horized.
+ Your%0A username and password are saved in plain text in the file that is%0A created. You should set the file permissions provided by your%0A operating system to ensure that your GitHub credentials are safe.
%0A%0A ..
|
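The added admonition tells users to tighten the file permissions themselves. One way a caller might act on that advice, as a sketch that is not part of the package and uses a temporary path instead of _GITHUB_AUTH_PATH:

import os
import stat
import tempfile

path = os.path.join(tempfile.gettempdir(), 'github_auth_demo')
with open(path, 'w') as fp:
    fp.write('username:password')

# 0o600: owner read/write only (effective on POSIX, best-effort on Windows).
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)
print(oct(stat.S_IMODE(os.stat(path).st_mode)))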
47b97cf311c36b993b59235dedc06993a6d58b6f
|
make TestVecSim subclass object
|
new_pmlib/TestVectorSimulator.py
|
new_pmlib/TestVectorSimulator.py
|
#=========================================================================
# TestVectorSimulator
#=========================================================================
# This class simplifies creating unit tests which simply set certain
# inputs and then check certain outputs every cycle. A user simply needs
# to instantiate and elaborate the model, create a list of test vectors,
# and create two helper functions (one to set the model inputs from the
# test vector and one to verify the model outputs against the test
# vector).
#
# Each test vector should be a list of values, so a collection of test
# vectors is just a list of lists. Each test vector specifies the
# inputs/outputs corresponding to a specific cycle in sequence.
#
from new_pymtl import *
class TestVectorSimulator:
#-----------------------------------------------------------------------
# Constructor
#-----------------------------------------------------------------------
def __init__( self, model, test_vectors,
set_inputs_func, verify_outputs_func, wait_cycles = 0 ):
self.model = model
self.set_inputs_func = set_inputs_func
self.verify_outputs_func = verify_outputs_func
self.test_vectors = test_vectors
self.vcd_file_name = None
self.wait_cycles = wait_cycles
#-----------------------------------------------------------------------
# Dump VCD
#-----------------------------------------------------------------------
def dump_vcd( self, vcd_file_name ):
self.vcd_file_name = vcd_file_name
#-----------------------------------------------------------------------
# Run test
#-----------------------------------------------------------------------
def run_test( self, ):
# Create a simulator using the simulation tool
sim = SimulationTool( self.model )
# Dump vcd
if self.vcd_file_name != None:
sim.dump_vcd( self.vcd_file_name )
# Iterate setting the inputs and verifying the outputs each cycle
print ""
sim.reset()
for test_vector in self.test_vectors:
# Set inputs
self.set_inputs_func( self.model, test_vector )
# Evaluate combinational concurrent blocks in simulator
if self.wait_cycles == 0: sim.eval_combinational()
else:
for i in xrange(self.wait_cycles):
sim.cycle()
# Print the line trace
sim.print_line_trace()
# Verify outputs
self.verify_outputs_func( self.model, test_vector )
# Tick the simulator one cycle
sim.cycle()
# Add a couple extra ticks so that the VCD dump is nicer
sim.cycle()
sim.cycle()
sim.cycle()
|
Python
| 0
|
@@ -786,16 +786,26 @@
imulator
+( object )
:%0A%0A #--
|
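The hunk inserts ( object ) into the class statement. Under Python 2 that turns an old-style class into a new-style one; under Python 3 it is a no-op. In miniature:

class TestVectorSimulator(object):  # new-style on Python 2 as well
    pass

# New-style classes are instances of type, which is what enables
# descriptors, super() and friends under Python 2.
print(isinstance(TestVectorSimulator, type))  # True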
a335445273ebeca077f3508392fc347e62df64ca
|
Add name to argument list
|
nnadapter/backend/torchlegacy.py
|
nnadapter/backend/torchlegacy.py
|
"""
Thanks to clcarwin
https://github.com/clcarwin/convert_torch_to_pytorch
"""
from __future__ import print_function
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.utils.serialization import load_lua
class LambdaBase(nn.Sequential):
def __init__(self, name, fn, *args):
super(LambdaBase, self).__init__(*args)
self.lambda_func = fn
self.name = name
def forward_prepare(self, input):
output = []
for module in self._modules.values():
output.append(module(input))
return output if output else input
def __str__(self):
return self.name
class Lambda(LambdaBase):
def forward(self, input):
return self.lambda_func(self.forward_prepare(input))
class LambdaMap(LambdaBase):
def forward(self, input):
# result is Variables list [Variable1, Variable2, ...]
return map(self.lambda_func, self.forward_prepare(input))
class LambdaReduce(LambdaBase):
def forward(self, input):
# result is a Variable
return reduce(self.lambda_func, self.forward_prepare(input))
def copy_param(m, n):
if m.weight is not None: n.weight.data.copy_(m.weight)
if m.bias is not None: n.bias.data.copy_(m.bias)
if hasattr(n, 'running_mean'): n.running_mean.copy_(m.running_mean)
if hasattr(n, 'running_var'): n.running_var.copy_(m.running_var)
def add_submodule(seq, *args):
for n in args:
seq.add_module(str(len(seq._modules)), n)
def lua_recursive_model(module, seq):
for m in module.modules:
name = type(m).__name__
real = m
if name == 'TorchObject':
name = m._typename.replace('cudnn.', '')
m = m._obj
if name == 'SpatialConvolution':
if not hasattr(m, 'groups'): m.groups = 1
n = nn.Conv2d(m.nInputPlane, m.nOutputPlane, (m.kW, m.kH), (m.dW, m.dH), (m.padW, m.padH), 1, m.groups,
bias=(m.bias is not None))
copy_param(m, n)
add_submodule(seq, n)
elif name == 'SpatialBatchNormalization':
n = nn.BatchNorm2d(m.running_mean.size(0), m.eps, m.momentum, m.affine)
copy_param(m, n)
add_submodule(seq, n)
elif name == 'ReLU' or name == 'Threshold':
n = nn.ReLU()
add_submodule(seq, n)
elif name == 'SpatialMaxPooling':
n = nn.MaxPool2d((m.kW, m.kH), (m.dW, m.dH), (m.padW, m.padH), ceil_mode=m.ceil_mode)
add_submodule(seq, n)
elif name == 'SpatialAveragePooling':
n = nn.AvgPool2d((m.kW, m.kH), (m.dW, m.dH), (m.padW, m.padH), ceil_mode=m.ceil_mode)
add_submodule(seq, n)
elif name == 'SpatialUpSamplingNearest':
n = nn.UpsamplingNearest2d(scale_factor=m.scale_factor)
add_submodule(seq, n)
elif name == 'View':
n = Lambda(name, lambda x: x.view(x.size(0), -1))
add_submodule(seq, n)
elif name == 'Linear':
# Linear in pytorch only accepts 2D input
#n1 = Lambda(name, lambda x: x.view(1, -1) if 1 == len(x.size()) else x)
n2 = nn.Linear(m.weight.size(1), m.weight.size(0), bias=(m.bias is not None))
copy_param(m, n2)
#n = nn.Sequential(n1, n2)
add_submodule(seq, n2)
elif name == 'Dropout':
m.inplace = False
n = nn.Dropout(m.p)
add_submodule(seq, n)
elif name == 'SoftMax':
n = nn.Softmax()
add_submodule(seq, n)
elif name == 'Identity':
n = Lambda(name, lambda x: x) # do nothing
add_submodule(seq, n)
elif name == 'SpatialFullConvolution':
n = nn.ConvTranspose2d(m.nInputPlane, m.nOutputPlane, (m.kW, m.kH), (m.dW, m.dH), (m.padW, m.padH))
add_submodule(seq, n)
elif name == 'SpatialReplicationPadding':
n = nn.ReplicationPad2d((m.pad_l, m.pad_r, m.pad_t, m.pad_b))
add_submodule(seq, n)
elif name == 'SpatialReflectionPadding':
n = nn.ReflectionPad2d((m.pad_l, m.pad_r, m.pad_t, m.pad_b))
add_submodule(seq, n)
elif name == 'Copy':
n = Lambda(name, lambda x: x) # do nothing
add_submodule(seq, n)
elif name == 'Narrow':
n = Lambda(name, lambda x, a=(m.dimension, m.index, m.length): x.narrow(*a))
add_submodule(seq, n)
elif name == 'SpatialCrossMapLRN':
lrn = torch.legacy.nn.SpatialCrossMapLRN(m.size, m.alpha, m.beta, m.k)
n = Lambda(name, lambda x, lrn=lrn: Variable(lrn.forward(x.data)))
add_submodule(seq, n)
elif name == 'Sequential':
n = nn.Sequential()
lua_recursive_model(m, n)
add_submodule(seq, n)
elif name == 'ConcatTable': # output is list
n = LambdaMap(name, lambda x: x)
lua_recursive_model(m, n)
add_submodule(seq, n)
elif name == 'CAddTable': # input is list
n = LambdaReduce(lambda x, y: x + y)
add_submodule(seq, n)
elif name == 'Concat':
dim = m.dimension
n = LambdaReduce(name, lambda x, y, dim=dim: torch.cat((x, y), dim))
lua_recursive_model(m, n)
add_submodule(seq, n)
elif name == 'TorchObject':
print('Not Implemented', name, real._typename)
else:
print('Not Implemented', name)
def load_legacy_model(t7_filename):
model = load_lua(t7_filename, unknown_classes=True)
if type(model).__name__ == 'hashable_uniq_dict':
model = model.model
model.gradInput = None
n = nn.Sequential()
lua_recursive_model(model, n)
return n
|
Python
| 0.000005
|
@@ -5110,16 +5110,22 @@
aReduce(
+name,
lambda x
|
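LambdaBase.__init__ takes (name, fn, *args), so every Lambda construction must pass the layer name first, and the CAddTable branch had omitted it. The corrected call shape against a stripped-down LambdaBase:

class LambdaBase(object):
    # Stripped-down copy of the LambdaBase above, minus nn.Sequential.
    def __init__(self, name, fn):
        self.lambda_func = fn
        self.name = name

    def __str__(self):
        return self.name

n = LambdaBase('CAddTable', lambda x, y: x + y)  # name now passed first
print(n, n.lambda_func(1, 2))  # CAddTable 3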
2388b28aed8e01930f2e6138341b754c5a931c43
|
Update speakonomy.py
|
speakerbot/speakonomy.py
|
speakerbot/speakonomy.py
|
import datetime as dt
import os
import sys
from speaker_db import SpeakerDB
from dynamic_class import Singleton
class Speakonomy:
__metaclass__ = Singleton
def __init__(self, speakerbot=None, disabled=False):
self.db = SpeakerDB()
self.speakerbot = speakerbot
self.disabled = disabled
self.free_play_timeout = None
def check_affordability(self, sound_name=None, cost=None, free=False):
if not self.is_active() or free:
return True
if not cost:
cost = self.db.execute("SELECT cost FROM sounds WHERE name=?", [sound_name,]).fetchone()['cost']
balance = self.get_speakerbuck_balance()
if cost <= balance:
return True
return False
def deposit_funds(self, amount=1):
assert isinstance(amount,int)
self.db.execute("UPDATE bank_account set balance=balance+{}".format(amount))
def get_free_play_timeout(self, force_check=False):
if self.free_play_timeout and not force_check:
return self.free_play_timeout
expiration_timestamp = self.db.execute("SELECT free_play_timeout FROM bank_account").fetchone()['free_play_timeout']
self.free_play_timeout = dt.datetime.fromtimestamp(expiration_timestamp)
return self.free_play_timeout
def get_last_withdrawal_time(self, include_sbpm=False):
last_withdrawal_time = self.db.execute("SELECT last_withdrawal_time FROM bank_account").fetchone()['last_withdrawal_time']
last_withdrawal_time = dt.datetime.fromtimestamp(last_withdrawal_time)
if not include_sbpm:
return last_withdrawal_time
today_time = dt.datetime.combine(dt.date.today(), dt.time(8, 00))
if last_withdrawal_time < today_time:
last_withdrawal_time = today_time
minutes_since_last_withdrawal = (dt.datetime.now() - last_withdrawal_time).total_seconds() / 60
spbm = int((minutes_since_last_withdrawal + 9) / 10)
return last_withdrawal_time, spbm
def get_speakerbuck_balance(self):
balance = self.db.execute("SELECT balance FROM bank_account").fetchone()
if balance:
return balance['balance']
return 0
def is_active(self, force_check=False):
if self.disabled:
return False
if dt.datetime.today().weekday() in [5,6]:
return False
current_hour = dt.datetime.now().hour
if current_hour < 8 or current_hour > 18:
return False
if dt.datetime.now() < self.get_free_play_timeout(force_check=force_check):
return False
return True
def regulate_costs(self):
self.db.execute("UPDATE sounds set cost=FLOOR(0.95*cost+0.05*base_cost) WHERE cost > base_cost")
self.db.execute("UPDATE sounds set cost=base_cost WHERE cost < base_cost")
def sell_sound(self, sound_name, **kwargs):
if self.is_active() and not kwargs.get('free'):
cost = int(self.db.execute("SELECT cost FROM sounds WHERE name=?", [sound_name,]).fetchone()['cost'])
self.withdraw_funds(cost)
self.db.execute("UPDATE sounds set cost=cost*2 where name=?", [sound_name,])
def set_free_play_timeout(self, expiration_datetime=None, hours=0, minutes=0):
if not expiration_datetime:
expiration_datetime = dt.datetime.now() + dt.timedelta(hours=hours, minutes=minutes)
expiration_timestamp = expiration_datetime.strftime("%s")
self.db.execute("UPDATE bank_account SET free_play_timeout=?", [expiration_timestamp,])
self.free_play_timeout = dt.datetime.fromtimestamp(float(expiration_timestamp))
def get_sound_base_cost(self, sound_path):
try:
sound_size = os.stat(sound_path).st_size
sound_cost = int(sound_size/1024 * 0.0854455 - 0.0953288 + 0.5)
if sound_cost < 1:
sound_cost = 1
except:
sound_cost = 0
return sound_cost
def set_sound_base_costs(self, sound_dir="sounds"):
assert self.speakerbot != None
if not self.speakerbot.sounds:
self.speakerbot.load_sounds()
for sound_name in self.speakerbot.sounds:
sound_path = '{}/{}'.format(sound_dir, self.speakerbot.sounds[sound_name][0])
sound_cost = self.get_sound_base_cost(sound_path)
self.db.execute("UPDATE sounds SET base_cost={} where name='{}'".format(sound_cost, sound_name))
def withdraw_funds(self, amount):
if self.is_active(True):
self.deposit_funds(amount=-1*amount)
withdrawal_time = dt.datetime.now().strftime("%s")
self.db.execute("UPDATE bank_account SET last_withdrawal_time={}".format(withdrawal_time))
if __name__ == "__main__":
speakonomy = Speakonomy()
if not speakonomy.is_active():
pass
try:
deposit_amount = int(sys.argv[1])
except:
last_withdrawal_time, deposit_amount = speakonomy.get_last_withdrawal_time(include_sbpm=True)
if deposit_amount < 1:
deposit_amount = 1
print "Depositing {}...".format(deposit_amount)
speakonomy.deposit_funds(deposit_amount)
speakonomy.regulate_costs()
|
Python
| 0.000001
|
@@ -788,46 +788,8 @@
1):%0A
- assert isinstance(amount,int)%0A
@@ -853,26 +853,21 @@
nce+
-%7B%7D%22.format(
+?%22, %5B
amount
-)
+,%5D
)%0A%0A
|
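Decoded, deposit_funds() loses its isinstance() assert and gains a parameterized query, letting the driver handle quoting and binding. The same shape against sqlite3, standing in for SpeakerDB:

import sqlite3

db = sqlite3.connect(':memory:')
db.execute('CREATE TABLE bank_account (balance INTEGER)')
db.execute('INSERT INTO bank_account VALUES (0)')

amount = 5
# A placeholder instead of str.format(): the driver does the binding,
# which removes the injection surface and the need for the old assert.
db.execute('UPDATE bank_account SET balance=balance+?', [amount])
print(db.execute('SELECT balance FROM bank_account').fetchone()[0])  # 5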
0cd4862062bbe19aec5bb2a23563e03eb8ca8cb7
|
Fix stable release script
|
make_stable_release.py
|
make_stable_release.py
|
from robot.libdoc import libdoc
from src.WhiteLibrary.version import VERSION
import git
import sys
VERSION_FILE = './src/WhiteLibrary/version.py'
def change_stable(from_stable, to_stable):
with open(VERSION_FILE, 'r') as file :
filedata = file.read()
filedata = filedata.replace('STABLE = {0}'.format(from_stable), 'STABLE = {0}'.format(to_stable)
with open(VERSION_FILE, 'w') as file:
file.write(filedata)
def change_version_number(ver):
with open(VERSION_FILE, 'r') as file :
filedata = file.read()
filedata = filedata.replace('VERSION = "{0}"'.format(VERSION), 'VERSION = "{0}"'.format(ver))
with open(VERSION_FILE, 'w') as file:
file.write(filedata)
repo = git.Repo( '.' )
change_stable("False", "True")
print(str(sys.argv))
change_version_number(sys.argv[1])
libdoc("./src/WhiteLibrary", "./docs/keywords.html", version=VERSION)
ver = "v".format(VERSION)
repo.git.add(VERSION_FILE)
repo.git.add('./docs/keywords.html')
repo.git.commit( m='Making stable release: {0}'.format(ver) )
tag = repo.git.create_tag(ver, message='New stable version: "{0}"'.format(ver))
repo.git.push(tag)
change_stable("True", "False")
repo.git.add(VERSION_FILE)
repo.git.commit( m='Back to unstable release' )
repo.git.push()
|
Python
| 0.000001
|
@@ -141,16 +141,17 @@
on.py'%0A%0A
+%0A
def chan
@@ -185,17 +185,16 @@
table):%0A
-%0A
with
@@ -217,33 +217,32 @@
LE, 'r') as file
-
:%0A fileda
@@ -359,16 +359,17 @@
_stable)
+)
%0A%0A wi
@@ -434,16 +434,17 @@
edata)%0A%0A
+%0A
def chan
@@ -507,17 +507,16 @@
as file
-
:%0A
@@ -712,16 +712,17 @@
edata)%0A%0A
+%0A
repo = g
@@ -733,13 +733,11 @@
epo(
-
'.'
-
)%0A%0Ac
@@ -770,26 +770,30 @@
e%22)%0A
-print(str(
+new_version =
sys.argv
))%0Ac
@@ -788,18 +788,19 @@
sys.argv
-))
+%5B1%5D
%0Achange_
@@ -814,27 +814,27 @@
_number(
-sys.argv%5B1%5D
+new_version
)%0Alibdoc
@@ -888,23 +888,27 @@
version=
-VERSION
+new_version
)%0A%0Aver =
@@ -910,16 +910,18 @@
ver = %22v
+%7B%7D
%22.format
@@ -921,23 +921,27 @@
.format(
-VERSION
+new_version
)%0Arepo.g
@@ -1014,17 +1014,16 @@
.commit(
-
m='Makin
@@ -1056,17 +1056,16 @@
mat(ver)
-
)%0Atag =
@@ -1069,20 +1069,16 @@
= repo.
-git.
create_t
@@ -1135,27 +1135,38 @@
(ver))%0Arepo.
+remotes.ori
gi
-t
+n
.push(tag)%0Ac
@@ -1164,16 +1164,33 @@
sh(tag)%0A
+repo.git.push()%0A%0A
change_s
@@ -1256,17 +1256,16 @@
.commit(
-
m='Back
@@ -1284,17 +1284,16 @@
release'
-
)%0Arepo.g
|
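Among the many decoded fixes, the easiest to miss is the version string: 'v'.format(VERSION) contains no placeholder and always evaluates to plain 'v'. The bug and the fix side by side:

VERSION = '1.2.3'

assert 'v'.format(VERSION) == 'v'          # the bug: the argument is ignored
assert 'v{}'.format(VERSION) == 'v1.2.3'   # the fix: placeholder added
print('v{}'.format(VERSION))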
6d910ea91b550864f445fe33d0a29ef11a82f762
|
Replace iteritems() with items()
|
malcolm/core/method.py
|
malcolm/core/method.py
|
#!/bin/env dls-python
from collections import OrderedDict
from malcolm.core.loggable import Loggable
class Method(Loggable):
"""Exposes a function with metadata for arguments and return values"""
def __init__(self, name):
super(Method, self).__init__(logger_name=name)
self.name = name
self.func = None
self.takes = None
self.returns = None
self.defaults = None
def set_function(self, func):
"""Set the function to expose.
Function must return accept a dictionary of keyword arguments
and return either a single value or dictionary of results.
"""
self.func = func
def set_function_takes(self, arg_meta, defaults=None):
"""Set the arguments and default values for the method
Args:
arg_meta (MapMeta): Arguments to the function
default (dict): Default values for arguments (default None)
"""
self.takes = arg_meta
if defaults is not None:
self.defaults = OrderedDict(defaults)
else:
self.defaults = OrderedDict()
def set_function_returns(self, return_meta):
"""Set the return parameters for the method to validate against"""
self.returns = return_meta
def __call__(self, *args, **kwargs):
"""Call the exposed function using regular keyword argument parameters.
Will validate the output against provided return parameters.
"""
# Assumes positional arguments represent arguments *before* any kw-args
# in the ordered dictionary.
for arg, arg_val in zip(self.takes.elements.keys(), args):
kwargs[arg] = arg_val
for arg in self.takes.elements:
if arg not in kwargs.keys():
if arg in self.defaults.keys():
kwargs[arg] = self.defaults[arg]
elif arg in self.takes.required:
raise ValueError(
"Argument %s is required but was not provided" % arg)
return_val = self.func(kwargs)
if self.returns is not None:
if return_val.keys() != self.returns.elements.keys():
raise ValueError(
"Return result did not match specified return structure")
for r_name, r_val in return_val.iteritems():
self.returns.elements[r_name].validate(r_val)
return return_val
def handle_request(self, request):
"""Call exposed function using request parameters and respond with the
result"""
result = self(**request.parameters)
request.respond_with_return(result)
def to_dict(self):
"""Return ordered dictionary representing Method object."""
serialized = OrderedDict()
serialized["takes"] = self.takes.to_dict()
serialized["defaults"] = self.defaults.copy()
serialized["returns"] = self.returns.to_dict()
return serialized
|
Python
| 0.99999
|
@@ -2344,12 +2344,8 @@
val.
-iter
item
|
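The hunk deletes four characters, but they matter: dict.iteritems() exists only on Python 2 and was removed in Python 3, while items() works on both. The fixed loop in isolation:

return_val = {'sum': 3, 'diff': 1}

# Was: for r_name, r_val in return_val.iteritems():  # Python 2 only
for r_name, r_val in return_val.items():
    print(r_name, r_val)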
ef5ed1bedd80ae03b9174088e579b5d85507266c
|
Update forms.py
|
app/auth/forms.py
|
app/auth/forms.py
|
from flask.ext.wtf import Form
from wtforms import StringField, PasswordField, BooleanField, SubmitField, ValidationError
from wtforms.validators import Required, Email, Length, Regexp, EqualTo
from ..models import User
class LoginForm(Form):
email = StringField('Email', validators=[Required(), Length(1, 64), Email()])
password = PasswordField('Password', validators=[Required()])
remember_me = BooleanField('Keep me logged in')
submit = SubmitField('Login')
class RegistrationForm(Form):
email = StringField('Email', validators=[Required(), Length(1, 64), Email()])
username = StringField('Username', validators=[Required(), Length(1, 64), Regexp('^[A-Za-z][A-Za-z0-9_.]*$', 0, 'Usernames must have only letters, numbers, dots or underscore.')])
password = PasswordField('Password', validators=[Required(), EqualTo('password2', message='Passwords must match')])
password2 = PasswordField('Confirm password', validators=[Required()])
submit = SubmitField('Register')
def validate_email(self, field):
if User.query.filter_by(email=field.data).first():
raise ValidationError('Email already registered.')
def validate_username(self, field):
if User.query.filter_by(username=field.data).first():
raise ValidationError('Username already in use.')
class ChangePasswordForm(Form):
old_password = PasswordField('Old password', validators=[Required()])
password = PasswordField('New password', validators=[
Required(), EqualTo('password2', message='Passwords must match')])
password2 = PasswordField('Confirm new password', validators=[Required()])
submit = SubmitField('Update Password')
class PasswordResetRequestForm(Form):
email = StringField('Email', validators=[Required(), Length(1, 64),
Email()])
submit = SubmitField('Reset Password')
class PasswordResetForm(Form):
email = StringField('Email', validators=[Required(), Length(1, 64),
Email()])
password = PasswordField('New Password', validators=[
Required(), EqualTo('password2', message='Passwords must match')])
password2 = PasswordField('Confirm password', validators=[Required()])
submit = SubmitField('Reset Password')
def validate_email(self, field):
if User.query.filter_by(email=field.data).first() is None:
raise ValidationError('Unknown email address.')
class ChangeEmailForm(Form):
email = StringField('New Email', validators=[Required(), Length(1, 64),
Email()])
password = PasswordField('Password', validators=[Required()])
submit = SubmitField('Update Email Address')
def validate_email(self, field):
if User.query.filter_by(email=field.data).first():
raise ValidationError('Email already registered.')
|
Python
| 0.000001
|
@@ -213,16 +213,59 @@
rt User%0A
+from app.exceptions import ValidationError%0A
%0A%0Aclass
|
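Decoded, the hunk inserts from app.exceptions import ValidationError after the models import. Because wtforms already exported a ValidationError at the top of the file, the later import shadows it module-wide; the shadowing mechanic in miniature, with unrelated stdlib stand-ins:

# Stand-ins only: two unrelated stdlib exceptions bound to one name.
from decimal import InvalidOperation as ValidationError
from zlib import error as ValidationError  # the later import wins

print(ValidationError)  # <class 'zlib.error'>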