commit
stringlengths 40
40
| subject
stringlengths 1
3.25k
| old_file
stringlengths 4
311
| new_file
stringlengths 4
311
| old_contents
stringlengths 0
26.3k
| lang
stringclasses 3
values | proba
float64 0
1
| diff
stringlengths 0
7.82k
|
|---|---|---|---|---|---|---|---|
121595a21d0eda92789de28d2a187dff4f14a8c3
|
Remove unused import, remove Windows reference
|
tests/unit/beacons/test_watchdog.py
|
tests/unit/beacons/test_watchdog.py
|
# coding: utf-8
# Python libs
from __future__ import absolute_import, print_function, unicode_literals
import os
import shutil
import tempfile
import time
# Salt libs
import salt.utils.files
import salt.utils.platform
from salt.beacons import watchdog
from salt.ext.six.moves import range
# Salt testing libs
from tests.support.unit import skipIf, TestCase
from tests.support.mixins import LoaderModuleMockMixin
def check_events(config):
total_delay = 1
delay_per_loop = 20e-3
for _ in range(int(total_delay / delay_per_loop)):
events = watchdog.beacon(config)
if events:
return events
time.sleep(delay_per_loop)
return []
def create(path, content=None):
with salt.utils.files.fopen(path, 'w') as f:
if content:
f.write(content)
os.fsync(f)
@skipIf(not watchdog.HAS_WATCHDOG, 'watchdog is not available')
class IWatchdogBeaconTestCase(TestCase, LoaderModuleMockMixin):
'''
Test case for salt.beacons.watchdog on Windows
'''
def setup_loader_modules(self):
return {watchdog: {}}
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
def tearDown(self):
watchdog.close({})
shutil.rmtree(self.tmpdir, ignore_errors=True)
def assertValid(self, config):
ret = watchdog.validate(config)
self.assertEqual(ret, (True, 'Valid beacon configuration'))
def test_empty_config(self):
config = [{}]
ret = watchdog.beacon(config)
self.assertEqual(ret, [])
def test_file_create(self):
path = os.path.join(self.tmpdir, 'tmpfile')
config = [{'directories': {self.tmpdir: {'mask': ['create']}}}]
self.assertValid(config)
self.assertEqual(watchdog.beacon(config), [])
create(path)
ret = check_events(config)
self.assertEqual(len(ret), 1)
self.assertEqual(ret[0]['path'], path)
self.assertEqual(ret[0]['change'], 'created')
def test_file_modified(self):
path = os.path.join(self.tmpdir, 'tmpfile')
# Create triggers a modify event along with the create event in Py3
# So, let's do this before configuring the beacon
create(path)
config = [{'directories': {self.tmpdir: {'mask': ['modify']}}}]
self.assertValid(config)
self.assertEqual(watchdog.beacon(config), [])
create(path, 'some content')
ret = check_events(config)
self.assertEqual(len(ret), 1)
self.assertEqual(ret[0]['path'], path)
self.assertEqual(ret[0]['change'], 'modified')
def test_file_deleted(self):
path = os.path.join(self.tmpdir, 'tmpfile')
create(path)
config = [{'directories': {self.tmpdir: {'mask': ['delete']}}}]
self.assertValid(config)
self.assertEqual(watchdog.beacon(config), [])
os.remove(path)
ret = check_events(config)
self.assertEqual(len(ret), 1)
self.assertEqual(ret[0]['path'], path)
self.assertEqual(ret[0]['change'], 'deleted')
def test_file_moved(self):
path = os.path.join(self.tmpdir, 'tmpfile')
create(path)
config = [{'directories': {self.tmpdir: {'mask': ['move']}}}]
self.assertValid(config)
self.assertEqual(watchdog.beacon(config), [])
os.rename(path, path + '_moved')
ret = check_events(config)
self.assertEqual(len(ret), 1)
self.assertEqual(ret[0]['path'], path)
self.assertEqual(ret[0]['change'], 'moved')
def test_file_create_in_directory(self):
config = [{'directories': {self.tmpdir: {'mask': ['create']}}}]
self.assertValid(config)
self.assertEqual(watchdog.beacon(config), [])
path = os.path.join(self.tmpdir, 'tmpfile')
create(path)
ret = check_events(config)
self.assertEqual(len(ret), 1)
self.assertEqual(ret[0]['path'], path)
self.assertEqual(ret[0]['change'], 'created')
def test_trigger_all_possible_events(self):
path = os.path.join(self.tmpdir, 'tmpfile')
moved = path + '_moved'
config = [{'directories': {
self.tmpdir: {},
}}]
self.assertValid(config)
self.assertEqual(watchdog.beacon(config), [])
# create
create(path)
# modify
create(path, 'modified content')
# move
os.rename(path, moved)
# delete
os.remove(moved)
# Give the events time to load into the queue
time.sleep(1)
ret = check_events(config)
events = {'created': '',
'deleted': '',
'moved': ''}
modified = False
for event in ret:
if event['change'] == 'created':
self.assertEqual(event['path'], path)
events.pop('created', '')
if event['change'] == 'moved':
self.assertEqual(event['path'], path)
events.pop('moved', '')
if event['change'] == 'deleted':
self.assertEqual(event['path'], moved)
events.pop('deleted', '')
# "modified" requires special handling
# All events [created, moved, deleted] also trigger a "modified"
# event on Linux
# Only the "created" event triggers a modified event on Py3 Windows
# When the "modified" event triggers on modify, it will have the
# path to the temp file (path), other modified events will contain
# the path minus "tmpfile" and will not match. That's how we'll
# distinguish the two
if event['change'] == 'modified':
if event['path'] == path:
modified = True
# Check results of the for loop to validate modified
self.assertTrue(modified)
# Make sure all events were checked
self.assertDictEqual(events, {})
|
Python
| 0
|
@@ -190,35 +190,8 @@
les%0A
-import salt.utils.platform%0A
from
@@ -983,19 +983,8 @@
hdog
- on Windows
%0A
|
46fd008640bd80dc9e22127262e47b2519779d5f
|
Make current ticket (if specified) available to template
|
byceps/blueprints/seating/views.py
|
byceps/blueprints/seating/views.py
|
"""
byceps.blueprints.seating.views
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2017 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from flask import abort, g
from ...config import get_seat_management_enabled, get_ticket_management_enabled
from ...services.seating import area_service as seating_area_service
from ...services.seating.models.seat import Seat, SeatID
from ...services.seating import seat_service
from ...services.ticketing.models.ticket import Ticket, TicketID
from ...services.ticketing import ticket_service
from ...util.framework.blueprint import create_blueprint
from ...util.framework.flash import flash_success
from ...util.framework.templating import templated
from ...util.views import respond_no_content
blueprint = create_blueprint('seating', __name__)
@blueprint.route('/')
@templated
def index():
"""List areas."""
areas = seating_area_service.get_areas_for_party(g.party_id)
return {
'areas': areas,
}
@blueprint.route('/areas/<slug>')
@templated
def view_area(slug):
"""View area."""
area = seating_area_service.find_area_for_party_by_slug(g.party_id, slug)
if area is None:
abort(404)
seat_management_enabled = get_seat_management_enabled()
ticket_management_enabled = get_ticket_management_enabled()
return {
'area': area,
'seat_management_enabled': seat_management_enabled,
'ticket_management_enabled': ticket_management_enabled,
}
@blueprint.route('/ticket/<uuid:ticket_id>/seat/<uuid:seat_id>', methods=['POST'])
@respond_no_content
def occupy_seat(ticket_id, seat_id):
"""Use ticket to occupy seat."""
_abort_if_seat_management_disabled()
ticket = _get_ticket_or_404(ticket_id)
manager = g.current_user
if not ticket.is_seat_managed_by(manager.id):
abort(403)
seat = _get_seat_or_404(seat_id)
if seat.is_occupied:
abort(403)
ticket_service.occupy_seat(ticket.id, seat.id, manager.id)
flash_success('{} wurde mit Ticket {} reserviert.', seat.label, ticket.code)
@blueprint.route('/ticket/<uuid:ticket_id>/seat', methods=['DELETE'])
@respond_no_content
def release_seat(ticket_id):
"""Release the seat."""
_abort_if_seat_management_disabled()
ticket = _get_ticket_or_404(ticket_id)
if not ticket.occupied_seat:
abort(404)
manager = g.current_user
if not ticket.is_seat_managed_by(manager.id):
abort(403)
seat = ticket.occupied_seat
ticket_service.release_seat(ticket.id, manager.id)
flash_success('{} wurde freigegeben.', seat.label)
def _abort_if_seat_management_disabled() -> None:
if not get_seat_management_enabled():
flash_error('Sitzplätze können derzeit nicht verändert werden.')
abort(403)
def _get_ticket_or_404(ticket_id: TicketID) -> Ticket:
ticket = ticket_service.find_ticket(ticket_id)
if (ticket is None) or ticket.revoked:
abort(404)
return ticket
def _get_seat_or_404(seat_id: SeatID) -> Seat:
seat = seat_service.find_seat(seat_id)
if seat is None:
abort(404)
return seat
|
Python
| 0
|
@@ -159,16 +159,45 @@
s.%0A%22%22%22%0A%0A
+from typing import Optional%0A%0A
from fla
@@ -214,16 +214,25 @@
abort, g
+, request
%0A%0Afrom .
@@ -683,16 +683,29 @@
h import
+ flash_error,
flash_s
@@ -1367,24 +1367,75 @@
_enabled()%0A%0A
+ current_ticket_id = _find_current_ticket_id()%0A%0A
return %7B
@@ -1581,25 +1581,614 @@
nabled,%0A
-%7D
+ 'current_ticket_id': current_ticket_id,%0A %7D%0A%0A%0Adef _find_current_ticket_id() -%3E Optional%5BTicketID%5D:%0A ticket_code = request.args.get('ticket')%0A if ticket_code is None:%0A return None%0A%0A ticket = ticket_service.find_ticket_by_code(ticket_code)%0A if ticket is None:%0A flash_error('Unbekannte Ticket-ID')%0A return None%0A%0A if not ticket.is_seat_managed_by(g.current_user.id):%0A flash_error(%0A 'Du bist nicht berechtigt, den Sitzplatz '%0A 'f%C3%BCr Ticket %7B%7D zu verwalten.',%0A ticket.code)%0A return None%0A%0A return ticket.id
%0A%0A%0A@blueprin
|
4063397a50c7171acb038132b4c8e59030b382c9
|
Replace exceptions.ValidationError with serializers.ValidationError
|
kpi/serializers/v2/asset_export_settings.py
|
kpi/serializers/v2/asset_export_settings.py
|
# coding: utf-8
from django.utils.translation import ugettext as _
from rest_framework import serializers, exceptions
from rest_framework.reverse import reverse
from kpi.models import AssetExportSettings
from kpi.fields import WritableJSONField
OPTIONAL_EXPORT_SETTINGS = ('fields',)
REQUIRED_EXPORT_SETTINGS = (
'fields_from_all_versions',
'group_sep',
'hierarchy_in_labels',
'lang',
'multiple_select',
'type',
)
VALID_EXPORT_SETTINGS = OPTIONAL_EXPORT_SETTINGS + REQUIRED_EXPORT_SETTINGS
VALID_MULTIPLE_SELECTS = (
'both',
'summary',
'details',
)
VALID_EXPORT_TYPES = (
'csv',
'geojson',
'kml',
'spss',
'xlsx',
'zip',
)
VALID_DEFAULT_LANGUAGES = (
'_xml',
'_default',
)
VALID_BOOLEANS = (
'true',
'false',
)
class AssetExportSettingsSerializer(serializers.ModelSerializer):
uid = serializers.ReadOnlyField()
url = serializers.SerializerMethodField()
name = serializers.CharField()
date_modified = serializers.CharField(read_only=True)
export_settings = WritableJSONField()
class Meta:
model = AssetExportSettings
fields = (
'uid',
'url',
'name',
'date_modified',
'export_settings',
)
read_only_fields = (
'uid',
'url',
'date_modified',
)
def validate_export_settings(self, export_settings):
asset = self.context['view'].asset
asset_languages = asset.summary.get('languages', ())
all_valid_languages = (*asset_languages, *VALID_DEFAULT_LANGUAGES)
for required in REQUIRED_EXPORT_SETTINGS:
if required not in export_settings:
raise exceptions.ValidationError(
_(
"`export_settings` must contain all the following required keys: {}"
).format(
self.__format_exception_values(
REQUIRED_EXPORT_SETTINGS, 'and'
)
)
)
for key in export_settings:
if key not in VALID_EXPORT_SETTINGS:
raise exceptions.ValidationError(
_(
"`export_settings` can contain only the following valid keys: {}"
).format(
self.__format_exception_values(
VALID_EXPORT_SETTINGS, 'and'
)
)
)
if export_settings['multiple_select'] not in VALID_MULTIPLE_SELECTS:
raise exceptions.ValidationError(
_("`multiple_select` must be either {}").format(
self.__format_exception_values(VALID_MULTIPLE_SELECTS)
)
)
if export_settings['type'] not in VALID_EXPORT_TYPES:
raise exceptions.ValidationError(
_("`type` must be either {}").format(
self.__format_exception_values(VALID_EXPORT_TYPES)
)
)
for setting in ['fields_from_all_versions', 'hierarchy_in_labels']:
if export_settings[setting].lower() not in VALID_BOOLEANS:
raise exceptions.ValidationError(
_("`{}` must be either {}").format(
setting, self.__format_exception_values(VALID_BOOLEANS)
)
)
if (
export_settings['hierarchy_in_labels'].lower() == 'true'
and len(export_settings['group_sep']) == 0
):
raise exceptions.ValidationError(
_('`group_sep` must be a non-empty value')
)
if export_settings['lang'] not in all_valid_languages:
raise exceptions.ValidationError(
_("`lang` for this asset must be either {}").format(
self.__format_exception_values(all_valid_languages)
)
)
if 'fields' not in export_settings:
return export_settings
fields = export_settings['fields']
if not isinstance(fields, list):
raise exceptions.ValidationError(_('`fields` must be an array'))
if not all(map(lambda x: isinstance(x, str), fields)):
raise exceptions.ValidationError(
_('All values in the `fields` array must be strings')
)
return export_settings
def get_url(self, obj):
return reverse(
'asset-export-settings-detail',
args=(obj.asset.uid, obj.uid),
request=self.context.get('request', None),
)
@staticmethod
def __format_exception_values(values: list, sep: str = 'or') -> str:
return "{} {} '{}'".format(
', '.join([f"'{v}'" for v in values[:-1]]), sep, values[-1]
)
|
Python
| 0.000437
|
@@ -102,21 +102,9 @@
zers
-, exceptions
%0A
+
from
@@ -1716,33 +1716,34 @@
raise
-exception
+serializer
s.Validation
@@ -2181,33 +2181,34 @@
raise
-exception
+serializer
s.Validation
@@ -2628,33 +2628,34 @@
raise
-exception
+serializer
s.Validation
@@ -2910,33 +2910,34 @@
raise
-exception
+serializer
s.Validation
@@ -3266,33 +3266,34 @@
raise
-exception
+serializer
s.Validation
@@ -3638,33 +3638,34 @@
raise
-exception
+serializer
s.Validation
@@ -3822,33 +3822,34 @@
raise
-exception
+serializer
s.Validation
@@ -4207,33 +4207,34 @@
raise
-exception
+serializer
s.Validation
@@ -4353,25 +4353,26 @@
raise
-exception
+serializer
s.Valida
|
2ed332ea21c20d8e533ddcbd758755fea9da0ecd
|
Improve syntax
|
virtool/labels/api.py
|
virtool/labels/api.py
|
import virtool.http.routes
import virtool.utils
import virtool.validators
import virtool.labels.checks
import virtool.db.utils
from virtool.api.response import bad_request, json_response, no_content, not_found
routes = virtool.http.routes.Routes()
@routes.get("/api/labels")
async def find(req):
"""
Get a list of all label documents in the database.
"""
db = req.app["db"]
document = db.labels.find()
return json_response([virtool.utils.base_processor(d) async for d in document])
@routes.get("/api/labels/{label_id}")
async def get(req):
"""
Get a complete label document.
"""
document = await req.app["db"].labels.find_one(req.match_info["label_id"])
if not document:
return not_found()
return json_response(virtool.utils.base_processor(document))
@routes.post("/api/labels", schema={
"name": {
"type": "string",
"coerce": virtool.validators.strip,
"required": True,
"empty": False
},
"color": {
"type": "string",
"coerce": virtool.validators.strip,
},
"description": {
"type": "string",
"coerce": virtool.validators.strip,
"default": ""
}
})
async def create(req):
"""
Add a new label to the labels database.
"""
db = req.app["db"]
data = req["data"]
valid_color = await virtool.labels.checks.check_hex_color(req)
if not valid_color:
return bad_request("This is not a valid Hexadecimal color")
name_exist = await db.labels.count_documents({'name': data['name']})
if name_exist:
return bad_request("Label name already exists")
label_id = await virtool.db.utils.get_new_id(db.labels)
document = {
"_id": label_id,
"name": data["name"],
"color": data["color"],
"description": data["description"]
}
await db.labels.insert_one(document)
headers = {
"Location": "/api/labels/" + label_id
}
return json_response(virtool.utils.base_processor(document), status=201, headers=headers)
@routes.patch("/api/labels/{label_id}", schema={
"name": {
"type": "string",
"coerce": virtool.validators.strip,
},
"color": {
"type": "string",
"coerce": virtool.validators.strip,
},
"description": {
"type": "string",
"coerce": virtool.validators.strip,
}
})
async def edit(req):
"""
Edit an existing label.
"""
db = req.app["db"]
data = req["data"]
label_id = req.match_info["label_id"]
if data["name"]:
name_exist = await db.labels.count_documents({"_id": {"$ne": label_id}, "name": data["name"]})
if name_exist:
return bad_request("Label name already exists")
if data["color"]:
valid_color = await virtool.labels.checks.check_hex_color(req)
if not valid_color:
return bad_request("This is not a valid Hexadecimal color")
document = await db.labels.find_one_and_update({"_id": label_id}, {
"$set": data
})
if document is None:
return not_found()
return json_response(virtool.utils.base_processor(document))
@routes.delete("/api/labels/{label_id}")
async def remove(req):
"""
Remove a label.
"""
db = req.app["db"]
label_id = req.match_info["label_id"]
delete_result = await db.labels.delete_one({"_id": label_id})
if delete_result.deleted_count == 0:
return not_found()
return no_content()
|
Python
| 0.978611
|
@@ -396,24 +396,22 @@
%22%5D%0A%0A
-document
+cursor
= db.la
@@ -496,24 +496,22 @@
or d in
-document
+cursor
%5D)%0A%0A%0A@ro
@@ -712,20 +712,16 @@
if
-not
document
:%0A
@@ -716,16 +716,24 @@
document
+ is None
:%0A
@@ -1508,36 +1508,26 @@
olor%22)%0A%0A
-name_exist =
+if
await db.la
@@ -1574,27 +1574,8 @@
'%5D%7D)
-%0A%0A if name_exist
:%0A
@@ -1922,16 +1922,17 @@
ation%22:
+f
%22/api/la
@@ -1940,20 +1940,17 @@
els/
-%22 +
+%7B
label_id
%0A
@@ -1945,16 +1945,18 @@
label_id
+%7D%22
%0A %7D%0A%0A
@@ -2554,42 +2554,26 @@
if
-data%5B
%22name%22
-%5D:%0A name_exist =
+ in data and
awa
@@ -2654,37 +2654,10 @@
%22%5D%7D)
+:
%0A
-%0A if name_exist:%0A
@@ -2716,29 +2716,31 @@
%0A if
-data%5B
%22color%22
-%5D
+ in data
:%0A
|
ae9a17a5cde92efc471e46d900e52a29068083da
|
Move authenticate to UrlAuthBackendMixin.
|
sesame/backends.py
|
sesame/backends.py
|
from __future__ import unicode_literals
import hashlib
import logging
from django.conf import settings
from django.contrib.auth import backends as auth_backends
from django.contrib.auth import get_user_model
from django.core import signing
from django.core.exceptions import ImproperlyConfigured
from django.utils import crypto
from django.utils.functional import cached_property
from . import packers
logger = logging.getLogger('sesame')
class UrlAuthBackendMixin(object):
"""
Tools to authenticate against a token containing a signed user id.
Mix this class in an auth backend providing ``get_user(user_id)`` and call
``parse_token(token)`` from its ``authenticate(**credentials)``.
"""
salt = getattr(settings, 'SESAME_SALT', 'sesame')
digest = getattr(settings, 'SESAME_DIGEST', hashlib.md5)
iterations = getattr(settings, 'SESAME_ITERATIONS', 10000)
max_age = getattr(settings, 'SESAME_MAX_AGE', None)
one_time = getattr(settings, 'SESAME_ONE_TIME', False)
invalidate_on_password_change = getattr(
settings, 'SESAME_INVALIDATE_ON_PASSWORD_CHANGE', True)
def __init__(self, *args, **kwargs):
if self.max_age is None and not self.invalidate_on_password_change:
raise ImproperlyConfigured(
"Insecure configuration: set SESAME_MAX_AGE to a low value "
"or set SESAME_INVALIDATE_ON_PASSWORD_CHANGE to True")
super(UrlAuthBackendMixin, self).__init__(*args, **kwargs)
@cached_property
def signer(self):
if self.max_age is None:
return signing.Signer(salt=self.salt)
else:
return signing.TimestampSigner(salt=self.salt)
def sign(self, data):
"""
Create an URL-safe, signed token from ``data``.
"""
data = signing.b64_encode(data).decode()
return self.signer.sign(data)
def unsign(self, token):
"""
Extract the data from a signed ``token``.
"""
if self.max_age is None:
data = self.signer.unsign(token)
else:
data = self.signer.unsign(token, max_age=self.max_age)
return signing.b64_decode(data.encode())
@cached_property
def packer(self):
pk_type = get_user_model()._meta.pk.get_internal_type()
try:
Packer = packers.PACKERS[pk_type]
except KeyError:
raise NotImplementedError(
pk_type + " primary keys aren't supported at this time")
return Packer()
def get_revocation_key(self, user):
"""
When the value returned by this method changes, this revocates tokens.
It always includes the password so that changing the password revokes
existing tokens.
In addition, for one-time tokens, it also contains the last login
datetime so that logging in revokes existing tokens.
"""
value = ''
if self.invalidate_on_password_change:
value += user.password
if self.one_time:
value += str(user.last_login)
return value
def create_token(self, user):
"""
Create a signed token from a user.
"""
# The password is expected to be a secure hash but we hash it again
# for additional safety. We default to MD5 to minimize the length of
# the token. (Remember, if an attacker obtains the URL, he can already
# log in. This isn't high security.)
h = crypto.pbkdf2(
self.get_revocation_key(user),
self.salt,
self.iterations,
digest=self.digest,
)
return self.sign(self.packer.pack_pk(user.pk) + h)
def parse_token(self, token):
"""
Obtain a user from a signed token.
"""
try:
data = self.unsign(token)
except signing.SignatureExpired:
logger.debug("Expired token: %s", token)
return
except signing.BadSignature:
logger.debug("Bad token: %s", token)
return
except Exception:
logger.exception(
"Valid signature but unexpected token - if you changed "
"django-sesame settings, you must regenerate tokens")
return
user_pk, data = self.packer.unpack_pk(data)
user = self.get_user(user_pk)
if user is None:
logger.debug("Unknown token: %s", token)
return
h = crypto.pbkdf2(
self.get_revocation_key(user),
self.salt,
self.iterations,
digest=self.digest,
)
if not crypto.constant_time_compare(data, h):
logger.debug("Invalid token: %s", token)
return
logger.debug("Valid token for user %s: %s", user, token)
return user
class ModelBackend(UrlAuthBackendMixin, auth_backends.ModelBackend):
"""
Authenticates against a token containing a signed user id.
"""
def authenticate(self, request, url_auth_token=None):
"""
Check the token and return the corresponding user.
"""
try:
return self.parse_token(url_auth_token)
except TypeError:
backend = "%s.%s" % (self.__module__, self.__class__.__name__)
logger.exception("TypeError in %s, here's the traceback before "
"Django swallows it:", backend)
raise
|
Python
| 0
|
@@ -4857,158 +4857,8 @@
er%0A%0A
-%0Aclass ModelBackend(UrlAuthBackendMixin, auth_backends.ModelBackend):%0A %22%22%22%0A Authenticates against a token containing a signed user id.%0A%0A %22%22%22%0A
@@ -5317,8 +5317,159 @@
raise%0A
+%0A%0Aclass ModelBackend(UrlAuthBackendMixin, auth_backends.ModelBackend):%0A %22%22%22%0A Authenticates against a token containing a signed user id.%0A%0A %22%22%22%0A
|
b18de413007b5587b6ae4023df4de0a0f4791e79
|
load obj with decisions
|
larVolumeToObj/computation/visualization.py
|
larVolumeToObj/computation/visualization.py
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
import logging
logger = logging.getLogger(__name__)
import argparse
import sys
# """ import modules from lar-cc/lib """
import import_library as il
lib_path = il.find_library_path("larcc", "larcc.py")
sys.path.append(lib_path)
from larcc import * # noqa
from fileio import readFile
# input of test file nrn100.py (with definetion of V and FV)
# V = vertex coordinates
# FV = lists of vertex indices of every face (1-based, as required by pyplasm)
#
# sys.path.insert(1, '/Users/paoluzzi/Documents/RICERCA/pilsen/ricerca/')
# from nrn100 import *
def triangulateSquares(F,
a=[0, 1, 2], b=[2, 3, 0],
c=[1, 0, 2], d=[3, 2, 0]
):
"""
Convert squares to triangles
"""
FT = []
for face in F:
FT.append([face[a[0]], face[a[1]], face[a[2]]])
FT.append([face[b[0]], face[b[1]], face[b[2]]])
# FT.append([face[c[0]], face[c[1]], face[c[2]]])
# FT.append([face[d[0]], face[d[1]], face[d[2]]])
# FT.append([face[0], face[3], face[2]])
return FT
# scipy.sparse matrices required
# Computation of Vertex-to-vertex adjacency matrix
#
def check_references(V, F):
"""
Check that face is referenced to existing vertex
"""
for face in F:
lenV = len(V)
for v in face:
if v > lenV or v < 0:
return False
return True
def visualize(V, FV, explode=False):
import time
# VIEW(STRUCT(MKPOLS((V, FV))))
t0 = time.time()
mkpols = MKPOLS((V, FV))
t1 = time.time()
logger.debug("MKPOLS() done in %ss" % (str(t1 - t0)))
if explode:
VIEW(EXPLODE(1.2, 1.2, 1.2)(mkpols))
else:
struct = STRUCT(mkpols)
t2 = time.time()
logger.debug("STRUCT() done in %ss" % (str(t2 - t1)))
VIEW(struct)
def visualize_plasm(V, FV):
# import ipdb; ipdb.set_trace() # noqa BREAKPOINT
if len(FV[0]) > 3:
FV = triangulateSquares(FV)
logger.debug("triangulation done")
FV1 = (np.asarray(FV) + 1).tolist()
logger.debug(" + 1 done")
VIEW(MKPOL([V, FV1, []]))
# VIEW(MKPOL([V, AA(AA(lambda k:k + 1))(FV), []]))
def visualizeObj(objfile, explode=False):
V, FV = readFile(objfile, ftype='obj')
visualize(V, FV, explode)
def main():
logger = logging.getLogger()
logger.setLevel(logging.WARNING)
ch = logging.StreamHandler()
logger.addHandler(ch)
# logger.debug('input params')
# input parser
parser = argparse.ArgumentParser(
description="Obj file visualization"
)
parser.add_argument(
'-i', '--inputfile',
default=None,
required=True,
help='input file'
)
parser.add_argument(
'-ft', '--filetype',
default='auto',
help='filetype'
)
parser.add_argument(
'-v', '--visualization', action='store_true',
help='Use visualization')
parser.add_argument(
'-d', '--debug', action='store_true',
help='Debug mode')
args = parser.parse_args()
if args.debug:
logger.setLevel(logging.DEBUG)
V, FV = readFile(args.inputfile, ftype=args.filetype)
logger.info("Data readed from ' %s" % (args.inputfile))
visualize_plasm(V, FV)
if __name__ == "__main__":
main()
|
Python
| 0
|
@@ -1534,24 +1534,221 @@
de=False):%0D%0A
+ if explode:%0D%0A visualize_lar(V, FV, explode)%0D%0A else:%0D%0A # if you dont need explode, this is faster%0D%0A visualize_plasm(V, FV)%0D%0A%0D%0A%0D%0Adef visualize_lar(V, FV, explode=False):%0D%0A
import t
@@ -2546,48 +2546,528 @@
-V, FV = readFile(objfile, ftype='obj')%0D%0A
+%22%22%22%0D%0A Try use Batch.openObj it is fast. But cannot read quadrilaterals and%0D%0A floating point number.%0D%0A If it fail there is backup version.%0D%0A If explode fucntionality is wanted, it is always using LAR visualization%0D%0A wich is slow.%0D%0A %22%22%22%0D%0A import step_loadmodel%0D%0A if explode:%0D%0A try:%0D%0A step_loadmodel(objfile)%0D%0A except:%0D%0A V, FV = readFile(objfile, ftype='obj')%0D%0A visualize(V, FV, explode)%0D%0A else:%0D%0A V, FV = readFile(objfile, ftype='obj')%0D%0A
@@ -3781,32 +3781,146 @@
.add_argument(%0D%0A
+ '-e', '--explode', action='store_true',%0D%0A help='Explode mode. Slower.')%0D%0A parser.add_argument(%0D%0A
'-d', '-
@@ -4077,16 +4077,18 @@
)%0D%0A%0D%0A
+ #
V, FV =
@@ -4136,24 +4136,26 @@
type)%0D%0A%0D%0A
+ #
logger.info
@@ -4209,36 +4209,64 @@
visualize
-_plasm(V, FV
+Obj(args.inputfile, explode=args.explode
)%0D%0A%0D%0Aif __na
|
b6371a582ca944094b1c7955f2c9e908535ccc5d
|
clean import
|
virtuoso/textindex.py
|
virtuoso/textindex.py
|
from sqlalchemy import Column
from sqlalchemy.orm.attributes import InstrumentedAttribute
from sqlalchemy.schema import _CreateDropBase, Table, Index
from sqlalchemy.sql.expression import (
TextClause, func, literal_column, ColumnCollection, ClauseElement)
from sqlalchemy.sql import ddl
from sqlalchemy.sql.base import _bind_or_error
class TextIndex(Index):
__visit_name__ = 'text_index'
def __init__(
self, column, clusters=None, key=None, language=None,
encoding=None, do_insert=True, transform=None):
column = self.normalize_column(column)
self.column = column
self.table = None
super(TextIndex, self).__init__(None, column)
self.clusters = [self.normalize_column(c) for c in (clusters or ())]
self.key = self.normalize_column(key) if key else None
self.language = language
self.encoding = encoding
self.do_insert = do_insert
self.transform = transform
def _set_parent(self, table):
super(TextIndex, self)._set_parent(table)
self.name = "{table}_{column}_WORDS".format(
table=table.name,
column=self.column.name)
@staticmethod
def normalize_column(column):
if isinstance(column, str):
pass # convert to column
if isinstance(column, InstrumentedAttribute):
mapper = column.parent
column = mapper.c[column.name]
assert isinstance(column, Column)
return column
def contains(self, query_str, ranges=None, offband=None, descending=False, score_limit=None,
start_id=None, end_id=None):
"""Creates a clause with contains arguments"""
args = [self.column, query_str]
if descending:
args.append(literal_column('DESCENDING'))
if start_id:
args.extend((literal_column('START_ID'), start_id))
if end_id:
args.extend((literal_column('END_ID'), end_id))
if score_limit:
args.extend((literal_column('SCORE_LIMIT'), score_limit))
if ranges:
# Should be an alias
args.extend((literal_column('RANGES'), ranges))
if offband is None:
offband = self.clusters
else:
offband = [self.normalize_column(c) for c in offband]
for c in offband:
args.extend((literal_column('OFFBAND'), c))
return func.contains(*args)
score_name = literal_column('SCORE')
class CreateTextIndex(_CreateDropBase):
"""Represent a CREATE TEXT INDEX statement."""
__visit_name__ = "create_text_index"
class DropTextIndex(_CreateDropBase):
"""Represent a DROP TEXT INDEX statement."""
__visit_name__ = "drop_text_index"
class SchemaGeneratorWithTextIndex(ddl.SchemaGenerator):
def visit_text_index(self, index):
self.connection.execute(CreateTextIndex(index))
class SchemaDropperWithTextIndex(ddl.SchemaDropper):
def visit_table(self, table, drop_ok=False, _is_metadata_operation=False):
if not drop_ok and not self._can_drop_table(table):
return
# Ideally should come before the hook, but this will do
if hasattr(table, 'indexes'):
for index in table.indexes:
self.traverse_single(index)
super(SchemaDropperWithTextIndex, self).visit_table(
table, drop_ok, _is_metadata_operation)
def visit_text_index(self, index):
self.connection.execute(DropTextIndex(index))
class TableWithTextIndex(Table):
def create(self, bind=None, checkfirst=False):
if bind is None:
bind = _bind_or_error(self)
bind._run_visitor(SchemaGeneratorWithTextIndex,
self,
checkfirst=checkfirst)
def drop(self, bind=None, checkfirst=False):
if bind is None:
bind = _bind_or_error(self)
bind._run_visitor(SchemaDropperWithTextIndex,
self,
checkfirst=checkfirst)
|
Python
| 0.000001
|
@@ -186,25 +186,8 @@
rt (
-%0A TextClause,
func
@@ -206,41 +206,8 @@
lumn
-, ColumnCollection, ClauseElement
)%0Afr
|
df5b98422b1198f353cf3b0df20429b29334bd06
|
Add separator kwarg to `StringCommand` init.
|
pyinfra/api/command.py
|
pyinfra/api/command.py
|
from six.moves import shlex_quote
from .operation_kwargs import get_executor_kwarg_keys
class MaskString(str):
pass
class QuoteString(object):
def __init__(self, obj):
self.object = obj
class PyinfraCommand(object):
def __init__(self, *args, **kwargs):
self.executor_kwargs = {
key: kwargs[key]
for key in get_executor_kwarg_keys()
if key in kwargs
}
def __eq__(self, other):
if isinstance(other, self.__class__) and repr(self) == repr(other):
return True
return False
class StringCommand(PyinfraCommand):
def __init__(self, *bits, **kwargs):
super(StringCommand, self).__init__(**kwargs)
self.bits = bits
def __str__(self):
return self.get_masked_value()
def __repr__(self):
return 'StringCommand({0})'.format(self.get_masked_value())
def _get_all_bits(self, bit_accessor):
all_bits = []
for bit in self.bits:
quote = False
if isinstance(bit, QuoteString):
quote = True
bit = bit.object
if isinstance(bit, StringCommand):
bit = bit_accessor(bit)
if quote:
bit = shlex_quote(bit)
all_bits.append(bit)
return all_bits
def get_raw_value(self):
return ' '.join(self._get_all_bits(lambda bit: bit.get_raw_value()))
def get_masked_value(self):
return ' '.join([
'***' if isinstance(bit, MaskString) else bit
for bit in self._get_all_bits(lambda bit: bit.get_masked_value())
])
class FileUploadCommand(PyinfraCommand):
def __init__(self, src, dest, **kwargs):
super(FileUploadCommand, self).__init__(**kwargs)
self.src = src
self.dest = dest
def __repr__(self):
return 'FileUploadCommand({0}, {1})'.format(self.src, self.dest)
class FileDownloadCommand(PyinfraCommand):
def __init__(self, src, dest, **kwargs):
super(FileDownloadCommand, self).__init__(**kwargs)
self.src = src
self.dest = dest
def __repr__(self):
return 'FileDownloadCommand({0}, {1})'.format(self.src, self.dest)
class FunctionCommand(PyinfraCommand):
def __init__(self, function, args, func_kwargs, **kwargs):
super(FunctionCommand, self).__init__(**kwargs)
self.function = function
self.args = args
self.kwargs = func_kwargs
def __repr__(self):
return 'FunctionCommand({0}, {1}, {2})'.format(
self.function.__name__,
self.args,
self.kwargs,
)
|
Python
| 0
|
@@ -642,16 +642,31 @@
, *bits,
+ separator=' ',
**kwarg
@@ -743,24 +743,59 @@
.bits = bits
+%0A self.separator = separator
%0A%0A def __
@@ -1419,27 +1419,38 @@
return
-' '
+self.separator
.join(self._
@@ -1454,32 +1454,45 @@
f._get_all_bits(
+%0A
lambda bit: bit.
@@ -1506,16 +1506,26 @@
_value()
+,%0A
))%0A%0A
@@ -1567,19 +1567,30 @@
return
-' '
+self.separator
.join(%5B%0A
|
771daafda877050c8fe23b034a0c51ec97502715
|
update code which generates list of possible article names
|
pages/controllers/blog_article.py
|
pages/controllers/blog_article.py
|
from core import database as database
from core.exceptions import NotFoundError, ServerError
from core.markdown import MarkdownParser
from core.article_helpers import get_article
import core.functions
import yaml
def get_page_data(path, get, post, variables):
article = get_article(get.get('name', ''))
if not article:
raise NotFoundError("No article with name: '{}'".format(get.get('name', '')))
markdownParser = MarkdownParser('blog/%s/' % (article.get('name')))
raw_articule = article['body']
article['body'] = markdownParser.render(article['body'])
return {
'article': article,
'title': article.get('title', ''),
'raw_article': raw_articule
}
def get_possible_paths():
articles = database.Table('article').filter()
queries = []
for article in articles:
queries.append('blog/%s' % article.get('name'))
return queries
|
Python
| 0.00004
|
@@ -171,16 +171,34 @@
_article
+, get_all_articles
%0Aimport
@@ -738,40 +738,24 @@
s =
-database.Table('article').filter
+get_all_articles
()%0A
|
7060f48df582dcfae1768cc37d00a25e0e2e1f6f
|
Comment post endpoint return a ksopn, fix issue saving comments add post id and convert it to int
|
app/views/comment_view.py
|
app/views/comment_view.py
|
from flask import jsonify
from flask_classy import FlaskView
from flask_user import current_user, login_required
from ..models import CommentModel, PostModel
from ..forms import CommentForm
class Comment(FlaskView):
def get(self):
pass
def all(self, post_id):
comment = CommentModel()
comment.query.add_filter('post_id', '=', int(post_id))
return jsonify(comment.fetch())
@login_required
def post(self, post_id):
form = CommentForm()
if form.validate_on_submit():
post = PostModel().get(post_id)
post = PostModel(**post)
comment = CommentModel(user=current_user.username, **form.data)
comment.put()
post.add_comment(comment.id)
return "ALEYUYA"
return "form.errors"
|
Python
| 0.000025
|
@@ -669,16 +669,108 @@
sername,
+%0A post_id=int(post_id),%0A
**form.
@@ -775,16 +775,16 @@
m.data)%0A
-
@@ -865,17 +865,29 @@
urn
-%22ALEYUYA%22
+jsonify(comment.data)
%0A
|
c1044e25e18afd78b3fda8fd9b00a4f67cfbbc65
|
allow markdownlint to be disabled for specific lines (#4)
|
pymarkdownlint/lint.py
|
pymarkdownlint/lint.py
|
from __future__ import print_function
from pymarkdownlint import rules
class MarkdownLinter(object):
def __init__(self, config):
self.config = config
@property
def line_rules(self):
return [rule for rule in self.config.rules if isinstance(rule, rules.LineRule)]
def _apply_line_rules(self, markdown_string):
""" Iterates over the lines in a given markdown string and applies all the enabled line rules to each line """
all_violations = []
lines = markdown_string.split("\n")
line_rules = self.line_rules
line_nr = 1
for line in lines:
for rule in line_rules:
violation = rule.validate(line)
if violation:
violation.line_nr = line_nr
all_violations.append(violation)
line_nr += 1
return all_violations
def lint(self, markdown_string):
all_violations = []
all_violations.extend(self._apply_line_rules(markdown_string))
return all_violations
def lint_files(self, files):
""" Lints a list of files.
:param files: list of files to lint
:return: a list of violations found in the files
"""
all_violations = []
for filename in files:
with open(filename, 'r') as f:
content = f.read()
violations = self.lint(content)
all_violations.extend(violations)
for e in violations:
print("{0}:{1}: {2} {3}".format(filename, e.line_nr, e.rule_id, e.message))
return len(all_violations)
|
Python
| 0.000002
|
@@ -581,24 +581,49 @@
line_nr = 1%0A
+ ignoring = False%0A
for
@@ -633,24 +633,309 @@
e in lines:%0A
+ if ignoring:%0A if line.strip() == '%3C!-- markdownlint:enable --%3E':%0A ignoring = False%0A else:%0A if line.strip() == '%3C!-- markdownlint:disable --%3E':%0A ignoring = True%0A continue%0A%0A
@@ -954,24 +954,28 @@
line_rules:%0A
+
@@ -1026,16 +1026,20 @@
+
+
if viola
@@ -1044,16 +1044,20 @@
lation:%0A
+
@@ -1096,16 +1096,20 @@
line_nr%0A
+
|
e4ecc0f8049f1388188f0a64b373a7e90b2dc1e9
|
Update at 2017-07-22 15-01-48
|
plot.py
|
plot.py
|
from sys import argv
import matplotlib as mpl
mpl.use('Agg')
import seaborn as sns
sns.set_style("darkgrid")
import matplotlib.pyplot as plt
import pandas as pd
# from keras.utils import plot_model
# plot_model(model, to_file='model.png', show_shapes=True, show_layer_names=False)
def plot_svg(log, name):
df = pd.read_csv(log)
graph = Path('./graph/')
loss_path = graph / (name + '_loss.svg')
acc_path = graph / (name + '_acc.svg')
keys = ['loss', 'val_loss']
ax = df[keys].plot(kind='line')
ax.set_xlabel('epoch')
ax.set_ylabel('loss(binary crossentropy)')
plt.savefig(str(loss_path))
keys = ['binary_accuracy', 'val_binary_accuracy']
ax = df[keys].plot(kind='line')
ax.set_xlabel('epoch')
ax.set_ylabel('accuracy')
plt.savefig(str(acc_path))
if __name__ == '__main__':
log, name = argv[1], argv[2]
plot_svg(log, name)
|
Python
| 0
|
@@ -14,16 +14,41 @@
rt argv%0A
+from pathlib import Path%0A
import m
|
d783efe4d5e81a1049ff0cb02c96d32ce371a434
|
Add handling for MozillaCookieJar for persistence
|
simplemediawiki.py
|
simplemediawiki.py
|
# python-simplemediawiki - Extremely low-level wrapper to the MediaWiki API
# Copyright (C) 2010 Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
"""
simplemediawiki is an extremely low-level wrapper to the MediaWiki API. It
automatically handles cookies and gzip compression so that you can make basic
calls to the API in the easiest way possible. It also provides a few functions
to make day-to-day API access easier.
To use this module, instantiate a MediaWiki object, passing it the URL of
api.php for the wiki you want to work with. Calls go through MediaWiki.call().
A generic login wrapper as well as functions to determine limits and get a list
of namespaces are provided for your convenience.
>>> from simplemediawiki import MediaWiki
>>> wiki = MediaWiki('http://en.wikipedia.org/w/api.php')
>>> wiki.call({'action': 'query', 'prop': 'revisions', 'titles': 'Main Page'})
{u'query': {u'pages': {...}}}
"""
import cookielib
import gzip
from iso8601 import iso8601
import json
from StringIO import StringIO
import urllib
import urllib2
class MediaWiki():
"""
Class to represent a MediaWiki installation with an enabled API.
api_url: URL to api.php (usually similar to http://example.com/w/api.php)
"""
_cj = cookielib.CookieJar()
_high_limits = None
_namespaces = None
_psuedo_namespaces = None
def __init__(self, api_url):
self._api_url = api_url
def call(self, params):
"""
Make a call to the wiki. Returns a dictionary that represents the JSON
returned by the API.
"""
params['format'] = 'json'
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self._cj))
request = urllib2.Request(self._api_url, urllib.urlencode(params))
request.add_header('Accept-encoding', 'gzip')
response = opener.open(request)
if response.headers.get('Content-Encoding') == 'gzip':
compressed = StringIO(response.read())
gzipper = gzip.GzipFile(fileobj=compressed)
data = gzipper.read()
else:
data = response.read()
return json.loads(data)
def login(self, user, passwd, token=None):
"""
Convenience function for logging into the wiki. It should never be
necessary to provide a token argument; it is part of the login process
since MediaWiki 1.15.3 (see MediaWiki bug 23076).
"""
data = {'action': 'login',
'lgname': user,
'lgpassword': passwd}
if token:
data['lgtoken'] = token
result = self.call(data)
if result['login']['result'] == 'Success':
return True
elif result['login']['result'] == 'NeedToken' and not token:
return self.login(user, passwd, result['login']['token'])
else:
return False
def limits(self, low, high):
"""
Convenience function for determining appropriate limits in the API. If
the logged in user has the "apihighlimits" right, it will return the
high argument; otherwise it will return the low argument.
"""
if self._high_limits == None:
result = self.call({'action': 'query',
'meta': 'userinfo',
'uiprop': 'rights'})
self._high_limits = 'apihighlimits' in \
result['query']['userinfo']['rights']
if self._high_limits:
return high
else:
return low
def namespaces(self, psuedo=True):
"""
Fetches a list of namespaces for this wiki.
"""
if self._namespaces == None:
result = self.call({'action': 'query',
'meta': 'siteinfo',
'siprop': 'namespaces'})
self._namespaces = {}
self._psuedo_namespaces = {}
for nsid in result['query']['namespaces']:
if int(nsid) >= 0:
self._namespaces[int(nsid)] = \
result['query']['namespaces'][nsid]['*']
else:
self._psuedo_namespaces[int(nsid)] = \
result['query']['namespaces'][nsid]['*']
if psuedo:
retval = {}
retval.update(self._namespaces)
retval.update(self._psuedo_namespaces)
return retval
else:
return self._namespaces
@staticmethod
def parse_date(date):
"""
Converts dates provided by the MediaWiki API into datetime.datetime
objects.
"""
return iso8601.parse_date(date)
__author__ = 'Ian Weller <ian@ianweller.org>'
__version__ = '1.0'
|
Python
| 0
|
@@ -1846,40 +1846,8 @@
%22%22%22%0A
- _cj = cookielib.CookieJar()%0A
@@ -1950,16 +1950,34 @@
api_url
+, cookie_file=None
):%0A
@@ -2002,16 +2002,386 @@
api_url
+%0A if cookie_file:%0A self._cj = cookielib.MozillaCookieJar(cookie_file)%0A try:%0A self._cj.load()%0A except IOError:%0A self._cj.save()%0A self._cj.load()%0A else:%0A self._cj = cookielib.CookieJar()%0A self._opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self._cj))
%0A%0A de
@@ -2572,85 +2572,8 @@
on'%0A
- opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self._cj))%0A
@@ -2716,16 +2716,22 @@
ponse =
+self._
opener.o
@@ -2743,16 +2743,105 @@
equest)%0A
+ if isinstance(self._cj, cookielib.MozillaCookieJar):%0A self._cj.save()%0A
|
dc3d5ae5b6b4bed40218a6dd6beb33525c8aaa51
|
Comment out some lines in test, MPICH2 generates misterious warning
|
test/test_file.py
|
test/test_file.py
|
from mpi4py import MPI
import mpiunittest as unittest
import os, tempfile
class TestFileBase(object):
COMM = MPI.COMM_NULL
FILE = MPI.FILE_NULL
prefix = 'mpi4py'
def setUp(self):
self.fd, self.fname = tempfile.mkstemp(prefix=self.prefix)
self.amode = MPI.MODE_RDWR | MPI.MODE_CREATE
#self.amode |= MPI.MODE_DELETE_ON_CLOSE
try:
self.FILE = MPI.File.Open(self.COMM,
self.fname, self.amode,
MPI.INFO_NULL)
except Exception:
os.close(self.fd)
os.remove(self.fname)
raise
def tearDown(self):
if self.FILE == MPI.FILE_NULL: return
os.close(self.fd)
amode = self.FILE.amode
self.FILE.Close()
if not (amode & MPI.MODE_DELETE_ON_CLOSE):
MPI.File.Delete(self.fname, MPI.INFO_NULL)
def testPreallocate(self):
self.FILE.Preallocate(0)
size = self.FILE.Get_size()
self.assertEqual(size, 0)
self.FILE.Preallocate(1)
size = self.FILE.Get_size()
self.assertEqual(size, 1)
self.FILE.Preallocate(100)
size = self.FILE.Get_size()
self.assertEqual(size, 100)
self.FILE.Preallocate(10)
size = self.FILE.Get_size()
self.assertEqual(size, 100)
self.FILE.Preallocate(200)
size = self.FILE.Get_size()
self.assertEqual(size, 200)
def testGetSetSize(self):
size = self.FILE.Get_size()
self.assertEqual(size, 0)
size = self.FILE.size
self.assertEqual(size, 0)
self.FILE.Set_size(100)
size = self.FILE.Get_size()
self.assertEqual(size, 100)
size = self.FILE.size
self.assertEqual(size, 100)
def testGetGroup(self):
fgroup = self.FILE.Get_group()
cgroup = self.COMM.Get_group()
gcomp = MPI.Group.Compare(fgroup, cgroup)
self.assertEqual(gcomp, MPI.IDENT)
fgroup.Free()
cgroup.Free()
def testGetAmode(self):
amode = self.FILE.Get_amode()
self.assertEqual(self.amode, amode)
self.assertEqual(self.FILE.amode, self.amode)
def testGetSetInfo(self):
info = self.FILE.Get_info()
self.FILE.Set_info(info)
info.Free()
def testGetSetView(self):
fsize = 100 * MPI.DOUBLE.size
self.FILE.Set_size(fsize)
displacements = range(100)
datatypes = [MPI.SHORT, MPI.INT, MPI.LONG, MPI.FLOAT, MPI.DOUBLE]
datareps = ['native'] #['native', 'internal', 'external32']
for disp in displacements:
for dtype in datatypes:
for datarep in datareps:
etype, ftype = dtype, dtype
self.FILE.Set_view(disp, etype, ftype,
datarep, MPI.INFO_NULL)
of, et, ft, dr = self.FILE.Get_view()
self.assertEqual(disp, of)
self.assertEqual(etype, et)
self.assertEqual(ftype, ft)
self.assertEqual(datarep, dr)
#try: et.Free()
#except MPI.Exception: pass
#try: ft.Free()
#except MPI.Exception: pass
def testGetSetAtomicity(self):
atom = self.FILE.Get_atomicity()
self.assertFalse(atom)
for atomicity in [True, False] * 4:
self.FILE.Set_atomicity(atomicity)
atom = self.FILE.Get_atomicity()
self.assertEqual(atom, atomicity)
def testSync(self):
self.FILE.Sync()
def testSeekGetPosition(self):
offset = 0
self.FILE.Seek(offset, MPI.SEEK_END)
self.FILE.Seek(offset, MPI.SEEK_CUR)
self.FILE.Seek(offset, MPI.SEEK_SET)
pos = self.FILE.Get_position()
self.assertEqual(pos, offset)
def testSeekGetPositionShared(self):
offset = 0
self.FILE.Seek_shared(offset, MPI.SEEK_END)
self.FILE.Seek_shared(offset, MPI.SEEK_CUR)
self.FILE.Seek_shared(offset, MPI.SEEK_SET)
pos = self.FILE.Get_position_shared()
self.assertEqual(pos, offset)
def testGetByteOffset(self):
for offset in range(10):
disp = self.FILE.Get_byte_offset(offset)
self.assertEqual(disp, offset)
def testGetTypeExtent(self):
extent = self.FILE.Get_type_extent(MPI.BYTE)
self.assertEqual(extent, 1)
def testGetErrhandler(self):
eh = self.FILE.Get_errhandler()
self.assertEqual(eh, MPI.ERRORS_RETURN)
eh.Free()
class TestFileNull(unittest.TestCase):
def setUp(self):
self.eh_bak = MPI.FILE_NULL.Get_errhandler()
def tearDown(self):
MPI.FILE_NULL.Set_errhandler(self.eh_bak)
self.eh_bak.Free()
def testGetSetErrhandler(self):
eh = MPI.FILE_NULL.Get_errhandler()
self.assertEqual(eh, MPI.ERRORS_RETURN)
eh.Free()
MPI.FILE_NULL.Set_errhandler(MPI.ERRORS_ARE_FATAL)
eh = MPI.FILE_NULL.Get_errhandler()
self.assertEqual(eh, MPI.ERRORS_ARE_FATAL)
eh.Free()
MPI.FILE_NULL.Set_errhandler(MPI.ERRORS_RETURN)
eh = MPI.FILE_NULL.Get_errhandler()
self.assertEqual(eh, MPI.ERRORS_RETURN)
eh.Free()
class TestFileSelf(TestFileBase, unittest.TestCase):
COMM = MPI.COMM_SELF
prefix = TestFileBase.prefix + ('-%d' % MPI.COMM_WORLD.Get_rank())
_name, _version = MPI.get_vendor()
if _name == 'Open MPI':
if (_version < (1,2,7) and \
MPI.Query_thread() > MPI.THREAD_SINGLE):
del TestFileBase.testPreallocate
del TestFileBase.testGetSetInfo
del TestFileBase.testGetSetAtomicity
del TestFileBase.testSync
del TestFileBase.testGetSetSize
del TestFileBase.testGetSetView
del TestFileBase.testGetByteOffset
del TestFileBase.testGetTypeExtent
del TestFileBase.testSeekGetPosition
del TestFileBase.testSeekGetPositionShared
else:
try:
dummy = TestFileBase()
dummy.COMM = MPI.COMM_SELF
dummy.setUp()
dummy.tearDown()
del dummy
except NotImplementedError:
del TestFileNull
del TestFileBase
del TestFileSelf
if __name__ == '__main__':
unittest.main()
|
Python
| 0
|
@@ -938,32 +938,126 @@
(self):%0A
+## XXX MPICH2 emits a nesting level warning%0A ## when preallocating zero size.%0A #
self.FILE.Preall
@@ -1065,32 +1065,33 @@
cate(0)%0A
+#
size = self.FILE
@@ -1102,32 +1102,33 @@
_size()%0A
+#
self.assertEqual
|
d426ce1a4c00abe08444efcf330f12ebc2571271
|
Fix #36, Import print_function to support python3 print syntax
|
resin/settings.py
|
resin/settings.py
|
import ConfigParser
import os.path as Path
import os
import shutil
import sys
from . import exceptions
from .resources import Message
class Settings(object):
"""
This class handles settings for Resin Python SDK.
Attributes:
HOME_DIRECTORY (str): home directory path.
CONFIG_SECTION (str): section name in configuration file.
CONFIG_FILENAME (str): configuration file name.
_setting (dict): default value to settings.
"""
HOME_DIRECTORY = Path.expanduser('~')
CONFIG_SECTION = 'Settings'
CONFIG_FILENAME = 'resin.cfg'
_setting = {
'pine_endpoint': 'https://api.resin.io/ewa/',
'api_endpoint': 'https://api.resin.io/',
'data_directory': Path.join(HOME_DIRECTORY, '.resin'),
# cache time : 1 week in milliseconds
'image_cache_time': (1 * 1000 * 60 * 60 * 24 * 7),
# token refresh interval: 1 hour in milliseconds
'token_refresh_interval': (1 * 1000 * 60 * 60)
}
_setting['cache_directory'] = Path.join(_setting['data_directory'],
'cache')
def __init__(self):
config_file_path = Path.join(self._setting['data_directory'],
self.CONFIG_FILENAME)
try:
self.__read_settings()
except:
# Backup old settings file if it exists.
try:
if Path.isfile(config_file_path):
shutil.move(config_file_path,
Path.join(self._setting['data_directory'],
"{0}.{1}".format(self.CONFIG_FILENAME,
'old')))
except OSError:
pass
self.__write_settings()
print(Message.INVALID_SETTINGS.format(path=config_file_path), file=sys.stderr)
def __write_settings(self):
config = ConfigParser.ConfigParser()
config.add_section(self.CONFIG_SECTION)
for key in self._setting:
config.set(self.CONFIG_SECTION, key, self._setting[key])
if not Path.isdir(self._setting['data_directory']):
os.makedirs(self._setting['data_directory'])
with open(Path.join(self._setting['data_directory'],
self.CONFIG_FILENAME), 'wb') as config_file:
config.write(config_file)
def __read_settings(self):
config_reader = ConfigParser.ConfigParser()
config_reader.read(Path.join(self._setting['data_directory'],
self.CONFIG_FILENAME))
config_data = {}
options = config_reader.options(self.CONFIG_SECTION)
for option in options:
try:
config_data[option] = config_reader.get(self.CONFIG_SECTION,
option)
except:
config_data[option] = None
self._setting = config_data
def has(self, key):
"""
Check if a setting exists.
Args:
key (str): setting.
Returns:
bool: True if exists, False otherwise.
Examples:
>>> resin.settings.has('api_endpoint')
True
"""
self.__read_settings()
if key in self._setting:
return True
return False
def get(self, key):
"""
Get a setting value.
Args:
key (str): setting.
Returns:
str: setting value.
Raises:
InvalidOption: If getting a non-existent setting.
Examples:
>>> resin.settings.get('api_endpoint')
'https://api.resin.io/'
"""
try:
self.__read_settings()
return self._setting[key]
except KeyError:
raise exceptions.InvalidOption(key)
def get_all(self):
"""
Get all settings.
Returns:
dict: all settings.
Examples:
>>> resin.settings.get_all()
{'image_cache_time': '604800000', 'api_endpoint': 'https://api.resin.io/', 'data_directory': '/root/.resin', 'token_refresh_interval': '3600000', 'cache_directory': '/root/.resin/cache', 'pine_endpoint': 'https://api.resin.io/ewa/'}
"""
self.__read_settings()
return self._setting
def set(self, key, value):
"""
Set value for a setting.
Args:
key (str): setting.
value (str): setting value.
Examples:
>>> resin.settings.set(key='tmp',value='123456')
(Empty Return)
"""
self._setting[key] = str(value)
self.__write_settings()
def remove(self, key):
"""
Remove a setting.
Args:
key (str): setting.
Returns:
bool: True if successful, False otherwise.
Examples:
# Remove an existing key from settings
>>> resin.settings.remove('tmp')
True
# Remove a non-existing key from settings
>>> resin.settings.remove('tmp1')
False
"""
# if key is not in settings, return False
result = self._setting.pop(key, False)
if result is not False:
self.__write_settings()
return True
return False
|
Python
| 0
|
@@ -1,12 +1,51 @@
+from __future__ import print_function%0A%0A
import Confi
|
6e7dfe97cdce58f892f88560e4b4709e6625e6bd
|
Clean up package level imports
|
metatlas/__init__.py
|
metatlas/__init__.py
|
__version__ = '0.2'
from .mzml_loader import mzml_to_hdf
from .h5_query import plot_heatmap, plot_spectrogram, plot_xic
from .h5_query import get_data, get_XIC, get_HeatMapRTMZ, get_spectrogram
|
Python
| 0
|
@@ -113,11 +113,11 @@
lot_
-xic
+XIC
%0Afro
@@ -162,19 +162,15 @@
get_
-HeatM
+heatm
ap
-RTMZ
, ge
|
db6a6da8fe1bdd73fbd971153a4fda6975fc7b4e
|
update version
|
methylpy/__init__.py
|
methylpy/__init__.py
|
__version__ = '1.2.8'
|
Python
| 0
|
@@ -16,7 +16,7 @@
1.2.
-8
+9
'%0A
|
8234a22ca090c38b80ffd650b490d1dd8cbe766d
|
test for fix/18
|
test/test_ipv4.py
|
test/test_ipv4.py
|
from csirtg_indicator import Indicator
from csirtg_indicator.exceptions import InvalidIndicator
def _not(data):
for d in data:
d = Indicator(d)
assert d.itype is not 'ipv4'
def test_ipv4_ipv6():
data = ['2001:1608:10:147::21', '2001:4860:4860::8888']
_not(data)
def test_ipv4_fqdn():
data = ['example.org', '1.2.3.4.com', 'xn----jtbbmekqknepg3a.xn--p1ai']
_not(data)
def test_ipv4_urls():
data = [
'http://192.168.1.1/1.html',
'http://www41.xzmnt.com',
'http://get.ahoybest.com/n/3.6.16/12205897/microsoft lync server 2010.exe'
]
_not(data)
def test_ipv4_ok():
data = ['192.168.1.0/24', '192.168.1.1', '255.255.255.255']
for d in data:
assert Indicator(indicator=d).itype is 'ipv4'
def test_ipv4_nok():
data = ['127.0.0.0/1', '128.205.0.0/8']
for d in data:
try:
Indicator(indicator=d)
except InvalidIndicator as e:
pass
else:
raise SystemError('mis-handled network')
def test_ipv4_private():
data = [
'128.205.1.0/24', '2001:1608:10:147::21', '2001:4860::8888/64',
u'106.51.30.0', '112.133.246.73'
]
for d in data:
assert not Indicator(indicator=d).is_private()
assert Indicator('192.168.1.1').is_private()
|
Python
| 0
|
@@ -1285,26 +1285,27 @@
cator('1
-9
+7
2.16
-8.1.1
+.30.32
').is_pr
|
a8090276b86e12a798be56000dc9831b07544ead
|
disable review test for now
|
test/test_main.py
|
test/test_main.py
|
import os
import sys
import unittest
from mock import patch
import json
import shutil
import satsearch.main as main
import satsearch.config as config
from nose.tools import raises
testpath = os.path.dirname(__file__)
config.DATADIR = testpath
class Test(unittest.TestCase):
""" Test main module """
args = '--date 2017-01-01 --satellite_name Landsat-8'.split(' ')
def test_main(self):
""" Run main function """
scenes = main.main(date='2017-01-01', satellite_name='Landsat-8')
self.assertEqual(len(scenes.scenes), 564)
def test_main_options(self):
""" Test main program with output options """
fname = os.path.join(testpath, 'test_main-save.json')
scenes = main.main(date='2017-01-01', satellite_name='Landsat-8', save=fname, printsearch=True, printcal=True, printmd=[])
self.assertEqual(len(scenes.scenes), 564)
self.assertTrue(os.path.exists(fname))
os.remove(fname)
self.assertFalse(os.path.exists(fname))
@raises(ValueError)
def test_main_review_error(self):
""" Run review feature without envvar set """
scenes = main.main(date='2017-01-01', satellite_name='Landsat-8', review=True)
def test_cli(self):
""" Run CLI program """
with patch.object(sys, 'argv', ['testprog'] + self.args):
n = main.cli()
self.assertEqual(n, 564)
def test_main_download(self):
""" Test main program with downloading """
with open(os.path.join(testpath, 'aoi1.geojson')) as f:
aoi = json.dumps(json.load(f))
scenes = main.main(date_from='2017-01-05', date_to='2017-01-21', satellite_name='Landsat-8',
intersects=aoi, download=['thumb', 'MTL'])
for scene in scenes.scenes:
self.assertTrue(os.path.exists(scene.filenames['thumb']))
self.assertTrue(os.path.exists(scene.filenames['MTL']))
shutil.rmtree(os.path.join(testpath, scene.platform))
|
Python
| 0
|
@@ -1030,32 +1030,33 @@
eError)%0A def
+_
test_main_review
@@ -1119,24 +1119,58 @@
var set %22%22%22%0A
+ os.setenv('IMGCAT', None)%0A
scen
|
aa3e36cc37b2ddcc5d166965f8abeff560e6b0f1
|
Use test database on alembic when necessary
|
migrations/config.py
|
migrations/config.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import logging
from logging.handlers import SysLogHandler
from dotenv import load_dotenv
load_dotenv('.env')
# Storage
DATABASE_URL = os.environ['DATABASE_URL']
# Logging
logging.basicConfig(level=logging.DEBUG)
if os.environ.get('LOGGING_URL', None):
root_logger = logging.getLogger()
host, port = os.environ['LOGGING_URL'].split(':')
syslog_handler = SysLogHandler(address=(host, int(port)))
syslog_handler.setLevel(logging.INFO)
root_logger.addHandler(syslog_handler)
|
Python
| 0
|
@@ -299,16 +299,54 @@
torage%0A%0A
+if not os.environ.get('TESTING'):%0A
DATABASE
@@ -379,16 +379,74 @@
E_URL'%5D%0A
+else:%0A DATABASE_URL = os.environ%5B'TEST_DATABASE_URL'%5D%0A%0A
%0A# Loggi
|
0d8766849bedea43cf2eab006327cb942f61c3af
|
add testing function
|
test/test_yaml.py
|
test/test_yaml.py
|
from __future__ import division, absolute_import, print_function
import confuse
import yaml
import unittest
from . import TempDir
def load(s):
return yaml.load(s, Loader=confuse.Loader)
class ParseTest(unittest.TestCase):
def test_dict_parsed_as_ordereddict(self):
v = load("a: b\nc: d")
self.assertTrue(isinstance(v, confuse.OrderedDict))
self.assertEqual(list(v), ['a', 'c'])
def test_string_beginning_with_percent(self):
v = load("foo: %bar")
self.assertEqual(v['foo'], '%bar')
class FileParseTest(unittest.TestCase):
def _parse_contents(self, contents):
with TempDir() as temp:
path = temp.sub('test_config.yaml', contents)
return confuse.load_yaml(path)
def test_load_file(self):
v = self._parse_contents(b'foo: bar')
self.assertEqual(v['foo'], 'bar')
def test_syntax_error(self):
try:
self._parse_contents(b':')
except confuse.ConfigError as exc:
self.assertTrue('test_config.yaml' in exc.filename)
else:
self.fail('ConfigError not raised')
def test_tab_indentation_error(self):
try:
self._parse_contents(b"foo:\n\tbar: baz")
except confuse.ConfigError as exc:
self.assertTrue('found tab' in exc.args[0])
else:
self.fail('ConfigError not raised')
|
Python
| 0.000004
|
@@ -1119,24 +1119,537 @@
t raised')%0A%0A
+ def test_reload_conf(self):%0A with TempDir() as temp:%0A path = temp.sub('test_config.yaml', b'foo: bar')%0A config = confuse.Configuration('test', __name__)%0A config.set_file(filename=path)%0A self.assertEqual(config%5B'foo'%5D.get(), 'bar')%0A temp.sub('test_config.yaml', b'foo: bar2%5Cntest: hello world')%0A config.reload()%0A self.assertEqual(config%5B'foo'%5D.get(), 'bar2')%0A self.assertEqual(config%5B'test'%5D.get(), 'hello world')%0A%0A
def test
|
8faa77e8c7a93620f116d4394788d1f2b560aa2f
|
comment fix
|
models/core/types.py
|
models/core/types.py
|
# No shebang line, this module is meant to be imported
#
# Copyright 2013 Oliver Palmer
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Special column types used by PyFarm's models.
"""
from textwrap import dedent
from uuid import uuid4, UUID
from UserDict import UserDict
from UserList import UserList
from netaddr import IPAddress
try:
from json import dumps, loads
except ImportError:
from simplejson import dumps, loads
from sqlalchemy.types import TypeDecorator, CHAR, String
from sqlalchemy.dialects.postgresql import UUID as PGUuid
from pyfarm.flaskapp import db
JSON_NONE = dumps(None)
NoneType = type(None) # from stdlib types module
class GUID(TypeDecorator):
"""
Platform-independent GUID type.
Uses Postgresql's UUID type, otherwise uses
CHAR(32), storing as stringified hex values.
.. note::
This code is copied from sqlalchemy's standard documentation
"""
impl = CHAR
def load_dialect_impl(self, dialect):
if dialect.name == "postgresql":
return dialect.type_descriptor(PGUuid())
else:
return dialect.type_descriptor(CHAR(32))
def process_bind_param(self, value, dialect):
if value is None:
return value
elif dialect.name == "postgresql":
return str(value)
else:
if not isinstance(value, UUID):
return "%.32x" % UUID(value)
else:
# hexstring
return "%.32x" % value
def process_result_value(self, value, dialect):
if value is None:
return value
else:
return UUID(value)
class JSONSerializable(TypeDecorator):
"""
Base of all custom types which process json data
to and from the database.
:cvar serialize_types:
the kinds of objects we expect to serialize to
and from the database
:cvar serialize_none:
if True then return None instead of converting it to
its json value
:cvar allow_blank:
if True, do not raise a :class:`ValueError` for empty data
:cvar allow_empty:
if True, do not raise :class:`ValueError` if the input data
itself is empty
"""
impl = String
serialize_types = None
serialize_none = False
def __init__(self, *args, **kwargs):
super(JSONSerializable, self).__init__(*args, **kwargs)
# make sure the subclass is doing something we expect
if self.serialize_types is None:
raise NotImplementedError("`serialize_types` is not defined")
def dumps(self, value):
"""
Performs the process of dumping `value` to json. For classes
such as :class:`UserDict` or :class:`UserList` this will dump the
underlying data instead of the object itself.
"""
if isinstance(value, (UserDict, UserList)):
value = value.data
return dumps(value)
def process_bind_param(self, value, dialect):
"""Converts the value being assigned into a json blob"""
if value is None:
return self.dumps(value) if self.serialize_none else value
elif isinstance(value, self.serialize_types):
return self.dumps(value)
else:
args = (value, self.__class__.__name__)
raise ValueError("unexpected input %s for `%s`" % args)
def process_result_value(self, value, dialect):
"""Converts data from the database into a Python object"""
return value if value is None else loads(value)
class JSONList(JSONSerializable):
"""Column type for storing list objects as json"""
serialize_types = (list, tuple, UserList)
class JSONDict(JSONSerializable):
"""Column type for storing dictionary objects as json"""
serialize_types = (dict, UserDict)
class IPv4Address(TypeDecorator):
"""
Column type which can store and retrieve IPv4 addresses in a more
efficient manner
"""
def process_bind_param(self, value, dialect):
if isinstance(value, int):
return value
elif isinstance(value, basestring):
return int(IPAddress(value))
elif isinstance(value, IPAddress):
return int(value)
else:
raise ValueError("unexpected type %s for value" % type(value))
def process_result_value(self, value, dialect):
return IPAddress(value)
def IDColumn():
"""
Produces a column used for `id` on each table. Typically this is done
using a class in :mod:`pyfarm.models.mixins` however because of the ORM
and the table relationships it's cleaner to have a function produce
the column.
"""
return db.Column(IDType, primary_key=True, unique=True, default=IDDefault,
doc=dedent("""
Provides an id for the current row. This value should
never be directly relied upon and it's intended for use
by relationships."""))
# the universal mapping which can be used, even if the underlying
# type changes in the future
IDType = GUID
IDDefault = uuid4
|
Python
| 0
|
@@ -1124,16 +1124,17 @@
pe(None)
+
# from
|
421f32947fc2035d7578899a51be779f72983a74
|
Document `replace` parameter
|
girder/utility/setting_utilities.py
|
girder/utility/setting_utilities.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import six
_validators = {}
_defaultFunctions = {}
def registerValidator(key, fn, replace=False):
"""
Register a validator for a given setting key.
:param key: The setting key.
:type key: str
:param fn: The function that will validate this key.
:type fn: callable
:param replace: If a validator already exists for this key, set this to True to replace the
existing validator. The default is to add the new validator in addition to running the
old validation function.
:type replace: bool
"""
if not replace and key in _validators:
old = _validators[key]
def wrapper(doc):
fn(doc)
old(doc)
_validators[key] = wrapper
else:
_validators[key] = fn
def getValidator(key):
"""
Retrieve the validator function for the given key. Returns ``None`` if none is registered.
"""
return _validators.get(key)
def registerDefaultFunction(key, fn):
"""
Register a default value function for a given setting key.
:param key: The setting key.
:type key: str
:param fn: The function that will return the default value for this key.
:type fn: callable
"""
_defaultFunctions[key] = fn
def getDefaultFunction(key):
"""
Retrieve the default value function for the given key. Returns ``None`` if none is registered.
"""
return _defaultFunctions.get(key)
class validator(object): # noqa: class name
"""
Create a decorator indicating that the wrapped function is responsible for
validating the given key or set of keys. For example,
>>> @validator('my_plugin.setting_key')
>>> def validateMySetting(doc):
>>> if not doc['value']:
>>> raise ValidationException('This key must not be empty.')
:param key: The key(s) that this function validates.
:type key: str or iterable of str
"""
def __init__(self, key, replace=False):
if isinstance(key, six.string_types):
key = {key}
self.keys = key
self.replace = replace
def __call__(self, fn):
for k in self.keys:
registerValidator(k, fn, replace=self.replace)
return fn
class default(object): # noqa: class name
"""
Create a decorator indicating that the wrapped function is responsible for
providing the default value for the given key or set of keys.
:param key: The key(s) that this function validates.
:type key: str or iterable of str
"""
def __init__(self, key):
if isinstance(key, six.string_types):
key = {key}
self.keys = key
def __call__(self, fn):
for k in self.keys:
registerDefaultFunction(k, fn)
return fn
|
Python
| 0.000003
|
@@ -2670,32 +2670,280 @@
iterable of str%0A
+ :param replace: If a validator already exists for this key, set this to True to replace the%0A existing validator. The default is to add the new validator in addition to running the%0A old validation function.%0A :type replace: bool%0A
%22%22%22%0A%0A def
|
fdd2283761e5eefc5db1ea51642b670f6f187faa
|
tweak validator constructor for old versions of PyQt
|
glue/qt/widgets/histogram_widget.py
|
glue/qt/widgets/histogram_widget.py
|
from functools import partial
import numpy as np
from PyQt4 import QtGui
from ...core import message as msg
from ...clients.histogram_client import HistogramClient
from ..ui.histogramwidget import Ui_HistogramWidget
from ..glue_toolbar import GlueToolbar
from ..mouse_mode import RectangleMode
from .data_viewer import DataViewer
from ..layer_artist_model import QtLayerArtistContainer
WARN_SLOW = 10000000
class HistogramWidget(DataViewer):
LABEL = "Histogram"
def __init__(self, data, parent=None):
super(HistogramWidget, self).__init__(self, parent)
self.central_widget = QtGui.QWidget()
self.setCentralWidget(self.central_widget)
self.ui = Ui_HistogramWidget()
self.ui.setupUi(self.central_widget)
container = QtLayerArtistContainer()
self._artist_container = container
self.client = HistogramClient(data,
self.ui.mplWidget.canvas.fig,
artist_container=container)
self.ui.artist_view.setModel(container.model)
self.ui.xmin.setValidator(QtGui.QDoubleValidator())
self.ui.xmax.setValidator(QtGui.QDoubleValidator())
lo, hi = self.client.xlimits
self.ui.xmin.setText(str(lo))
self.ui.xmax.setText(str(hi))
self.make_toolbar()
self._connect()
self._data = data
self._tweak_geometry()
def _tweak_geometry(self):
self.central_widget.resize(600, 400)
self.ui.splitter.setSizes([350, 120])
self.ui.main_splitter.setSizes([300, 100])
self.resize(self.central_widget.size())
def _connect(self):
ui = self.ui
cl = self.client
ui.attributeCombo.currentIndexChanged.connect(
self._set_attribute_from_combo)
ui.attributeCombo.currentIndexChanged.connect(
self._update_minmax_labels)
ui.binSpinBox.valueChanged.connect(partial(setattr, cl, 'nbins'))
ui.normalized_box.toggled.connect(partial(setattr, cl, 'normed'))
ui.autoscale_box.toggled.connect(partial(setattr, cl, 'autoscale'))
ui.cumulative_box.toggled.connect(partial(setattr, cl, 'cumulative'))
ui.xlog_box.toggled.connect(partial(setattr, cl, 'xlog'))
ui.ylog_box.toggled.connect(partial(setattr, cl, 'ylog'))
ui.xmin.returnPressed.connect(self._set_limits)
ui.xmax.returnPressed.connect(self._set_limits)
def _set_limits(self):
lo = float(self.ui.xmin.text())
hi = float(self.ui.xmax.text())
self.client.xlimits = lo, hi
def _update_minmax_labels(self):
lo, hi = self.client.xlimits
self.ui.xmin.setText(str(lo))
self.ui.xmax.setText(str(hi))
def make_toolbar(self):
result = GlueToolbar(self.ui.mplWidget.canvas, self,
name='Histogram')
for mode in self._mouse_modes():
result.add_mode(mode)
self.addToolBar(result)
return result
def _mouse_modes(self):
axes = self.client.axes
rect = RectangleMode(axes, release_callback=self.apply_roi)
return [rect]
def apply_roi(self, mode):
roi = mode.roi()
self.client.apply_roi(roi)
def _update_attributes(self):
combo = self.ui.attributeCombo
component = self.component
combo.clear()
data = [a.layer.data for a in self._artist_container]
try:
combo.currentIndexChanged.disconnect()
except TypeError:
pass
for d in data:
for c in d.visible_components:
if not np.can_cast(d.dtype(c), np.float):
continue
combo.addItem("%s (%s)" % (c.label, d.label), userData=c)
combo.currentIndexChanged.connect(self._set_attribute_from_combo)
combo.currentIndexChanged.connect(self._update_minmax_labels)
if component is not None:
self.component = component
else:
combo.setCurrentIndex(0)
self._set_attribute_from_combo()
@property
def component(self):
combo = self.ui.attributeCombo
index = combo.currentIndex()
return combo.itemData(index)
@component.setter
def component(self, component):
combo = self.ui.attributeCombo
#combo.findData doesn't seem to work in PyQt4
for i in range(combo.count()):
data = combo.itemData(i)
if data is component:
combo.setCurrentIndex(i)
return
raise IndexError("Component not present: %s" % component)
def _set_attribute_from_combo(self):
self.client.set_component(self.component)
self._update_window_title()
def add_data(self, data):
""" Add data item to combo box.
If first addition, also update attributes """
if self.data_present(data):
return True
if data.size > WARN_SLOW and not self._confirm_large_data(data):
return False
self.client.add_layer(data)
self._update_attributes()
self._update_minmax_labels()
return True
def add_subset(self, subset):
pass
def _remove_data(self, data):
""" Remove data item from the combo box """
pass
def data_present(self, data):
return data in self._artist_container
def register_to_hub(self, hub):
super(HistogramWidget, self).register_to_hub(hub)
self.client.register_to_hub(hub)
hub.subscribe(self,
msg.DataCollectionDeleteMessage,
handler=lambda x: self._remove_data(x.data))
hub.subscribe(self,
msg.DataUpdateMessage,
handler=self._update_labels)
def unregister(self, hub):
self.client.unregister(hub)
hub.unsubscribe_all(self)
def _update_window_title(self):
c = self.client.component
if c is not None:
label = str(c.label)
else:
label = 'Histogram'
self.setWindowTitle(label)
def _update_labels(self):
self._update_window_title()
self._update_attributes()
|
Python
| 0
|
@@ -1084,33 +1084,17 @@
-self.ui.xmin.setV
+v
alidator
(QtG
@@ -1081,33 +1081,35 @@
validator
-(
+ =
QtGui.QDoubleVal
@@ -1107,33 +1107,69 @@
DoubleValidator(
-)
+None)%0A validator.setDecimals(7
)%0A self.u
@@ -1168,26 +1168,26 @@
self.ui.xm
-ax
+in
.setValidato
@@ -1188,29 +1188,51 @@
lidator(
-QtGui.QDouble
+validator)%0A self.ui.xmax.set
Validato
@@ -1233,17 +1233,25 @@
lidator(
-)
+validator
)%0A
|
c08e25172d362176c8abed3d2bf54c2cf13da303
|
Fix test for settings_helpers
|
glue/tests/test_settings_helpers.py
|
glue/tests/test_settings_helpers.py
|
from mock import patch
import os
from glue.config import SettingRegistry
from glue._settings_helpers import load_settings, save_settings
def test_roundtrip(tmpdir):
settings = SettingRegistry()
settings.add('STRING', 'green', str)
settings.add('INT', 3, int)
settings.add('FLOAT', 5.5, float)
settings.add('LIST', [1,2,3], list)
with patch('glue.config.settings', settings):
with patch('glue.config.CFG_DIR', tmpdir.strpath):
settings.STRING = 'blue'
settings.INT = 4
settings.FLOAT = 3.5
settings.LIST = ['A', 'BB', 'CCC']
save_settings()
assert os.path.exists(os.path.join(tmpdir.strpath, 'settings.cfg'))
settings.STRING = 'red'
settings.INT = 3
settings.FLOAT = 4.5
settings.LIST = ['DDD', 'EE', 'F']
load_settings()
assert settings.STRING == 'blue'
assert settings.INT == 4
assert settings.FLOAT == 3.5
assert settings.LIST == ['A', 'BB', 'CCC']
|
Python
| 0.000001
|
@@ -879,32 +879,42 @@
load_settings(
+force=True
)%0A%0A a
|
01917a077681949d29eb48173e031b5dfd441e0d
|
update angle.py
|
test/function/angle/angle.py
|
test/function/angle/angle.py
|
import numpy as np
def angle2D(vec1,vec2):
length1 = np.linalg.norm(vec1)
length2 = np.linalg.norm(vec2)
print("length ", length1, length2)
return np.arccos(np.dot(vec1,vec2)/(length1*length2))
def angle3D(vec1, vec2):
# return the angle
v1 = vec1[[0,1]]
v2 = vec2[[0,1]]
a3 = angle2D(v1,v2)
v1 = vec1[[0,2]]
v2 = vec2[[0,2]]
a2 = angle2D(v1,v2)
v1 = vec1[[1,2]]
v2 = vec2[[1,2]]
a1 = angle2D(v1,v2)
return np.array([a1,a2,a3])
np.set_printoptions(precision=18)
v1 = np.array([-5.908280911892572,-1.04509170227587,-3.0])
v_ref = np.array([-5.908280911892572,-1.04509170227587,-8.0])
print(angle3D(v1,v_ref), )
|
Python
| 0.000001
|
@@ -146,16 +146,100 @@
ength2)%0A
+ if length1 %3C 1e-16:%0A return 0.%0A if length2 %3C 1e-16:%0A return 0.%0A
retu
@@ -728,16 +728,106 @@
7,-8.0%5D)
+%0A%0Av1 = np.array(%5B0., 0., 1.%5D)%0Av_ref = np.array(%5B-6.758097397797128,6.190970809322855,4.0%5D)
%0Aprint(a
|
77593739e13f472d844076d38f31b4a767332840
|
Improve list of locations in admin
|
dthm4kaiako/events/admin.py
|
dthm4kaiako/events/admin.py
|
"""Module for admin configuration for the events application."""
import logging
from django.contrib import admin
from django.utils.timezone import now
from django.contrib.gis.db import models as geomodels
from django.utils.translation import gettext_lazy as _
from events.models import (
Event,
Session,
Location,
Organiser,
Sponsor,
Series,
)
from mapwidgets.widgets import GooglePointFieldWidget
logger = logging.getLogger(__name__)
class LocationAdmin(admin.ModelAdmin):
"""Inline view for event locations."""
formfield_overrides = {
geomodels.PointField: {"widget": GooglePointFieldWidget}
}
class SessionInline(admin.StackedInline):
"""Inline view for event sessions."""
model = Session
fk_name = 'event'
extra = 1
min_num = 1
class EventUpcomingListFilter(admin.SimpleListFilter):
title = _('time')
# Parameter for the filter that will be used in the URL query.
parameter_name = 'time'
def lookups(self, request, model_admin):
"""Return a list of tuples.
The first element in each tuple is the coded value for
the option that will appear in the URL query.
The second element is the human-readable name for
the option that will appear in the right sidebar.
"""
return (
('upcoming', _('Upcoming events')),
('past', _('Past events')),
('all', _('All events')),
)
def queryset(self, request, queryset):
"""
Returns the filtered queryset based on the value
provided in the query string and retrievable via
`self.value()`.
"""
if self.value() is None:
self.used_parameters[self.parameter_name] = 'upcoming'
if self.value() == 'upcoming':
return queryset.filter(end__gte=now())
elif self.value() == 'past':
return queryset.filter(end__lt=now())
else:
return queryset
def choices(self, changelist):
"""Override default method to remove 'All' option."""
for lookup, title in self.lookup_choices:
yield {
'selected': self.value() == str(lookup),
'query_string': changelist.get_query_string({self.parameter_name: lookup}),
'display': title,
}
class EventAdmin(admin.ModelAdmin):
"""Admin view for an event."""
model = Event
inlines = [SessionInline]
fieldsets = (
(
None,
{
'fields': (
'name',
'description',
'series',
'organisers',
'sponsors',
'price',
)
}
),
('Location', {
'fields': ('accessible_online', 'locations'),
}),
('Registration', {
'description': 'Currently only registration via URL is available.',
'fields': ('registration_link', ),
}),
('Visibility', {
'fields': (
'published',
'featured',
'show_schedule',
),
}),
)
list_display = ('name', 'location_summary', 'series', 'start', 'end', 'featured')
list_filter = (EventUpcomingListFilter, 'organisers', )
ordering = ('start', 'end', 'name')
def save_related(self, request, form, formsets, change):
"""Trigger update of event datetimes after sessions are saved."""
super().save_related(request, form, formsets, change)
# Update datetimes on event after saving sessions
form.instance.update_datetimes()
admin.site.register(Event, EventAdmin)
admin.site.register(Location, LocationAdmin)
admin.site.register(Organiser)
admin.site.register(Sponsor)
admin.site.register(Series)
|
Python
| 0
|
@@ -633,24 +633,192 @@
dget%7D%0A %7D%0A
+ list_display = (%0A 'name',%0A 'room',%0A 'street_address',%0A 'suburb',%0A 'city',%0A 'region',%0A )%0A list_filter = ('region', )%0A
%0A%0Aclass Sess
|
c59e03b7e87544eb1b954c96275bc6e8546e8a0b
|
Remove time code
|
cactusbot/services/beam/handler.py
|
cactusbot/services/beam/handler.py
|
"""Handle data from Beam."""
from logging import getLogger
import json
import asyncio
import time
from ...packets import MessagePacket, EventPacket
from .api import BeamAPI
from .chat import BeamChat
from .constellation import BeamConstellation
from .parser import BeamParser
class BeamHandler:
"""Handle data from Beam services."""
def __init__(self, channel, handlers):
self.logger = getLogger(__name__)
self.api = BeamAPI()
self.parser = BeamParser()
self.handlers = handlers # HACK, potentially
self._channel = channel
self.channel = ""
self.chat = None
self.constellation = None
self.chat_events = {
"ChatMessage": "message"
}
self.constellation_events = {
"channel": {
"followed": self.on_follow,
"subscribed": self.on_subscribe,
"hosted": self.on_host
}
}
async def run(self, *auth):
"""Connect to Beam chat and handle incoming packets."""
channel = await self.api.get_channel(self._channel)
self.channel = str(channel["id"])
self.api.channel = self.channel # HACK
user = await self.api.login(*auth)
chat = await self.api.get_chat(channel["id"])
self.chat = BeamChat(channel["id"], *chat["endpoints"])
await self.chat.connect(user["id"], chat["authkey"])
asyncio.ensure_future(self.chat.read(self.handle_chat))
self.constellation = BeamConstellation(channel["id"], user["id"])
await self.constellation.connect()
asyncio.ensure_future(
self.constellation.read(self.handle_constellation))
async def handle_chat(self, packet):
"""Handle chat packets."""
start = int(round(time.time() * 1000))
data = packet.get("data")
if data is None:
return
event = packet.get("event")
if event in self.chat_events:
event = self.chat_events[event]
# HACK
if getattr(self.parser, "parse_" + event):
data = self.parser.parse_message(data)
for response in self.handlers.handle(event, data):
if callable(response):
response = await response(response)
print(response)
await self.send(response.text) # HACK
end = int(round(time.time() * 1000))
print(end - start)
elif packet.get("id") == "auth":
if data.get("authenticated") is True:
await self.send("CactusBot activated. Enjoy! :cactus")
else:
self.logger.error("Chat authentication failure!")
async def handle_constellation(self, packet):
"""Handle constellation packets."""
packet = json.loads(packet)
data = packet.get("data")
if not isinstance(data, dict):
return
event = data["channel"].split(":")
data = data.get("payload")
if not isinstance(data, dict):
return
if event is None:
return
if "user" in data:
if event[0] in self.constellation_events:
if event[2] in self.constellation_events[event[0]]:
data = self.constellation_events[event[0]][event[2]](data)
if data is not None:
for response in data:
await self.send(response.text)
async def send(self, *args, **kwargs):
"""Send a packet to Beam."""
if self.chat is None:
raise ConnectionError("Chat not initialized.")
await self.chat.send(*args, **kwargs)
def on_follow(self, data):
"""Handle follow packets from Constellation."""
if data["following"]:
return self.handlers.handle("follow", EventPacket("follow", data["user"]["username"]))
def on_subscribe(self, data):
"""Handle subscribe packets from Constellation."""
return self.handlers.handle("subscribe", EventPacket("subscribe", data["user"]["username"]))
def on_host(self, data):
"""Handle host packets from Constellation."""
return self.handlers.handle("host", EventPacket("host", data["user"]["username"]))
|
Python
| 0.023487
|
@@ -84,20 +84,8 @@
ncio
-%0Aimport time
%0A%0Afr
@@ -1776,56 +1776,8 @@
%22%22%0A%0A
- start = int(round(time.time() * 1000))%0A%0A
@@ -2352,96 +2352,8 @@
HACK
-%0A end = int(round(time.time() * 1000))%0A print(end - start)
%0A%0A
|
dc1b26de1f4fd027f6662ac99b6a11cb53360db6
|
Use dict instead of iterable of sets for single values
|
grapheme/grapheme_property_group.py
|
grapheme/grapheme_property_group.py
|
import json
import os
from enum import Enum
class GraphemePropertyGroup(Enum):
PREPEND = "Prepend"
CR = "CR"
LF = "LF"
CONTROL = "Control"
EXTEND = "Extend"
REGIONAL_INDICATOR = "Regional_Indicator"
SPACING_MARK = "SpacingMark"
L = "L"
V = "V"
T = "T"
LV = "LV"
LVT = "LVT"
E_BASE = "E_Base"
E_MODIFIER = "E_Modifier"
ZWJ = "ZWJ"
GLUE_AFTER_ZWJ = "Glue_After_Zwj"
E_BASE_GAZ = "E_Base_GAZ"
OTHER = "Other"
def get_group(char):
return get_group_ord(ord(char))
def get_group_ord(char):
for char_set, group in SINGLE_CHAR_MAPPINGS:
if char in char_set:
return group
return RANGE_TREE.get_value(char) or GraphemePropertyGroup.OTHER
class ContainerNode:
"""
Simple implementation of interval based BTree with no support for deletion.
"""
def __init__(self, children):
self.children = self._sorted(children)
self._set_min_max()
def _set_min_max(self):
self.min = self.children[0].min
self.max = self.children[-1].max
# Adds an item to the node or it's subnodes. Returns a new node if this node is split, or None.
def add(self, item):
for child in self.children:
if child.min <= item.min <= child.max:
assert child.min <= item.max <= child.max
new_child = child.add(item)
if new_child:
return self._add_child(new_child)
else:
self._set_min_max()
return None
return self._add_child(item)
def get_value(self, key):
for child in self.children:
if child.min <= key <= child.max:
return child.get_value(key)
return None
def _add_child(self, child):
self.children.append(child)
self.children = self._sorted(self.children)
other = None
if len(self.children) >= 4:
other = ContainerNode(self.children[2:])
self.children = self.children[0:2]
self._set_min_max()
return other
def _sorted(self, children):
return sorted(children, key=lambda c: c.min)
class LeafNode:
def __init__(self, range_min, range_max, group):
self.min = range_min
self.max = range_max
self.group = group
# Assumes range check has already been done
def get_value(self, _key):
return self.group
# todo: should fix more efficient group lookup than this naive approach
with open(os.path.join(os.path.dirname(__file__), "data/grapheme_break_property.json"), 'r') as f:
data = json.load(f)
assert len(data) == len(GraphemePropertyGroup) - 1
SINGLE_CHAR_MAPPINGS = [
(
set(int(char, 16) for char in value["single_chars"]),
GraphemePropertyGroup(key)
) for key, value in data.items()
]
RANGE_TREE = None
for key, value in data.items():
for range_ in value["ranges"]:
new_node = LeafNode(
int(range_[0], 16),
int(range_[1], 16),
GraphemePropertyGroup(key)
)
if RANGE_TREE:
new_subtree = RANGE_TREE.add(new_node)
if new_subtree:
RANGE_TREE = ContainerNode([RANGE_TREE, new_subtree])
else:
RANGE_TREE = ContainerNode([new_node])
del data
|
Python
| 0
|
@@ -567,31 +567,16 @@
%0A
- for char_set,
group
-in
+=
SIN
@@ -596,43 +596,39 @@
INGS
-:%0A if char in char_set:%0A
+.get(char, None)%0A if group:%0A
@@ -2720,85 +2720,47 @@
S =
-%5B%0A (%0A set(int(char, 16) for char in value%5B%22single_chars%22%5D),
+%7B%7D%0A%0A for key, value in data.items():
%0A
@@ -2756,35 +2756,39 @@
tems():%0A
-
+group =
GraphemePropert
@@ -2810,47 +2810,99 @@
- )
for
-key, value in data.items()%0A %5D
+char in value%5B%22single_chars%22%5D:%0A SINGLE_CHAR_MAPPINGS%5Bint(char, 16)%5D = group
%0A%0A
|
2b3281863f11fa577dd6504e58f6faec8ada2259
|
Change order of API call
|
qiime_studio/api/v1.py
|
qiime_studio/api/v1.py
|
from flask import Blueprint, jsonify
from .security import validate_request_authentication
from qiime.sdk import PluginManager
PLUGIN_MANAGER = PluginManager()
v1 = Blueprint('v1', __name__)
v1.before_request(validate_request_authentication)
@v1.route('/', methods=['GET', 'POST'])
def root():
return jsonify(content="!")
@v1.route('/plugins', methods=['GET'])
def api_plugins():
plugin_list = list(PLUGIN_MANAGER.plugins.keys())
return jsonify({"names": plugin_list})
@v1.route('/workflows/<plugin_name>', methods=['GET'])
def api_workflows(plugin_name):
plugin = PLUGIN_MANAGER.plugins[plugin_name]
workflows_dict = {}
for key, value in plugin.workflows.items():
workflows_dict[key] = {}
workflows_dict[key]['info'] = "Produces: {}".format(list(value.signature.output_artifacts.values()))
return jsonify({"workflows": workflows_dict})
|
Python
| 0.000001
|
@@ -498,18 +498,8 @@
e('/
-workflows/
%3Cplu
@@ -507,16 +507,26 @@
in_name%3E
+/workflows
', metho
|
896482b83ad75c445e72dbb0eb6bc7246662f699
|
access token is adjusted
|
skybotapp/views.py
|
skybotapp/views.py
|
import json
import requests
from pprint import pprint
from django.shortcuts import render
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
# yomamabot/fb_yomamabot/views.py
from django.views import generic
from django.http.response import HttpResponse
from django.template.context_processors import request
# Create your views here.
def post_facebook_message(fbid, recevied_message):
post_message_url = 'https://graph.facebook.com/v2.6/me/messages?access_token=<page-access-token>'
response_msg = json.dumps({"recipient":{"id":fbid}, "message":{"text":recevied_message}})
status = requests.post(post_message_url, headers={"Content-Type": "application/json"},data=response_msg)
pprint(status.json())
class SkyBotView(generic.View):
# def get(self, request, *args, **kwargs):
# if self.request.GET['hub.verify_token'] == '93985762':
# return HttpResponse(self.request.GET['hub.challenge'])
# else:
# return HttpResponse('Error, invalid token')
@method_decorator(csrf_exempt)
def dispatch(self, request, *args, **kwargs):
return generic.View.dispatch(self, request, *args, **kwargs)
# Post function to handle Facebook messages
def post(self, request, *args, **kwargs):
# Converts the text payload into a python dictionary
incoming_message = json.loads(self.request.body.decode('utf-8'))
# Facebook recommends going through every entry since they might send
# multiple messages in a single call during high load
for entry in incoming_message['entry']:
for message in entry['messaging']:
# Check to make sure the received call is a message call
# This might be delivery, optin, postback for other events
if 'message' in message:
# Print the message to the terminal
pprint(message)
post_facebook_message(message['sender']['id'], message['message']['text'])
return HttpResponse()
def homeView(request):
return HttpResponse('Hello')
|
Python
| 0.000003
|
@@ -533,25 +533,185 @@
en=%3C
-page-access-token
+EAASfh0TDd8cBAHBMfkWQGAexatTOup01lZCXtUJ5CF5Imr5b7MeQu30v6TnEzQmvoJF9MZBzkoZBdhLaVcCSY2BtPivUNJh7pic5vfEA13qDr3TRQLuHn8aKpKZAip4X2QHqhBTa7XQNGPnII1cqNMP46gAaRYMzHHSnZA4NZCAwZDZD
%3E' %0A
|
06fa3a4625576a0d7d4897dabcc2979c36d62ce1
|
Remove unused code
|
dwarf/image/api_response.py
|
dwarf/image/api_response.py
|
#!/usr/bin/env python
#
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dwarf.utils import template
DETAILS = ('created_at', 'deleted', 'deleted_at', 'updated_at')
# -----------------------------------------------------------------------------
# Images API responses
IMAGE = DETAILS + ('checksum', 'container_format', 'disk_format', 'id',
'is_public', 'location', 'min_disk', 'min_ram', 'name',
'owner', 'protected', 'size', 'status')
IMAGE_PROPERTIES = {'properties': {}}
def images_create(data):
return {"image": template(IMAGE, data, add=IMAGE_PROPERTIES)}
def images_list(data):
return {"images": template(IMAGE, data, add=IMAGE_PROPERTIES)}
def images_show(data):
return {"image": template(IMAGE, data, add=IMAGE_PROPERTIES)}
def images_update(data):
return {"image": template(IMAGE, data, add=IMAGE_PROPERTIES)}
|
Python
| 0
|
@@ -1309,99 +1309,8 @@
%7D%0A%0A%0A
-def images_show(data):%0A return %7B%22image%22: template(IMAGE, data, add=IMAGE_PROPERTIES)%7D%0A%0A%0A
def
|
1334c8fa989981e3c917cdc16869b04ad1c2f6e0
|
add --g-fatal-warnings gtk option
|
snaked/core/run.py
|
snaked/core/run.py
|
from optparse import OptionParser
import os
def get_manager():
parser = OptionParser()
parser.add_option('-s', '--session', dest='session',
help="Open snaked with specified session", default='default')
parser.add_option('', '--select-session', action="store_true", dest='select_session',
help="Show dialog to select session at startup", default=False)
parser.add_option('-d', '--debug', action="store_true", dest='debug',
help="Run embedded drainhunter", default=False)
options, args = parser.parse_args()
if options.select_session:
from snaked.core.gui import session_selector
options.session = session_selector.select_session()
from .app import is_master, serve
master, conn = is_master(options.session)
if master:
import gobject
gobject.threads_init()
from .manager import EditorManager
manager = EditorManager(options.session)
manager.start(args)
serve(manager, conn)
if options.debug:
import drainhunter.server
drainhunter.server.run()
return manager
else:
conn.send(['OPEN'] + list(map(os.path.abspath, args)))
conn.send(['END'])
conn.close()
return None
def run():
manager = get_manager()
if not manager:
return
import gtk
try:
gtk.main()
except KeyboardInterrupt:
manager.quit()
|
Python
| 0.000001
|
@@ -503,16 +503,85 @@
t=False)
+%0A parser.add_option('', '--g-fatal-warnings', action=%22store_true%22)
%0A%0A op
|
6670fe1d081e27417a3d340e2c12c061078582af
|
Bump version (pre-release)
|
django_xhtml2pdf/__init__.py
|
django_xhtml2pdf/__init__.py
|
# -*- coding: utf-8 -*-
"""
See PEP 386 (http://www.python.org/dev/peps/pep-0386/)
Release logic:
1. Remove "dev" from current.
2. git commit
3. git tag <version>
4. push to pypi + push to github
5. bump the version, append '.dev0'
6. git commit
7. push to github (to avoid confusion)
"""
__version__ = '0.0.3.dev0'
|
Python
| 0
|
@@ -303,16 +303,11 @@
= '0.0.3
-.dev0
'%0A%0A
|
94d18ba6ede9dc58a558c68fd3af9bbcadc7f189
|
Update urls.py For Django 1.6
|
djangobb_forum/tests/urls.py
|
djangobb_forum/tests/urls.py
|
from django.conf.urls.defaults import patterns, include
urlpatterns = patterns('',
(r'^forum/', include('djangobb_forum.urls', namespace='djangobb')),
)
|
Python
| 0.000001
|
@@ -18,17 +18,8 @@
urls
-.defaults
imp
|
0956a28da1a19be4551b73c913098721ae719e04
|
rename method
|
account_bank_statement_import_coda/wizard/account_bank_statement_import_coda.py
|
account_bank_statement_import_coda/wizard/account_bank_statement_import_coda.py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# This file is part of account_bank_statement_import_coda,
# an Odoo module.
#
# Copyright (c) 2015 ACSONE SA/NV (<http://acsone.eu>)
#
# account_bank_statement_import_coda is free software:
# you can redistribute it and/or modify it under the terms of the GNU
# Affero General Public License as published by the Free Software
# Foundation,either version 3 of the License, or (at your option) any
# later version.
#
# account_bank_statement_import_coda is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with account_bank_statement_import_coda.
# If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import re
import datetime
import logging
from dateutil import parser as date_parser
from openerp import api, models
from openerp.tools.translate import _
from openerp.exceptions import Warning
_logger = logging.getLogger(__name__)
try:
from coda.parser import Parser
from coda.statement import AmountSign, MovementRecordType
except ImportError:
_logger.error(
"CODA parser unavailable because the `pycoda` Python library cannot "
"be found. It can be downloaded and installed from "
"`https://pypi.python.org/pypi/pycoda`.")
Parser = None
class account_bank_statement_import(models.TransientModel):
    """Bank statement import wizard extended with CODA support.

    CODA is the Belgian (febelfin) electronic statement format; parsing is
    delegated to the external ``pycoda`` library (``Parser`` is None when
    that library is not installed, in which case detection always fails
    and the parent importer handles the file).
    """

    _inherit = 'account.bank.statement.import'

    def _check_coda(self, data_file):
        """Return True when *data_file* looks like a CODA file.

        Detection matches the first 24 characters of the file as defined
        by the febelfin specifications.
        """
        if Parser is None:
            return False
        try:
            # Matches the first 24 characters of a CODA file, as defined by
            # the febelfin specifications
            return re.match(r'0{5}\d{9}05[ D] {7}', data_file) is not None
        except Exception:
            # Best-effort detection: unreadable content is simply not CODA.
            # (Narrowed from a bare ``except:`` so system-exiting
            # exceptions are not swallowed.)
            return False

    @api.model
    def _parse_file(self, data_file):
        """Parse *data_file* as CODA or delegate to the parent parser.

        :return: tuple (currency, account number, list of statement vals)
        :raises Warning: when the CODA parser fails on the file.
        """
        if not self._check_coda(data_file):
            return super(account_bank_statement_import, self)._parse_file(
                data_file)
        vals_bank_statements = []
        try:
            statements = Parser().parse(data_file)
            for statement in statements:
                vals_bank_statements.append(
                    self.get_st_vals(statement))
        except Exception as e:
            _logger.exception('Error when parsing coda file')
            # Interpolate *after* translation so the template string can
            # actually be found in the translation catalogs.
            raise Warning(
                _("The following problem occurred during import. "
                  "The file might not be valid.\n\n %s") % e.message)
        acc_number = None
        currency = None
        if statements:
            # Take both values from the first statement for consistency
            # (the currency was previously read from the *last* statement
            # through the leaked loop variable).
            acc_number = statements[0].acc_number
            currency = statements[0].currency
        return currency, acc_number, vals_bank_statements

    def get_st_vals(self, statement):
        """
        This method return a dict of vals that can be passed to
        create method of statement.
        :return: dict of vals that represent additional infos for the
            statement found in the file.
            {
             'name': paper_seq_number
             'balance_start': balance_start,
             'balance_end_real': balance_end_real,
             'transactions': transactions
            }
        """
        # CODA amounts are unsigned; a separate sign flag says debit/credit.
        balance_start = statement.old_balance
        if statement.old_balance_amount_sign == AmountSign.DEBIT:
            balance_start = - balance_start
        balance_end_real = statement.new_balance
        if statement.new_balance_amount_sign == AmountSign.DEBIT:
            balance_end_real = - balance_end_real
        transactions = []
        statement_date = statement.new_balance_date
        vals = {'balance_start': balance_start,
                'balance_end_real': balance_end_real,
                'date': statement_date,
                'transactions': transactions
                }
        name = statement.paper_seq_number
        if name:
            # Prefix the paper sequence number with the statement year so
            # names stay unique across years.
            year = ""
            if statement_date:
                parsed_date = date_parser.parse(statement_date)
                year = "%s/" % parsed_date.year
            vals.update({
                'name': "%s%s" % (year, statement.paper_seq_number),
            })
        # Globalisation records group detail movements under a ref_move.
        globalisation_dict = dict([
            (st.ref_move, st) for st in statement.movements
            if st.type == MovementRecordType.GLOBALISATION])
        information_dict = {}
        # build a dict of information by transaction_ref. The transaction_ref
        # refers to the transaction_ref of a movement record
        for info_line in statement.informations:
            infos = information_dict.setdefault(info_line.transaction_ref, [])
            infos.append(info_line)
        # Globalisation lines themselves are not imported as transactions;
        # only the detail movements are.
        for sequence, line in enumerate(
                filter(lambda l: l.type != MovementRecordType.GLOBALISATION,
                       statement.movements)
                ):
            info = self.get_st_line_vals(line,
                                         globalisation_dict,
                                         information_dict)
            info['sequence'] = sequence
            transactions.append(info)
        return vals

    def get_st_line_note_msg(self, line, information_dict):
        """This method returns a formatted note from line information

        The note aggregates counterparty details and all communication
        texts (from the movement itself and its information records).
        Returns None when there is nothing to report.
        """
        note = []
        if line.counterparty_name:
            note.append(_('Counter Party') + ': ' +
                        line.counterparty_name)
        if line.counterparty_number:
            note.append(_('Counter Party Account') + ': ' +
                        line.counterparty_number)
        if line.counterparty_address:
            note.append(_('Counter Party Address') + ': ' +
                        line.counterparty_address)
        infos = information_dict.get(line.transaction_ref, [])
        if line.communication or infos:
            communications = []
            if line.communication:
                communications.append(line.communication)
            for info in infos:
                communications.append(info.communication)
            note.append(_('Communication') + ': ' +
                        " ".join(communications))
        return note and '\n'.join(note) or None

    def get_st_line_name(self, line, globalisation_dict):
        """
        This method must return a valid name for the statement line
        The name is the statement communication if exists or
        the communication of the related globalisation line if exists or
        '/'
        """
        name = line.communication
        if not name and line.ref_move in globalisation_dict:
            name = globalisation_dict[line.ref_move].communication
        return name or '/'

    def get_st_line_vals(self, line, globalisation_dict, information_dict):
        """
        This method must return a dict of vals that can be passed to create
        method of statement line in order to record it. It is the
        responsibility of every parser to give this dict of vals,
        so each one can implement his own way of recording the lines.
          :param: line: a dict of vals that represent a line of
            result_row_list
          :return: dict of values to give to the create method of
            statement line, it MUST contain at least:
            {
             'name':value,
             'date':value,
             'amount':value,
             'ref':value,
            }
        """
        amount = line.transaction_amount
        if line.transaction_amount_sign == AmountSign.DEBIT:
            amount = - amount
        return {'name': self.get_st_line_name(line, globalisation_dict),
                'date': line.entry_date or datetime.datetime.now().date(),
                'amount': amount,
                'ref': line.ref,
                'partner_name': line.counterparty_name or None,
                'account_number': line.counterparty_number or None,
                'note': self.get_st_line_note_msg(line, information_dict),
                # ref alone is not unique; combine with the transaction ref
                # so re-imports can be deduplicated.
                'unique_import_id': line.ref + line.transaction_ref,
                }

    @api.model
    def _complete_stmts_vals(self, stmts_vals, journal_id, account_number):
        """Prefix each statement name with the journal code."""
        stmts_vals = super(
            account_bank_statement_import, self)._complete_stmts_vals(
            stmts_vals, journal_id, account_number)
        journal = self.env['account.journal'].browse(journal_id)
        for st_vals in stmts_vals:
            st_vals['name'] = '%s/%s' % (journal.code, st_vals['name'])
        return stmts_vals
|
Python
| 0.000004
|
@@ -5419,20 +5419,16 @@
ine_note
-_msg
(self, l
@@ -8253,12 +8253,8 @@
note
-_msg
(lin
|
bdceb4c7bc0b71755d9f63974a5597e29fd94e75
|
comment test code
|
tester.py
|
tester.py
|
import urllib2
from socket import p
import settings
import random
import threading
import Queue
import json
import requests
from settings import USER_AGENTS
def makeRequest(proxy, target):
i_headers = {'User-Agent': random.choice(USER_AGENTS)}
print("\n")
try:
r = requests.get(target, proxies=proxy, headers=i_headers, timeout=5)
except Exception, e:
print "Test Failed: %s By %s \nException: %s" % (target, str(proxy), str(e))
return False
else:
print "Test Successed: %s By %s" % (target, str(proxy))
return True
def makeAProxyRequest(proxy, testTarget):
i_headers = {'User-Agent':random.choice(settings.USER_AGENTS)}
url = testTarget
print("\n")
try:
r = requests.get(url, proxies=proxy, headers = i_headers, timeout=5)
except Exception, e:
print "Test Failed: %s By %s \nException: %s" % (testTarget, str(proxy), str(e))
return False
else:
print "Test Successed: %s By %s" % (testTarget, str(proxy))
return True
def makeFullTestForOneProxy(proxy, type = 'ALL'):
    """Probe *proxy* against every target in settings.TestTargetsCN.

    Returns the fraction of targets reached (0.0 - 1.0). ``type`` is
    unused but kept for interface compatibility.
    """
    successes = sum(
        1 for target in settings.TestTargetsCN
        if makeAProxyRequest(proxy, target) == True)
    return successes * 1.0 / len(settings.TestTargetsCN)
class WorkThread(threading.Thread):
    # Worker that drains a shared Queue of proxies and tests each one.
    # ``aa`` is an optional single test URL; when None, a full multi-target
    # quality test is run for every proxy instead.
    def __init__(self, name, workQueue, aa=None):
        super(WorkThread, self).__init__()
        self.queue = workQueue
        self.name = name
        self.aa = aa
    def run(self):
        print "Starting " + self.name
        while True:
            # NOTE(review): empty()/get() is not atomic -- another worker may
            # drain the queue in between, leaving get() blocked; acceptable
            # for this throwaway script but worth confirming.
            if self.queue.empty():
                print "Exiting " + self.name
                break
            proxy = self.queue.get()
            if proxy != None:
                print "Thread: " + self.name + " Size: " + str(self.queue.qsize())
                if self.aa == None:
                    makeFullTestForOneProxy(proxy)
                else:
                    makeAProxyRequest(proxy, self.aa)
            # Pair every get() with task_done() so queue.join() can return.
            self.queue.task_done()
# Ad-hoc manual checks kept for reference:
# makeFullTestForOneProxy({"http":"115.218.126.59:9000"})
# makeAProxyRequest({"http":"115.218.216.90:9000"}, 'http://www.woshipm.com/')
# makeAProxyRequest({"http":"115.218.216.90:9000"}, 'https://www.baidu.com/')
# makeAProxyRequest({"http":"115.218.216.90:9000"}, 'http://www.v2ex.com/')

# Load the proxy list produced by the crawler, then fan the checks out
# over ten worker threads and wait for the queue to drain.
jsonFile = "proxy.json"
f = open(jsonFile)
fileData = f.read()
f.close()
proxys = json.loads(fileData)

workQueue = Queue.Queue(0)
for proxy in proxys:
    workQueue.put(proxy)

for i in range(10):
    name = "Thread " + str(i)
    thread = WorkThread(name, workQueue)
    thread.start()

workQueue.join()
|
Python
| 0
|
@@ -2144,16 +2144,18 @@
com/')%0A%0A
+#
jsonFile
@@ -2170,16 +2170,18 @@
y.json%22%0A
+#
f = open
@@ -2191,16 +2191,18 @@
onFile)%0A
+#
fileData
@@ -2213,16 +2213,18 @@
.read()%0A
+#
f.close(
@@ -2225,16 +2225,18 @@
close()%0A
+#
proxys =
@@ -2257,18 +2257,22 @@
leData)%0A
-%0A%0A
+#%0A#%0A#
workQueu
@@ -2290,17 +2290,20 @@
ueue(0)%0A
-%0A
+#%0A#
for prox
@@ -2315,16 +2315,18 @@
proxys:%0A
+#
%09workQue
@@ -2339,17 +2339,20 @@
(proxy)%0A
-%0A
+#%0A#
for i in
@@ -2363,16 +2363,18 @@
ge(10):%0A
+#
%09name =
@@ -2392,16 +2392,18 @@
str(i)%0A
+#
%09thread
@@ -2432,16 +2432,18 @@
kQueue)%0A
+#
%09thread.
@@ -2450,16 +2450,18 @@
start()%0A
+#
workQueu
|
3fb934d505f22987f39e28d35bb17ea157770913
|
fix check for tcp port mapping on remove service link
|
rancher/servicelink.py
|
rancher/servicelink.py
|
import json
import re
from rancher import exit, util
import requests
class ServiceLink:
rancherApiVersion = '/v1/'
request_headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
def __init__(self, configuration):
self.config = configuration
def __get_load_balancer_targets(self):
end_point = self.config['rancherBaseUrl'] + self.rancherApiVersion + 'serviceconsumemaps?limit=-1'
response = requests.get(end_point,
auth=(self.config['rancherApiAccessKey'], self.config['rancherApiSecretKey']),
headers=self.request_headers, verify=False)
if response.status_code not in range(200, 300):
exit.err(response.text)
data = json.loads(response.text)['data']
services = []
for item in data:
if 'ports' in item:
if item['ports'] is not None:
services.append(
{'serviceId': item['consumedServiceId'], 'ports': item['ports'], 'state': item['state']})
return services
def __get_load_balancer_ports(self):
end_point = self.config['rancherBaseUrl'] + self.rancherApiVersion + 'loadbalancerservices/' + self.config[
'loadBalancerSvcId']
response = requests.get(end_point,
auth=(self.config['rancherApiAccessKey'], self.config['rancherApiSecretKey']),
headers=self.request_headers, verify=False)
if response.status_code not in range(200, 300):
exit.err(response.text)
data = json.loads(response.text)
return data['launchConfig']['ports']
def __set_load_balancer_targets(self, targets):
payload = util.build_payload({'serviceLinks': targets})
end_point = self.config['rancherBaseUrl'] + self.rancherApiVersion + 'loadbalancerservices/' + self.config[
'loadBalancerSvcId'] + \
'/?action=setservicelinks'
response = requests.post(end_point,
auth=(self.config['rancherApiAccessKey'], self.config['rancherApiSecretKey']),
headers=self.request_headers, verify=False, data=payload)
if response.status_code not in range(200, 300):
exit.err(response.text)
def add_load_balancer_target(self, svc_id, host, desired_port, internal_port):
port_set = False
targets = self.__get_load_balancer_targets()
for idx, target in reversed(list(enumerate(targets))):
if target['state'] == 'removed':
del targets[idx]
continue
if target['serviceId'] == str(svc_id) and 'ports' in target:
for port in target['ports']:
if port.lower().startswith(host.lower() + ':' + str(desired_port)) \
or (port.endswith('=' + str(internal_port)) and re.compile("^\d+=\d+$").match(
port) is not None):
exit.info('This target already exists: ' + str(target))
target['ports'].append(host + ':' + str(desired_port) + '=' + str(internal_port))
port_set = True
targets[idx] = target
if not port_set:
targets.append(
{'serviceId': str(svc_id), 'ports': [host + ':' + str(desired_port) + '=' + str(internal_port)]})
self.__set_load_balancer_targets(targets)
self.__update_load_balancer_service()
def remove_load_balancer_target(self, svc_id, host, desired_port):
port_removed = False
targets = self.__get_load_balancer_targets()
for idx, target in reversed(list(enumerate(targets))):
if target['state'] == 'removed':
del targets[idx]
continue
if target['serviceId'] == str(svc_id) and 'ports' in target:
for port in target['ports']:
if port.lower().startswith(host.lower() + ':' + str(desired_port)) \
or (port.endswith(str(desired_port) + '=') and re.compile("^\d+=\d+$").match(
port) is not None):
target['ports'].remove(port)
port_removed = True
if len(target['ports']) > 0:
targets[idx] = target
else:
del targets[idx]
# Commented break. It because rancher can create duplicate ports.
# https://github.com/rancher/rancher/issues/4631
# break
# if port_removed:
# break
if not port_removed:
exit.info('No such target')
self.__set_load_balancer_targets(targets)
self.__update_load_balancer_service()
def __update_load_balancer_service(self):
payload = '{}'
end_point = self.config['rancherBaseUrl'] + self.rancherApiVersion + 'loadbalancerservices/' + self.config[
'loadBalancerSvcId'] + \
'/?action=update'
response = requests.post(end_point,
auth=(self.config['rancherApiAccessKey'], self.config['rancherApiSecretKey']),
headers=self.request_headers, verify=False, data=payload)
if response.status_code not in range(200, 300):
exit.err(response.text)
def get_available_port(self):
ports = self.__get_load_balancer_ports()
port_list = []
for port in ports:
if '/tcp' in port:
port_list.append(port.split(':')[0])
targets = self.__get_load_balancer_targets()
for target in targets:
if 'ports' in target:
for port in target['ports']:
if re.compile("^\d+=\d+$").match(port) is not None:
port_list.remove(port.split('=')[0])
if len(port_list) > 0:
return port_list[0]
print 'There is no available ports'
exit(2)
|
Python
| 0
|
@@ -4126,27 +4126,29 @@
or (port.
-end
+start
swith(str(de
|
8aee0469073e7aa7afb9a97000fe4f0cc1746d76
|
add last-modified headers to deepzoom views
|
readux/dyndzi/views.py
|
readux/dyndzi/views.py
|
import logging
from django.http import HttpResponse, HttpResponseBadRequest, Http404
from django.views.decorators.http import condition, last_modified
from deepzoom import DeepZoomImageDescriptor
from eulfedora.server import TypeInferringRepository
from eulfedora.views import datastream_etag
from readux.books.models import Image
from readux.dyndzi import TILE_OVERLAP, TILE_SIZE
from readux.dyndzi.models import DziImage
logger = logging.getLogger(__name__)
def get_image_object_or_404(request, img_id):
    '''Utility method to get an image in Fedora or raise an
    :class:`~django.http.Http404` if the image is not found or accessible.

    :param img_id: image identifier (Fedora pid)
    '''
    try:
        # currently expected to return an Image cmodel with djatoka service
        repo = TypeInferringRepository()
        return repo.get_object(img_id)
    except Exception:
        # Narrowed from a bare ``except:`` so that system-exiting
        # exceptions (KeyboardInterrupt, SystemExit) are not converted
        # into a 404 response.
        raise Http404
def image_etag(request, img_id, **kwargs):
    '''Compute the ETag for the Image datastream backing *img_id*, so
    browsers can cache the DZI views.

    :param img_id: image identifier (Fedora pid)
    '''
    datastream_id = Image.image.id
    return datastream_etag(request, img_id, datastream_id, **kwargs)
@condition(etag_func=image_etag)
def image_dzi(request, img_id):
    '''Generate and return the xml portion of a DZI file.

    :param img_id: image identifier (i.e. fedora object pid)
    '''
    image = get_image_object_or_404(request, img_id)
    descriptor = DziImage(image)
    xml = descriptor.serialize(pretty=True)
    return HttpResponse(xml, mimetype='application/xml')
@condition(etag_func=image_etag)
def dzi_tile(request, img_id, level, column, row, fmt):
    '''Generate a single tile image for the specified level, column,
    row, and format.

    The tile is produced by asking djatoka (via the image object's
    ``get_region`` service) for the matching scaled and, when needed,
    cropped region of the master image.

    :param img_id: image identifier
    :param level: scale level in the dzi image pyramid
    :param column: column number for the requested tile
    :param row: row number for the requested tile
    :param fmt: image format (currently ignored)
    '''
    # NOTE: format is currently ignored
    # deepzoom functions expect numbers and not strings
    level = int(level)
    column = int(column)
    row = int(row)
    # calculate the appropriate djatoka call to make and return the appropriate scaled image tile
    # or error if any of the parameters are wrong
    # get the object (expected to be one of the image cmodels with djatoka services)
    img = get_image_object_or_404(request, img_id)
    # generate a deepzoom.py tile descriptor to help with level/scale/tile calculations
    tiledescriptor = DeepZoomImageDescriptor(width=img.width, height=img.height,
                                             tile_size=TILE_SIZE, tile_overlap=TILE_OVERLAP)
    # on invalid level or col/row, return 500 error
    columns, rows = tiledescriptor.get_num_tiles(float(level))
    # only log the grid size once per level (first tile)
    if column == 1 and row == 1:
        logger.debug("level %d [%d x %d]", level, columns, rows)
    invalid_msg = None
    if column > columns:
        invalid_msg = 'requested column %d exceeds number of columns (%d) for this level' % \
            (column, columns)
    elif row > rows:
        invalid_msg = 'requested row %d exceeds number of rows (%d) for this level' % \
            (row, rows)
    if invalid_msg is not None:
        return HttpResponseBadRequest('Cannot generate requested tile: %s' % invalid_msg)
    # for the smallest tile, if we need only one row and column
    # - scale only (djatoka doesn't like cropping to full size)
    if columns == 1 and rows == 1:
        # get the width and height for the full image at this zoom level
        levelwidth, levelheight = tiledescriptor.get_dimensions(level)
        # scale full image to needed level size and return
        logger.debug('Requesting scale=%d,%d', levelwidth, levelheight)
        return HttpResponse(img.get_region(scale='%d,%d' % (levelwidth, levelheight)),
                            mimetype='image/jpeg')
    # otherwise, we need to crop and scale
    # get bounds for the currently requested tile on the current level
    sx1, sy1, sx2, sy2 = tiledescriptor.get_tile_bounds(level, column, row)
    # final size we want for the tile, after crop and scale
    scaledtilewidth = sx2 - sx1
    scaledtileheight = sy2 - sy1
    # get the scale factor for this level
    scale = tiledescriptor.get_scale(level)
    # deepzoom.py crops first and then scales, but djatoka does things
    # in the reverse order
    # scale the coordinates back up so we can crop based on the master image
    x1 = sx1/scale
    y1 = sy1/scale
    x2 = sx2/scale
    y2 = sy2/scale
    # calculate cropped portion width & height
    # (djatoka uses y,x,h,w for cropping)
    cropwidth = x2 - x1
    cropheight = y2 - y1
    # ... something about this may be slightly off, getting line
    # artifacts between tiles at certain zoom levels
    # (possibly a djatoka issue?)
    logger.debug("Requesting scale=%d,%d region=%d,%d,%d,%d",
                 scaledtilewidth, scaledtileheight,
                 y1, x1, cropheight, cropwidth)
    # call image/djatoka get_region with calculated region and scale
    return HttpResponse(img.get_region(scale='%s,%s' % \
                        (scaledtilewidth, scaledtileheight),
                        region='%d,%d,%d,%d' % \
                        (y1, x1, cropheight, cropwidth)),
                        mimetype='image/jpeg')
|
Python
| 0
|
@@ -132,24 +132,9 @@
tion
-, last_modified
%0A
+
%0Afro
@@ -228,16 +228,28 @@
pository
+, Repository
%0Afrom eu
@@ -1156,39 +1156,561 @@
s)%0A%0A
-@condition(etag_func=image_etag
+%0Adef image_lastmodified(request, img_id , **kwargs):%0A '''Last-modified for Fedora Image datastream, to allow browser caching%0A on DZI views.%0A%0A :param img_id: image identifier (Fedora pid)%0A '''%0A # NOTE: next version of eulfedora should have a reusable datastream%0A # last-modified method similar to existing datastream_etag%0A repo = Repository()%0A img = repo.get_object(img_id, type=Image)%0A if img.image and img.image.exists:%0A return img.image.created%0A%0A%0A@condition(etag_func=image_etag, last_modified_func=image_lastmodified
)%0Ade
@@ -2033,16 +2033,16 @@
/xml')%0A%0A
-
@conditi
@@ -2064,16 +2064,55 @@
age_etag
+, last_modified_func=image_lastmodified
)%0Adef dz
|
c6cde6a72204a9e688ea0d6dfe9550f2cb39a0fc
|
resolve incorrect merge conflict resolution
|
common/lib/xmodule/xmodule/modulestore/tests/test_xml.py
|
common/lib/xmodule/xmodule/modulestore/tests/test_xml.py
|
import os.path
from nose.tools import assert_raises, assert_equals
from xmodule.course_module import CourseDescriptor
from xmodule.modulestore.xml import XMLModuleStore
from xmodule.modulestore import XML_MODULESTORE_TYPE
from .test_modulestore import check_path_to_location
from . import DATA_DIR
class TestXMLModuleStore(object):
    # Tests for the XML-backed modulestore, driven by the bundled
    # DATA_DIR course fixtures ('toy' and 'simple').
    def test_path_to_location(self):
        """Make sure that path_to_location works properly"""
        print "Starting import"
        modulestore = XMLModuleStore(DATA_DIR, course_dirs=['toy', 'simple'])
        print "finished import"
        check_path_to_location(modulestore)

    def test_xml_modulestore_type(self):
        # The XML store must report its modulestore type constant.
        store = XMLModuleStore(DATA_DIR, course_dirs=['toy', 'simple'])
        assert_equals(store.get_modulestore_type('foo/bar/baz'), XML_MODULESTORE_TYPE)

    def test_unicode_chars_in_xml_content(self):
        # edX/full/6.002_Spring_2012 has non-ASCII chars, and during
        # uniquification of names, would raise a UnicodeError. It no longer does.
        # Ensure that there really is a non-ASCII character in the course.
        with open(os.path.join(DATA_DIR, "toy/sequential/vertical_sequential.xml")) as xmlf:
            xml = xmlf.read()
            with assert_raises(UnicodeDecodeError):
                xml.decode('ascii')
        # Load the course, but don't make error modules. This will succeed,
        # but will record the errors.
        modulestore = XMLModuleStore(DATA_DIR, course_dirs=['toy'], load_error_modules=False)
        # Look up the errors during load. There should be none.
        location = CourseDescriptor.id_to_location("edX/toy/2012_Fall")
        errors = modulestore.get_item_errors(location)
        assert errors == []
|
Python
| 0.000005
|
@@ -276,17 +276,29 @@
on%0Afrom
-.
+xmodule.tests
import
|
7eff792fd654335e423c17cf1fa8dd70d2187090
|
Fixed double quoting of the ">>" string in search.py.
|
channelguide/guide/views/search.py
|
channelguide/guide/views/search.py
|
from cgi import escape
import urllib
import re
from django.utils.translation import gettext as _
from django.http import Http404
from channelguide import util, cache
from channelguide.guide import search as search_mod
from channelguide.guide.templateutil import Pager
from channelguide.guide.models import (Channel, Category, Tag, Item, Language,
ChannelSearchData, ItemSearchData)
FRONT_PAGE_LIMIT = 10
FRONT_PAGE_LIMIT_ITEMS = 20
def get_search_terms(query):
    """Split *query* into whitespace-separated search terms.

    Uses a raw regex (``"\s"`` in a plain string is a deprecated escape)
    and splits on *runs* of whitespace, so multiple spaces between words
    no longer yield empty terms -- an empty term previously reached
    search_results() and produced ``LIKE '%'``, matching everything.
    An empty/blank query returns an empty list.
    """
    return [term for term in re.split(r"\s+", query.strip()) if term]
def terms_too_short(terms):
    """Return True when no term is long enough (>= 3 chars) to search on."""
    return not any(len(term) >= 3 for term in terms)
def search_channels(request, terms):
    """Build a channel search query for *terms*.

    Non-moderators only ever see approved channels.
    """
    channel_query = search_mod.search_channels(terms)
    if not request.user.is_moderator():
        channel_query.where(state=Channel.APPROVED)
    return channel_query
def search_items(request, terms):
    """Build an item (video) search query for *terms*.

    Non-moderators only see items whose channel is approved.
    """
    item_query = search_mod.search_items(terms)
    if not request.user.is_moderator():
        item_query.where(state=Channel.APPROVED)
    return item_query
def more_results_link(query, total_results):
    """Return an HTML link to the full channel results page for *query*."""
    remaining = total_results - FRONT_PAGE_LIMIT
    label = _('%d More Matching Channels >>') % remaining
    href = 'search-more-channels?query=' + urllib.quote_plus(query)
    return util.make_link(href, escape(label))
def more_results_link_items(query, total_results):
    """Return an HTML link to the full channel-video results page."""
    remaining = total_results - FRONT_PAGE_LIMIT_ITEMS
    label = _('%d More Matching Channel Videos >>') % remaining
    href = 'search-more-items?query=' + urllib.quote_plus(query)
    return util.make_link(href, escape(label))
def search_results(connection, class_, terms, search_attribute='name'):
    """Prefix-match every term against ``class_.<search_attribute>``.

    Only rows associated with at least one channel are returned.
    """
    query = class_.query().load('channel_count')
    query.having(class_.c.channel_count > 0)
    column = class_.c.get(search_attribute)
    for term in terms:
        # prefix match: term%
        query.where(column.like('%s%%' % term))
    return query.execute(connection)
@cache.aggresively_cache
def search(request):
    """Front search page: channels, items, tags, languages and categories."""
    # 'context' is never used below -- left in place (doc-only pass).
    context = {}
    try:
        search_query = request.GET['query']
    except:
        # NOTE(review): bare except -- the intent is "missing ?query=
        # parameter => 404"; KeyError would be the precise exception.
        raise Http404
    search_query = search_query.strip()
    terms = get_search_terms(search_query)
    if terms_too_short(terms):
        # Nothing searchable (all terms shorter than 3 chars).
        return util.render_to_response(request, 'channel-search.html', {
            'results_count': 0,
            'search_query': search_query,
        })
    query = search_channels(request, terms)
    results_count = query.count(request.connection)
    results = query.limit(FRONT_PAGE_LIMIT).execute(request.connection)
    query = search_items(request, terms)
    item_results_count = query.count(request.connection)
    item_results = query.limit(FRONT_PAGE_LIMIT_ITEMS).execute(request.connection)
    tags = search_results(request.connection, Tag, terms)
    languages = search_results(request.connection, Language, terms)
    categories = search_results(request.connection, Category, terms)
    # Exactly one channel hit and nothing else: jump straight to it.
    if (results_count == 1 and (item_results_count == len(tags) ==
            len(languages) == len(categories) == 0)):
        return util.redirect(results[0].get_absolute_url())
    return util.render_to_response(request, 'channel-search.html', {
        'results': results,
        'results_count': results_count,
        'item_results': item_results,
        'item_results_count': item_results_count,
        'extra_results': results_count > FRONT_PAGE_LIMIT,
        'extra_item_results': item_results_count > FRONT_PAGE_LIMIT_ITEMS,
        'tags': tags,
        'languages': languages,
        'categories': categories,
        'search_query': search_query,
        'more_results_link': more_results_link(search_query, results_count),
        'more_results_link_items': more_results_link_items(search_query,
            item_results_count),
        })
def do_search_more(request, title, search_func):
    """Shared implementation of the paged 'more results' views.

    *title* is a format string receiving the query; *search_func* is one
    of search_channels / search_items.
    """
    try:
        search_query = request.GET['query']
    except:
        # NOTE(review): bare except -- intent is missing parameter => 404.
        raise Http404
    terms = get_search_terms(search_query)
    if terms_too_short(terms):
        return util.render_to_response(request, 'search-more.html', {})
    search_query = search_query.strip()
    query = search_func(request, terms)
    # 20 results per page; the page number comes from the request.
    pager = Pager(20, query, request)
    return util.render_to_response(request, 'search-more.html', {
        'title': title % search_query,
        'search_query': search_query,
        'results': pager.items,
        'pager': pager,
        })
@cache.aggresively_cache
def search_more(request):
    """Paged listing of all channels matching the query."""
    return do_search_more(request, _('Channels Matching %s'), search_channels)
@cache.aggresively_cache
def search_more_items(request):
    """Paged listing of all channel videos matching the query."""
    return do_search_more(request, _('Channels With Videos Matching %s'),
                          search_items)
|
Python
| 0.999671
|
@@ -1,27 +1,4 @@
-from cgi import escape%0A
impo
@@ -1197,38 +1197,30 @@
_link(href,
-escape(
label)
-)
%0A%0Adef more_r
@@ -1468,22 +1468,14 @@
ef,
-escape(
label)
-)
%0A%0Ade
|
c79ccb44edbca1ccc5d5b1bddb4fa8dc19e6df66
|
update middleware for django 1.10 issue
|
chapter6/growth_studio/settings.py
|
chapter6/growth_studio/settings.py
|
"""
Django settings for growth_studio project.
Generated by 'django-admin startproject' using Django 1.10.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'q2c4xbdh)hf-$z7v1dyai3n^+(g%l5ogi17rm+rud^ysbx-(h0'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'homepage',
'blog',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'growth_studio.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates/'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'growth_studio.wsgi.application'
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static/'),
)
PROJECT_DIR = os.path.dirname(os.path.abspath(__file__))
STATIC_ROOT = os.path.join(PROJECT_DIR, 'static')
### settings.py file
### settings that are not environment dependent
try:
from .local_settings import *
except ImportError:
pass
|
Python
| 0
|
@@ -1119,61 +1119,8 @@
= %5B%0A
- 'django.middleware.security.SecurityMiddleware',%0A
@@ -1328,32 +1328,102 @@
ionMiddleware',%0A
+ 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',%0A
'django.cont
@@ -1528,16 +1528,69 @@
eware',%0A
+ 'django.middleware.security.SecurityMiddleware',%0A
%5D%0A%0AROOT_
|
2b3667dfc4fbd6571da288146d4e8f8f8f2d51a1
|
Fix broken sorted set unit test.
|
test/unit/test_sorted_set.py
|
test/unit/test_sorted_set.py
|
# :coding: utf-8
# :copyright: Copyright (c) 2013 Martin Pengelly-Phillips
# :license: See LICENSE.txt.
import pytest
from clique.sorted_set import SortedSet
@pytest.fixture
def standard_set(request):
    '''Return sorted set.'''
    # NOTE(review): this fixture is defined a second time further down in
    # this module; the later definition shadows this one at collection time.
    return SortedSet([4, 5, 6, 7, 2, 1, 1])
@pytest.mark.parametrize(('item', 'expected'), [
    (1, True),
    (10, False)
], ids=[
    'item present',
    'item not present'
])
def test_contains(item, expected, standard_set):
    '''Check item membership.'''
    # ``in`` must work for both present and absent items.
    assert (item in standard_set) is expected
@pytest.mark.parametrize(('sorted_set', 'expected'), [
    (SortedSet(), 0),
    (SortedSet([]), 0),
    (SortedSet([1]), 1),
    (SortedSet([1, 2, 3]), 3),
    # A set de-duplicates, so six items with three distinct values must
    # report a length of 3 (the previous expectation of 4 contradicted
    # set semantics and broke the test).
    (SortedSet([1, 1, 2, 2, 3, 3]), 3)
], ids=[
    'no iterable',
    'empty iterable',
    'single item',
    'multiple items',
    'duplicate multiple items'
])
def test_len(sorted_set, expected):
    '''Calculate set length.'''
    assert len(sorted_set) == expected
@pytest.fixture
def standard_set(request):
    '''Return sorted set.'''
    # NOTE(review): duplicate of the fixture defined earlier in this
    # module; one of the two definitions should be removed.
    return SortedSet([4, 5, 6, 7, 2, 1, 1])
@pytest.mark.parametrize(('sorted_set', 'item', 'expected'), [
    (SortedSet(), 1, 1),
    (SortedSet([1]), 1, 1),
    (SortedSet([1]), 2, 2)
], ids=[
    'item',
    'existing item',
    'new item'
])
def test_add(sorted_set, item, expected):
    '''Add item.'''
    # Adding an already-present item must be a no-op for the length.
    sorted_set.add(item)
    assert item in sorted_set
    assert len(sorted_set) == expected
@pytest.mark.parametrize(('sorted_set', 'item'), [
    (SortedSet([1]), 1),
    (SortedSet(), 1)
], ids=[
    'present item',
    'missing item'
])
def test_discard(sorted_set, item):
    '''Discard item.'''
    # discard() must not raise when the item is absent.
    sorted_set.discard(item)
    assert item not in sorted_set
|
Python
| 0
|
@@ -731,17 +731,17 @@
3, 3%5D),
-4
+3
)%0A%5D, ids
|
656e41ef504d42d2fcf0155aaedccc45c9d72c33
|
Add the null handler to the root logger to prevent Tornado from doing logging.basicConfig
|
rejected/controller.py
|
rejected/controller.py
|
"""
OS Level controlling class invokes startup, shutdown and handles signals.
"""
import clihelper
import logging
import signal
import sys
from rejected import mcp
from rejected import __version__
LOGGER = logging.getLogger(__name__)
class Controller(clihelper.Controller):
    """Rejected Controller application that invokes the MCP and handles all
    of the OS level concerns (signals, timers, shutdown sequencing).
    """
    def _master_control_program(self):
        """Return an instance of the MasterControlProgram configured from
        the parsed command line options.

        :rtype: rejected.mcp.MasterControlProgram
        """
        return mcp.MasterControlProgram(self._config,
                                        consumer=self._options.consumer,
                                        profile=self._options.profile,
                                        quantity=self._options.quantity)
    def _prepend_python_path(self, path):  #pragma: no cover
        """Add the specified value to the python path.

        :param str path: The path to append
        """
        LOGGER.debug('Prepending "%s" to the python path.', path)
        sys.path.insert(0, path)
    def _setup(self):
        """Continue the run process blocking on MasterControlProgram.run"""
        # If the app was invoked to specified to prepend the path, do so now
        if self._options.prepend_path:
            self._prepend_python_path(self._options.prepend_path)
    def stop(self):
        """Shutdown the MCP and child processes cleanly"""
        LOGGER.info('Shutting down controller')
        self.set_state(self.STATE_STOP_REQUESTED)
        # Clear out the timer
        signal.setitimer(signal.ITIMER_PROF, 0, 0)
        self._mcp.stop_processes()
        if self._mcp.is_running:
            LOGGER.info('Waiting up to 3 seconds for MCP to shut things down')
            # Arm a real-time timer so pause() returns after at most 3s.
            signal.setitimer(signal.ITIMER_REAL, 3, 0)
            signal.pause()
            LOGGER.info('Post pause')
        # Force MCP to stop
        if self._mcp.is_running:
            LOGGER.warning('MCP is taking too long, requesting process kills')
            self._mcp.stop_processes()
            del self._mcp
        else:
            LOGGER.info('MCP exited cleanly')
        # Change our state
        self._stopped()
        LOGGER.info('Shutdown complete')
    def run(self):
        """Run the rejected Application"""
        self._setup()
        self._mcp = self._master_control_program()
        try:
            self._mcp.run()
        except KeyboardInterrupt:
            LOGGER.info('Caught CTRL-C, shutting down')
            # Re-establish logging before tearing down the children.
            clihelper.setup_logging(self._debug)
        if self.is_running:
            self.stop()
def _cli_options(parser):
"""Add options to the parser
:param optparse.OptionParser parser: The option parser to add options to
"""
parser.add_option('-P', '--profile',
action='store',
default=None,
dest='profile',
help='Profile the consumer modules, specifying '
'the output directory.')
parser.add_option('-o', '--only',
action='store',
default=None,
dest='consumer',
help='Only run the consumer specified')
parser.add_option('-p', '--prepend-path',
action='store',
default=None,
dest='prepend_path',
help='Prepend the python path with the value.')
parser.add_option('-q', '--qty',
action='store',
type='int',
default=1,
dest='quantity',
help='Run the specified quanty of consumer processes '
'when used in conjunction with -o')
def main():
"""Called when invoking the command line script."""
clihelper.setup('rejected', 'RabbitMQ consumer framework', __version__)
clihelper.run(Controller, _cli_options)
if __name__ == '__main__':
main()
|
Python
| 0.000001
|
@@ -134,16 +134,44 @@
rt sys%0A%0A
+from rejected import common%0A
from rej
@@ -1298,16 +1298,50 @@
so now%0A
+ common.add_null_handler()%0A
|
db0e6265892231ecf10244eb7ddcddc62a12b82b
|
Fix bug where cached items in subfolders would be re-read.
|
configmanager.py
|
configmanager.py
|
import json
import os
import os.path
class ConfigManager():
_cache = {}
def __init__(self, configPath = "configs/"):
if os.path.isdir(configPath):
self.configPath = configPath
else:
raise IOError("Config Path does not eixst")
self._configs = {}
self._syncCache()
self.getConfigs()
def __getitem__(self, key):
try:
return self._configs[key]
except KeyError:
self.syncCache()
return self._configs[key]
#Recursive function to get all files. Sub is the relative path from the root config dir.
def getConfigs(self, path = None, sub = "", overrideCache = False):
if path == None:
path = self.configPath
files = os.listdir(path)
for item in files:
#Ignore hidden files.
if item[0] == ".":
continue
#Remove the .json handle from the name
name = item.replace(".json", "")
finalPath = os.path.join(sub, name)
#If it's a directory, run this function again within that directory
if os.path.isdir(os.path.join(path, item)):
self.getConfigs(path = os.path.join(path, item), sub = os.path.join(sub, item))
#If we already have something from the cache, skip it.
elif overrideCache or name not in self._configs:
#Read in the file
f = open(os.path.join(path, item), "r")
#Check if it's JSON. If it is, it will be parsed.
parsed = self.parseConfig(f.read())
f.close()
if parsed != None:
self.addConfig(finalPath, parsed)
#Returns parsed JSON if config is valid JSON, otherwise, return Noen
def parseConfig(self, config):
try:
return json.loads(config)
except ValueError:
return None
def addConfig(self, name, contents):
self._configs[name] = contents
ConfigManager._cache[name] = contents
def _syncCache(self):
unmatchedKeys = [key for key in ConfigManager._cache.keys() if key not in self._configs]
for key in unmatchedKeys:
self._configs[key] = ConfigManager._cache[key]
|
Python
| 0
|
@@ -829,20 +829,20 @@
%09%09%09final
-Path
+Name
= os.pa
@@ -1111,16 +1111,40 @@
e cache,
+ or added in previously,
skip it
@@ -1170,17 +1170,22 @@
ache or
-n
+finalN
ame not
@@ -1424,20 +1424,20 @@
ig(final
-Path
+Name
, parsed
@@ -1438,17 +1438,16 @@
parsed)%0A
-%0A
%09#Return
|
109a07b8344df9c2420c2cea7f9bd6419284c920
|
Fix get_employees_with_number query
|
erpnext/communication/doctype/call_log/call_log.py
|
erpnext/communication/doctype/call_log/call_log.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2019, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.model.document import Document
from erpnext.crm.doctype.utils import get_scheduled_employees_for_popup, strip_number
from frappe.contacts.doctype.contact.contact import get_contact_with_phone_number
from erpnext.crm.doctype.lead.lead import get_lead_with_phone_number
class CallLog(Document):
def before_insert(self):
number = strip_number(self.get('from'))
self.contact = get_contact_with_phone_number(number)
self.lead = get_lead_with_phone_number(number)
def after_insert(self):
self.trigger_call_popup()
def on_update(self):
doc_before_save = self.get_doc_before_save()
if not doc_before_save: return
if doc_before_save.status in ['Ringing'] and self.status in ['Missed', 'Completed']:
frappe.publish_realtime('call_{id}_disconnected'.format(id=self.id), self)
elif doc_before_save.to != self.to:
self.trigger_call_popup()
def trigger_call_popup(self):
scheduled_employees = get_scheduled_employees_for_popup(self.to)
employee_emails = get_employees_with_number(self.to)
# check if employees with matched number are scheduled to receive popup
emails = set(scheduled_employees).intersection(employee_emails)
# # if no employee found with matching phone number then show popup to scheduled employees
# emails = emails or scheduled_employees if employee_emails
for email in emails:
frappe.publish_realtime('show_call_popup', self, user=email)
@frappe.whitelist()
def add_call_summary(call_log, summary):
doc = frappe.get_doc('Call Log', call_log)
doc.add_comment('Comment', frappe.bold(_('Call Summary')) + '<br><br>' + summary)
def get_employees_with_number(number):
number = strip_number(number)
if not number: return []
employee_emails = frappe.cache().hget('employees_with_number', number)
if employee_emails: return employee_emails
employees = frappe.get_all('Employee', filters={
'cell_number': ['like', '%{}'.format(number)],
'user_id': ['!=', '']
}, fields=['user_id'])
employee_emails = [employee.user_id for employee in employees]
frappe.cache().hset('employees_with_number', number, employee_emails)
return employee
def set_caller_information(doc, state):
'''Called from hooks on creation of Lead or Contact'''
if doc.doctype not in ['Lead', 'Contact']: return
numbers = [doc.get('phone'), doc.get('mobile_no')]
# contact for Contact and lead for Lead
fieldname = doc.doctype.lower()
# contact_name or lead_name
display_name_field = '{}_name'.format(fieldname)
for number in numbers:
number = strip_number(number)
if not number: continue
filters = frappe._dict({
'from': ['like', '%{}'.format(number)],
fieldname: ''
})
logs = frappe.get_all('Call Log', filters=filters)
for log in logs:
frappe.db.set_value('Call Log', log.name, {
fieldname: doc.name,
display_name_field: doc.get_title()
}, update_modified=False)
|
Python
| 0.000003
|
@@ -2094,32 +2094,33 @@
': %5B'like', '%25%7B%7D
+%25
'.format(number)
|
e98e7313c5a7c3ef6c25a81d873f10a727f9523c
|
Change remaining tf.mul -> tf.multiply, tf.neg -> tf.negative, and tf.sub -> tf.subtract
|
tensorflow/python/saved_model/example/saved_model_half_plus_two.py
|
tensorflow/python/saved_model/example/saved_model_half_plus_two.py
|
## Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Exports an example linear regression inference graph.
Exports a TensorFlow graph to /tmp/saved_model/half_plus_two/ based on the
SavedModel format.
This graph calculates,
y = a*x + b
and/or, independently,
y2 = a*x2 + c
where a, b and c are variables with a=0.5 and b=2 and c=3.
Output from this program is typically used to exercise SavedModel load and
execution code.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.lib.io import file_io
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import signature_def_utils
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.util import compat
tf.app.flags.DEFINE_string("output_dir", "/tmp/saved_model_half_plus_two",
"Directory where to ouput SavedModel.")
tf.app.flags.DEFINE_string("output_dir_pbtxt",
"/tmp/saved_model_half_plus_two_pbtxt",
"Directory where to ouput the text format of "
"SavedModel.")
FLAGS = tf.flags.FLAGS
def _write_assets(assets_directory, assets_filename):
"""Writes asset files to be used with SavedModel for half plus two.
Args:
assets_directory: The directory to which the assets should be written.
assets_filename: Name of the file to which the asset contents should be
written.
Returns:
The path to which the assets file was written.
"""
if not file_io.file_exists(assets_directory):
file_io.recursive_create_dir(assets_directory)
path = os.path.join(
compat.as_bytes(assets_directory), compat.as_bytes(assets_filename))
file_io.write_string_to_file(path, "asset-file-contents")
return path
def _generate_saved_model_for_half_plus_two(export_dir, as_text=False):
"""Generates SavedModel for half plus two.
Args:
export_dir: The directory to which the SavedModel should be written.
as_text: Writes the SavedModel protocol buffer in text format to disk.
"""
builder = saved_model_builder.SavedModelBuilder(export_dir)
with tf.Session(graph=tf.Graph()) as sess:
# Set up the model parameters as variables to exercise variable loading
# functionality upon restore.
a = tf.Variable(0.5, name="a")
b = tf.Variable(2.0, name="b")
c = tf.Variable(3.0, name="c")
# Create a placeholder for serialized tensorflow.Example messages to be fed.
serialized_tf_example = tf.placeholder(tf.string, name="tf_example")
# Parse the tensorflow.Example looking for a feature named "x" with a single
# floating point value.
feature_configs = {"x": tf.FixedLenFeature([1], dtype=tf.float32)}
tf_example = tf.parse_example(serialized_tf_example, feature_configs)
# Use tf.identity() to assign name
x = tf.identity(tf_example["x"], name="x")
y = tf.add(tf.mul(a, x), b, name="y")
x2 = tf.placeholder(tf.float32, name="x2")
tf.add(tf.mul(a, x2), c, name="y2")
# Create an assets file that can be saved and restored as part of the
# SavedModel.
original_assets_directory = "/tmp/original/export/assets"
original_assets_filename = "foo.txt"
original_assets_filepath = _write_assets(original_assets_directory,
original_assets_filename)
# Set up the assets collection.
assets_filepath = tf.constant(original_assets_filepath)
tf.add_to_collection(tf.GraphKeys.ASSET_FILEPATHS, assets_filepath)
filename_tensor = tf.Variable(
original_assets_filename,
name="filename_tensor",
trainable=False,
collections=[])
assign_filename_op = filename_tensor.assign(original_assets_filename)
# Set up the signature for regression with input and output tensor
# specification.
input_tensor = meta_graph_pb2.TensorInfo()
input_tensor.name = serialized_tf_example.name
signature_inputs = {signature_constants.REGRESS_INPUTS: input_tensor}
output_tensor = meta_graph_pb2.TensorInfo()
output_tensor.name = tf.identity(y).name
signature_outputs = {signature_constants.REGRESS_OUTPUTS: output_tensor}
signature_def = signature_def_utils.build_signature_def(
signature_inputs, signature_outputs,
signature_constants.REGRESS_METHOD_NAME)
# Set up the signature for Predict with input and output tensor
# specification.
predict_input_tensor = meta_graph_pb2.TensorInfo()
predict_input_tensor.name = x.name
predict_signature_inputs = {
"x": predict_input_tensor
}
predict_output_tensor = meta_graph_pb2.TensorInfo()
predict_output_tensor.name = y.name
predict_signature_outputs = {
"y": predict_output_tensor
}
predict_signature_def = signature_def_utils.build_signature_def(
predict_signature_inputs, predict_signature_outputs,
signature_constants.PREDICT_METHOD_NAME)
# Initialize all variables and then save the SavedModel.
sess.run(tf.global_variables_initializer())
builder.add_meta_graph_and_variables(
sess, [tag_constants.SERVING],
signature_def_map={
signature_constants.REGRESS_METHOD_NAME:
signature_def,
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
predict_signature_def
},
assets_collection=tf.get_collection(tf.GraphKeys.ASSET_FILEPATHS),
legacy_init_op=tf.group(assign_filename_op))
builder.save(as_text)
def main(_):
_generate_saved_model_for_half_plus_two(FLAGS.output_dir)
print("SavedModel generated at: %s" % FLAGS.output_dir)
_generate_saved_model_for_half_plus_two(FLAGS.output_dir_pbtxt, as_text=True)
print("SavedModel generated at: %s" % FLAGS.output_dir_pbtxt)
if __name__ == "__main__":
tf.app.run()
|
Python
| 0.999141
|
@@ -3763,24 +3763,29 @@
f.add(tf.mul
+tiply
(a, x), b, n
@@ -3858,16 +3858,21 @@
d(tf.mul
+tiply
(a, x2),
|
e8d73688bd08921f62fad232938613051c3f32b5
|
Fix typo in has_url docstring.
|
wafer/talks/models.py
|
wafer/talks/models.py
|
from django.utils.translation import ugettext_lazy as _
from django.core.urlresolvers import reverse
from django.conf import settings
from django.db import models
from markitup.fields import MarkupField
# constants to make things clearer elsewhere
ACCEPTED = 'A'
PENDING = 'P'
REJECTED = 'R'
class TalkType(models.Model):
"""A type of talk."""
name = models.CharField(max_length=255)
description = models.TextField(max_length=1024)
def __unicode__(self):
return u'%s' % (self.name,)
class Talk(models.Model):
class Meta:
permissions = (
("view_all_talks", "Can see all talks"),
)
TALK_STATUS = (
(ACCEPTED, 'Accepted'),
(REJECTED, 'Not Accepted'),
(PENDING, 'Under Consideration'),
)
talk_id = models.AutoField(primary_key=True)
talk_type = models.ForeignKey(TalkType, null=True)
title = models.CharField(max_length=1024)
abstract = MarkupField(
help_text=_("Write two or three paragraphs describing your talk. "
"Who is your audience? What will they get out of it? "
"What will you cover?<br />"
"You can use Markdown syntax."))
notes = models.TextField(
null=True, blank=True,
help_text=_("Any notes for the conference organisers?"))
status = models.CharField(max_length=1, choices=TALK_STATUS,
default=PENDING)
corresponding_author = models.ForeignKey(
settings.AUTH_USER_MODEL, related_name='contact_talks')
authors = models.ManyToManyField(settings.AUTH_USER_MODEL,
related_name='talks')
def __unicode__(self):
return u'%s: %s' % (self.corresponding_author, self.title)
def get_absolute_url(self):
return reverse('wafer_talk', args=(self.talk_id,))
def get_author_contact(self):
email = self.corresponding_author.email
profile = self.corresponding_author.get_profile()
if profile.contact_number:
contact = profile.contact_number
else:
# Should we wrap this in a span for styling?
contact = 'NO CONTACT INFO'
return '%s - %s' % (email, contact)
get_author_contact.short_description = 'Contact Details'
def get_author_name(self):
return '%s (%s)' % (self.corresponding_author,
self.corresponding_author.get_full_name())
get_author_name.admin_order_field = 'corresponding_author'
get_author_name.short_description = 'Corresponding Author'
def get_author_display_name(self):
full_name = self.corresponding_author.get_full_name()
if full_name:
return full_name
return self.corresponding_author.username
def get_in_schedule(self):
if self.scheduleitem_set.all():
return True
return False
get_in_schedule.short_description = 'Added to schedule'
get_in_schedule.boolean = True
def has_url(self):
"""Test in the talk has urls associated with it"""
if self.talkurl_set.all():
return True
return False
has_url.boolean = True
# Helpful properties for the templates
accepted = property(fget=lambda x: x.status == ACCEPTED)
pending = property(fget=lambda x: x.status == PENDING)
reject = property(fget=lambda x: x.status == REJECTED)
def can_view(self, user):
if user.has_perm('talks.view_all_talks'):
return True
if self.authors.filter(username=user.username).exists():
return True
if self.accepted:
return True
return False
@classmethod
def can_view_all(cls, user):
return user.has_perm('talks.view_all_talks')
def can_edit(self, user):
if user.has_perm('talks.change_talk'):
return True
if self.pending:
if self.authors.filter(username=user.username).exists():
return True
return False
class TalkUrl(models.Model):
"""An url to stuff relevant to the talk - videos, slides, etc.
Note that these are explicitly not intended to be exposed to the
user, but exist for use by the conference organisers."""
description = models.CharField(max_length=256)
url = models.URLField()
talk = models.ForeignKey(Talk)
if settings.WAFER_NEEDS_SOUTH:
# Django 1.7 updates permissions automatically when migrate is run,
# but South 1.0 still needs this to be explicitly hooked up like we
# do here.
from south.signals import post_migrate
def update_permissions_after_migration(app, **kwargs):
from django.db.models import get_app, get_models
from django.contrib.auth.management import create_permissions
create_permissions(get_app(app), get_models(),
verbosity=2 if settings.DEBUG else 0)
post_migrate.connect(update_permissions_after_migration)
|
Python
| 0.000017
|
@@ -3046,17 +3046,17 @@
%22%22Test i
-n
+f
the tal
|
17af071faa70d3dc4a884f62fb50f34e8621ac6d
|
Update watchman/constants.py
|
watchman/constants.py
|
watchman/constants.py
|
DEFAULT_CHECKS = (
'watchman.checks.caches',
'watchman.checks.databases',
'watchman.checks.storage',
)
PAID_CHECKS = (
'watchman.checks.email',
)
|
Python
| 0
|
@@ -108,16 +108,17 @@
age',%0A)%0A
+%0A
PAID_CHE
|
6fedc3e826220f69ffd503b7c73e02962cfc1752
|
use cp.testing.assert_array_equal
|
examples/gemm/sgemm.py
|
examples/gemm/sgemm.py
|
from __future__ import division
import argparse
import math
import cupy as cp
import numpy as np
from utils import benchmark
from utils import load_kernel
from utils import read_code
sgemm_file = 'sgemm.cu'
def sgemm(A, B,
dim_x=16, dim_y=16, blk_m=64, blk_n=64, blk_k=4,
dim_xa=64, dim_ya=4, dim_xb=4, dim_yb=64):
assert A.dtype == cp.float32
assert B.dtype == cp.float32
assert(dim_x * dim_y == dim_xa * dim_ya == dim_xb * dim_yb)
m, k = A.shape
k, n = B.shape
# Inputs matrices need to be in Fortran order.
A = cp.asfortranarray(A)
B = cp.asfortranarray(B)
C = cp.empty((m, n), dtype=cp.float32, order='F')
config = {'DIM_X': dim_x, 'DIM_Y': dim_y,
'BLK_M': blk_m, 'BLK_N': blk_n, 'BLK_K': blk_k,
'DIM_XA': dim_xa, 'DIM_YA': dim_ya,
'DIM_XB': dim_xb, 'DIM_YB': dim_yb,
'THR_M': blk_m // dim_x, 'THR_N': blk_n // dim_y}
code = read_code(sgemm_file, params=config)
kern = load_kernel('sgemm', code)
grid = (math.ceil(m / blk_m), math.ceil(n / blk_n), 1)
block = (dim_x, dim_y, 1)
args = (m, n, k, A, B, C)
shared_mem = blk_k * (blk_m + 1) * 4 + blk_n * (blk_k + 1) * 4
kern(grid, block, args=args, shared_mem=shared_mem)
return C
def main():
parser = argparse.ArgumentParser(
description='SGEMM kernel call from CuPy')
parser.add_argument(
'--m', type=int, default=np.random.randint(5000, 12000))
parser.add_argument(
'--n', type=int, default=np.random.randint(5000, 12000))
parser.add_argument(
'--k', type=int, default=np.random.randint(500, 5000))
args = parser.parse_args()
print('m={} n={} k={}'.format(args.m, args.n, args.k))
print('start benchmarking')
print('')
A = cp.random.uniform(
low=-1., high=1., size=(args.m, args.k)).astype(cp.float32)
B = cp.random.uniform(
low=-1., high=1., size=(args.k, args.n)).astype(cp.float32)
# check correctness
np.testing.assert_equal(sgemm(A, B).get(), cp.dot(A, B).get())
# dry run
for _ in range(3):
sgemm(A, B)
kernel_times = benchmark(sgemm, (A, B), n_run=5)
for _ in range(3):
cp.dot(A, B)
cublas_times = benchmark(cp.dot, (A, B), n_run=5)
print('=============================Result===============================')
print('hand written kernel time {} ms'.format(np.mean(kernel_times)))
print('cuBLAS time {} ms'.format(np.mean(cublas_times)))
if __name__ == '__main__':
main()
|
Python
| 0.000337
|
@@ -2012,17 +2012,17 @@
ess%0A
-n
+c
p.testin
@@ -2030,16 +2030,22 @@
.assert_
+array_
equal(sg
@@ -2053,22 +2053,16 @@
mm(A, B)
-.get()
, cp.dot
@@ -2071,14 +2071,8 @@
, B)
-.get()
)%0A%0A
|
e1a7e4535e64c005fb508ba6d3fed021bbd40a62
|
Update only tables in visible schemas
|
oedb_datamodels/versions/1a73867b1e79_add_meta_search.py
|
oedb_datamodels/versions/1a73867b1e79_add_meta_search.py
|
"""Add meta_search table
Revision ID: 1a73867b1e79
Revises: 1c6e2fb3d3b6
Create Date: 2019-04-29 11:47:04.783168
"""
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql
from sqlalchemy.orm.session import sessionmaker
from api.actions import update_meta_search
# revision identifiers, used by Alembic.
revision = "1a73867b1e79"
down_revision = "1c6e2fb3d3b6"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"meta_search",
sa.Column("schema", sa.String(length=100), nullable=False),
sa.Column("table", sa.String(length=100), nullable=False),
sa.Column("comment", postgresql.TSVECTOR(), nullable=True),
sa.PrimaryKeyConstraint("schema", "table"),
schema="public",
)
conn = op.get_bind()
meta = sa.MetaData(bind=conn)
meta.reflect()
for table in meta.tables.values():
update_meta_search(table.name, table.schema)
def downgrade():
op.drop_table("meta_search", schema="public")
|
Python
| 0
|
@@ -293,16 +293,60 @@
a_search
+%0Afrom dataedit.views import schema_whitelist
%0A%0A# revi
@@ -1003,24 +1003,73 @@
s.values():%0A
+ if table.schema in schema_whitelist:%0A
upda
|
f1fec3790fee11ff3d83c272e3a2aa7bb548ddfa
|
Remove print
|
open_spiel/python/algorithms/expected_game_score_test.py
|
open_spiel/python/algorithms/expected_game_score_test.py
|
# Copyright 2019 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.algorithms.policy_value."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
import numpy as np
from open_spiel.python import policy
from open_spiel.python.algorithms import expected_game_score
import open_spiel.python.games
import pyspiel
class PolicyValueTest(absltest.TestCase):
def test_expected_game_score_uniform_random_kuhn_poker(self):
game = pyspiel.load_game("kuhn_poker")
uniform_policy = policy.UniformRandomPolicy(game)
uniform_policy_values = expected_game_score.policy_value(
game.new_initial_state(), [uniform_policy] * 2)
self.assertTrue(np.allclose(uniform_policy_values, [1 / 8, -1 / 8]))
def test_expected_game_score_uniform_random_iterated_prisoner_dilemma(self):
game = pyspiel.load_game(
"python_iterated_prisoners_dilemma(max_game_length=6)")
uniform_policy = policy.UniformRandomPolicy(game)
uniform_policy_values = expected_game_score.policy_value(
game.new_initial_state(), uniform_policy)
print(uniform_policy_values)
self.assertTrue(
np.allclose(uniform_policy_values, [17.6385498, 17.6385498]))
if __name__ == "__main__":
absltest.main()
|
Python
| 0.000016
|
@@ -1712,41 +1712,8 @@
cy)%0A
- print(uniform_policy_values)%0A
|
0971a1216ccd88f7822148a83765b2b5346198ee
|
Fix infobar test by actually waiting for infobar before checking for it.
|
chrome/test/functional/infobars.py
|
chrome/test/functional/infobars.py
|
#!/usr/bin/python
# Copyright (c) 2010 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import re
import pyauto_functional # Must be imported before pyauto
import pyauto
class InfobarTest(pyauto.PyUITest):
"""TestCase for Infobars."""
def Debug(self):
"""Test method for experimentation.
This method will not run automatically.
To run:
python chrome/test/functional/infobars.py infobars.InfobarTest.Debug
"""
import pprint
pp = pprint.PrettyPrinter(indent=2)
while True:
raw_input('Hit <enter> to dump info.. ')
info = self.GetBrowserInfo()
for window in info['windows']:
for tab in window['tabs']:
print 'Window', window['index'], 'tab', tab['index']
pp.pprint(tab['infobars'])
def _GetTabInfo(self, windex=0, tab_index=0):
"""Helper to return info for the given tab in the given window.
Defaults to first tab in first window.
"""
return self.GetBrowserInfo()['windows'][windex]['tabs'][tab_index]
def testPluginCrashInfobar(self):
"""Verify the "plugin crashed" infobar."""
flash_url = self.GetFileURLForPath(os.path.join(self.DataDir(),
'plugin', 'flash.swf'))
# Trigger flash plugin
self.NavigateToURL(flash_url)
child_processes = self.GetBrowserInfo()['child_processes']
flash = [x for x in child_processes if
x['type'] == 'Plug-in' and x['name'] == 'Shockwave Flash'][0]
self.assertTrue(flash)
logging.info('Killing flash plugin. pid %d' % flash['pid'])
self.Kill(flash['pid'])
self.WaitForInfobarCount(1)
crash_infobar = self._GetTabInfo()['infobars']
self.assertTrue(crash_infobar)
self.assertEqual(1, len(crash_infobar))
self.assertTrue(re.match('The following plug-in has crashed:',
crash_infobar[0]['text']))
self.assertEqual('alert_infobar', crash_infobar[0]['type'])
# Dismiss the infobar
self.PerformActionOnInfobar('dismiss', infobar_index=0)
self.assertFalse(self._GetTabInfo()['infobars'])
def _VerifyGeolocationInfobar(self, match_text, windex, tab_index):
"""Verify geolocation infobar and match given text.
Assumes that geolocation infobar is showing up in the given tab in the
given window.
"""
tab_info = self._GetTabInfo(windex, tab_index)
geolocation_infobar = tab_info['infobars']
self.assertTrue(geolocation_infobar)
self.assertEqual(1, len(geolocation_infobar))
self.assertEqual(match_text, geolocation_infobar[0]['text'])
self.assertEqual('Learn more', geolocation_infobar[0]['link_text'])
self.assertEqual(2, len(geolocation_infobar[0]['buttons']))
self.assertEqual('Allow', geolocation_infobar[0]['buttons'][0])
self.assertEqual('Deny', geolocation_infobar[0]['buttons'][1])
def testGeolocationInfobar(self):
"""Verify geoLocation infobar."""
url = 'http://m.flickr.com/#/nearby/' # automatically triggers geolocation
match_text='m.flickr.com wants to track your physical location'
self.NavigateToURL(url)
self._VerifyGeolocationInfobar(windex=0, tab_index=0, match_text=match_text)
# Accept, and verify that the infobar went away
self.PerformActionOnInfobar('accept', infobar_index=0)
self.assertFalse(self._GetTabInfo()['infobars'])
def testGeolocationInfobarInMultipleTabsAndWindows(self):
"""Verify GeoLocation inforbar in multiple tabs."""
url = 'http://m.flickr.com/#/nearby/' # automatically triggers geolocation
match_text='m.flickr.com wants to track your physical location'
for tab_index in range(1, 2):
self.AppendTab(pyauto.GURL(url))
self._VerifyGeolocationInfobar(windex=0, tab_index=tab_index,
match_text=match_text)
# Try in a new window
self.OpenNewBrowserWindow(True)
self.NavigateToURL(url, 1, 0)
self._VerifyGeolocationInfobar(windex=1, tab_index=0, match_text=match_text)
# Incognito window
self.RunCommand(pyauto.IDC_NEW_INCOGNITO_WINDOW)
self.NavigateToURL(url, 2, 0)
self._VerifyGeolocationInfobar(windex=2, tab_index=0, match_text=match_text)
if __name__ == '__main__':
pyauto_functional.Main()
|
Python
| 0.000003
|
@@ -3202,24 +3202,56 @@
eToURL(url)%0A
+ self.WaitForInfobarCount(1)%0A
self._Ve
@@ -3315,24 +3315,24 @@
match_text)%0A
-
# Accept
@@ -3821,16 +3821,81 @@
L(url))%0A
+ self.WaitForInfobarCount(1, windex=0, tab_index=tab_index)%0A
se
@@ -4106,24 +4106,79 @@
(url, 1, 0)%0A
+ self.WaitForInfobarCount(1, windex=1, tab_index=0)%0A
self._Ve
@@ -4322,16 +4322,16 @@
WINDOW)%0A
-
self
@@ -4352,24 +4352,79 @@
(url, 2, 0)%0A
+ self.WaitForInfobarCount(1, windex=2, tab_index=0)%0A
self._Ve
|
fa52bbde01f62bb0816e71970ac50761947afa72
|
Improve comment
|
retaining_wall.py
|
retaining_wall.py
|
class RetainingWallSolver(object):
def retaining_wall(self, wood_lengths, required_lengths):
self.required_lengths = required_lengths
return self.retaining_wall_recursive(wood_lengths, len(required_lengths) - 1)
def retaining_wall_recursive(self, wood_lengths, required_length_idx):
if required_length_idx <= -1:
return {
'cuts': []
}
current_required_length = self.required_lengths[required_length_idx]
possible_subsolutions = []
for wood_length_idx in range(len(wood_lengths) - 1, -1, -1):
if wood_lengths[wood_length_idx] < current_required_length:
# cant cut from this length
continue
# what if we chose to cut this required length out of this wood length
new_wood_lengths = list(wood_lengths)
new_wood_lengths[wood_length_idx] -= current_required_length
subsolution = self.retaining_wall_recursive(new_wood_lengths, required_length_idx - 1)
if not subsolution:
continue
if new_wood_lengths[wood_length_idx] != 0:
subsolution['cuts'].append({
'wood_num': wood_length_idx,
'cut_amount': current_required_length
})
possible_subsolutions.append(subsolution)
if len(possible_subsolutions) == 0:
return False
# return the solution with the least number of cuts
return min(possible_subsolutions, key=lambda s: len(s['cuts']))
|
Python
| 0
|
@@ -768,21 +768,24 @@
cut
-this
+current_
required
len
@@ -780,17 +780,17 @@
required
-
+_
length o
|
b4a9380c73dd367c2cf6249cdf4cdbbdfdbc7907
|
fix example
|
examples/pythonnews.py
|
examples/pythonnews.py
|
"""
Extract python news from python.org
"""
import re
import logging
from pomp.core.base import BaseCrawler, BasePipeline
from pomp.core.item import Item, Field
from pomp.contrib import SimpleDownloader
logging.basicConfig(level=logging.DEBUG)
news_re = re.compile(r'<h2 class="news">(.*?)</h2>([\s\S]*?)<div class="pubdate">(.*?)</div>')
class PythonNewsItem(Item):
title = Field()
published = Field()
def __repr__(self):
return '%s\n\t%s\n' % (
self.title,
self.published,
)
class PythonNewsCrawler(BaseCrawler):
ENTRY_URL = 'http://python.org/news/'
def extract_items(self, response):
for i in news_re.findall(response.body.decode('utf-8')):
item = PythonNewsItem()
item.title, item.published = i[0], i[2]
yield item
def next_url(self, response):
return None # one page crawler
class PrintPipeline(BasePipeline):
def process(self, item):
print(item)
if __name__ == '__main__':
from pomp.core.engine import Pomp
pomp = Pomp(
downloader=SimpleDownloader(),
pipelines=[PrintPipeline()],
)
pomp.pump(PythonNewsCrawler())
|
Python
| 0.0001
|
@@ -958,16 +958,25 @@
ss(self,
+ crawler,
item):%0A
|
e3c42442f090b8b6982f7ff8c93632c43cfa80b3
|
use insights landing for offseason
|
tba_config.py
|
tba_config.py
|
import os
DEBUG = os.environ.get('SERVER_SOFTWARE', '').startswith('Dev')
MAX_YEAR = 2015
# For choosing what the main landing page displays
KICKOFF = 1
BUILDSEASON = 2
COMPETITIONSEASON = 3
OFFSEASON = 4
INSIGHTS = 5
CHAMPS = 6
# The CONFIG variables should have exactly the same structure between environments
# Eventually a test environment should be added. -gregmarra 17 Jul 2012
if DEBUG:
CONFIG = {
"env": "dev",
"memcache": False,
"response_cache": False,
"firebase-url": "https://thebluealliance-dev.firebaseio.com/{}.json?auth={}"
}
else:
CONFIG = {
"env": "prod",
"memcache": True,
"response_cache": True,
"firebase-url": "https://thebluealliance.firebaseio.com/{}.json?auth={}"
}
CONFIG['landing_handler'] = OFFSEASON
CONFIG["static_resource_version"] = 7
|
Python
| 0
|
@@ -797,25 +797,24 @@
ler'%5D =
-OFFSEASON
+INSIGHTS
%0ACONFIG%5B
|
b860d7cb81488f5ebbe7e9e356a6d4f140c33df5
|
update to follow python 2to3 changes
|
tests/__init__.py
|
tests/__init__.py
|
from test_home import *
from test_feed import *
from test_shownote import *
from test_agenda import *
from test_episode import *
|
Python
| 0
|
@@ -1,17 +1,18 @@
from
+.
test_home im
@@ -19,24 +19,25 @@
port *%0Afrom
+.
test_feed im
@@ -44,24 +44,25 @@
port *%0Afrom
+.
test_shownot
@@ -73,24 +73,25 @@
port *%0Afrom
+.
test_agenda
@@ -104,16 +104,17 @@
*%0Afrom
+.
test_epi
|
d77256d1964354eb7dd178f383dd3254c3b4d975
|
Fix source docs page
|
docs/_helpers/source_page.py
|
docs/_helpers/source_page.py
|
"""Generate a restructured text document that describes built-in sources
and save it to this module's docstring for the purpose of including in
sphinx documentation via the automodule directive."""
import string
from sncosmo.models import _SOURCES
lines = [
'',
' '.join([20*'=', 7*'=', 10*'=', 27*'=', 30*'=', 7*'=', 20*'=']),
'{0:20} {1:7} {2:10} {3:27} {4:30} {5:7} {6:50}'.format(
'Name', 'Version', 'Type', 'Subclass', 'Reference', 'Website', 'Notes')
]
lines.append(lines[1])
urlnums = {}
allnotes = []
allrefs = []
for m in _SOURCES.get_loaders_metadata():
reflink = ''
urllink = ''
notelink = ''
if 'note' in m:
if m['note'] not in allnotes:
allnotes.append(m['note'])
notenum = allnotes.index(m['note'])
notelink = '[{0}]_'.format(notenum + 1)
if 'reference' in m:
reflink = '[{0}]_'.format(m['reference'][0])
if m['reference'] not in allrefs:
allrefs.append(m['reference'])
if 'url' in m:
url = m['url']
if url not in urlnums:
if len(urlnums) == 0:
urlnums[url] = 0
else:
urlnums[url] = max(urlnums.values()) + 1
urllink = '`{0}`_'.format(string.ascii_letters[urlnums[url]])
lines.append("{0!r:20} {1!r:7} {2:10} {3:27} {4:30} {5:7} {6:50}"
.format(m['name'], m['version'], m['type'], m['subclass'],
reflink, urllink, notelink))
lines.extend([lines[1], ''])
for refkey, ref in allrefs:
lines.append('.. [{0}] `{1}`__'.format(refkey, ref))
lines.append('')
for url, urlnum in urlnums.items():
lines.append('.. _`{0}`: {1}'.format(string.ascii_letters[urlnum], url))
lines.append('')
for i, note in enumerate(allnotes):
lines.append('.. [{0}] {1}'.format(i + 1, note))
lines.append('')
__doc__ = '\n'.join(lines)
|
Python
| 0.000001
|
@@ -278,17 +278,17 @@
'.join(%5B
-2
+3
0*'=', 7
@@ -342,17 +342,17 @@
'%7B0:
-2
+3
0%7D %7B1:7
@@ -1310,17 +1310,17 @@
d(%22%7B0!r:
-2
+3
0%7D %7B1!r
|
7377dfaa9877e49b41c8a6c8462b425729599728
|
Fix metadata version.
|
lib/ansible/modules/windows/win_pagefile.py
|
lib/ansible/modules/windows/win_pagefile.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2017, Liran Nisanov <lirannis@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_pagefile
version_added: "2.4"
short_description: Query or change pagefile configuration
description:
- Query current pagefile configuration.
- Enable/Disable AutomaticManagedPagefile.
- Create new or override pagefile configuration.
options:
drive:
description:
- The drive of the pagefile.
initial_size:
description:
- The initial size of the pagefile in megabytes.
maximum_size:
description:
- The maximum size of the pagefile in megabytes.
override:
description:
- Override the current pagefile on the drive.
type: bool
default: 'yes'
system_managed:
description:
- Configures current pagefile to be managed by the system.
type: bool
default: 'no'
automatic:
description:
- Configures AutomaticManagedPagefile for the entire system.
type: bool
remove_all:
description:
- Remove all pagefiles in the system, not including automatic managed.
type: bool
default: 'no'
test_path:
description:
- Use Test-Path on the drive to make sure the drive is accessible before creating the pagefile.
type: bool
default: 'yes'
state:
description:
- State of the pagefile.
choices:
- present
- absent
- query
default: query
notes:
- There is difference between automatic managed pagefiles that configured once for the entire system and system managed pagefile that configured per pagefile.
- InitialSize 0 and MaximumSize 0 means the pagefile is managed by the system.
- Value out of range exception may be caused by several different issues, two common problems - No such drive, Pagefile size is too small.
- Setting a pagefile when AutomaticManagedPagefile is on will disable the AutomaticManagedPagefile.
author:
- Liran Nisanov (@LiranNis)
'''
EXAMPLES = r'''
- name: Query pagefiles configuration
win_pagefile:
- name: Query C pagefile
win_pagefile:
drive: C
- name: Set C pagefile, don't override if exists
win_pagefile:
drive: C
initial_size: 1024
maximum_size: 1024
override: no
state: present
- name: Set C pagefile, override if exists
win_pagefile:
drive: C
initial_size: 1024
maximum_size: 1024
state: present
- name: Remove C pagefile
win_pagefile:
drive: C
state: absent
- name: Remove all current pagefiles, enable AutomaticManagedPagefile and query at the end
win_pagefile:
remove_all: yes
automatic: yes
- name: Remove all pagefiles disable AutomaticManagedPagefile and set C pagefile
win_pagefile:
drive: C
initial_size: 2048
maximum_size: 2048
remove_all: yes
automatic: no
state: present
- name: Set D pagefile, override if exists
win_pagefile:
drive: d
initial_size: 1024
maximum_size: 1024
state: present
'''
RETURN = r'''
automatic_managed_pagefiles:
description: Whether the pagefiles is automatically managed.
returned: When state is query.
type: boolean
sample: true
pagefiles:
description: Contains caption, description, initial_size, maximum_size and name for each pagefile in the system.
returned: When state is query.
type: list
sample:
[{"caption": "c:\\ 'pagefile.sys'", "description": "'pagefile.sys' @ c:\\", "initial_size": 2048, "maximum_size": 2048, "name": "c:\\pagefile.sys"},
{"caption": "d:\\ 'pagefile.sys'", "description": "'pagefile.sys' @ d:\\", "initial_size": 1024, "maximum_size": 1024, "name": "d:\\pagefile.sys"}]
'''
|
Python
| 0
|
@@ -322,17 +322,17 @@
on': '1.
-0
+1
',%0A
|
5f5a7ec9460d60a964663ace670529813a41a9d9
|
Update bluetooth_ping_test.py
|
tests/bluetooth_ping_test.py
|
tests/bluetooth_ping_test.py
|
#!/usr/bin/env python
import os
import subprocess as subp
from subprocess import *
from avocado import Test
class WifiScanAP(Test):
def test():
targetDeviceMac = '8C:1A:BF:0D:31:A9'
bluetoothChannel = '2'
port = 1
print("Bluetooth ping test: testing " + targetDeviceMac)
p = subp.Popen(['sudo', 'l2ping', '8C:1A:BF:0D:31:A9','-c', '5'], stdout=subp.PIPE, stderr=subp.PIPE)
stdout, stderr = p.communicate()
res = stdout.rstrip()
if "5 sent, 5 received" in res:
self.log.debug("Bluetooth ping test succeeded: + res")
else:
self.fail("Bluetooth ping test: pinging " + targetDeviceMac + " failed")
|
Python
| 0.000002
|
@@ -102,16 +102,73 @@
t Test%0A%0A
+#I have used my Samsung Galaxy S7 Edge as target device%0A%0A
class Wi
|
3f8f4adec965be69a17a2577b8fd5dd94aa66015
|
Add test to guard against command arg help message regression (#8561)
|
tests/cli/test_cli_parser.py
|
tests/cli/test_cli_parser.py
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import contextlib
import io
import re
from collections import Counter
from unittest import TestCase
from airflow.cli import cli_parser
# Can not be `--snake_case` or contain uppercase letter
ILLEGAL_LONG_OPTION_PATTERN = re.compile("^--[a-z]+_[a-z]+|^--.*[A-Z].*")
# Only can be `-[a-z]` or `-[A-Z]`
LEGAL_SHORT_OPTION_PATTERN = re.compile("^-[a-zA-z]$")
cli_args = {k: v for k, v in cli_parser.__dict__.items() if k.startswith("ARG_")}
class TestCli(TestCase):
def test_arg_option_long_only(self):
"""
Test if the name of cli.args long option valid
"""
optional_long = [
arg
for arg in cli_args.values()
if len(arg.flags) == 1 and arg.flags[0].startswith("-")
]
for arg in optional_long:
self.assertIsNone(ILLEGAL_LONG_OPTION_PATTERN.match(arg.flags[0]),
f"{arg.flags[0]} is not match")
def test_arg_option_mix_short_long(self):
"""
Test if the name of cli.args mix option (-s, --long) valid
"""
optional_mix = [
arg
for arg in cli_args.values()
if len(arg.flags) == 2 and arg.flags[0].startswith("-")
]
for arg in optional_mix:
self.assertIsNotNone(LEGAL_SHORT_OPTION_PATTERN.match(arg.flags[0]),
f"{arg.flags[0]} is not match")
self.assertIsNone(ILLEGAL_LONG_OPTION_PATTERN.match(arg.flags[1]),
f"{arg.flags[1]} is not match")
def test_subcommand_conflict(self):
"""
Test if each of cli.*_COMMANDS without conflict subcommand
"""
subcommand = {
var: cli_parser.__dict__.get(var)
for var in cli_parser.__dict__
if var.isupper() and var.startswith("COMMANDS")
}
for group_name, sub in subcommand.items():
name = [command.name.lower() for command in sub]
self.assertEqual(len(name), len(set(name)),
f"Command group {group_name} have conflict subcommand")
def test_subcommand_arg_name_conflict(self):
"""
Test if each of cli.*_COMMANDS.arg name without conflict
"""
subcommand = {
var: cli_parser.__dict__.get(var)
for var in cli_parser.__dict__
if var.isupper() and var.startswith("COMMANDS")
}
for group, command in subcommand.items():
for com in command:
conflict_arg = [arg for arg, count in Counter(com.args).items() if count > 1]
self.assertListEqual([], conflict_arg,
f"Command group {group} function {com.name} have "
f"conflict args name {conflict_arg}")
def test_subcommand_arg_flag_conflict(self):
"""
Test if each of cli.*_COMMANDS.arg flags without conflict
"""
subcommand = {
key: val
for key, val in cli_parser.__dict__.items()
if key.isupper() and key.startswith("COMMANDS")
}
for group, command in subcommand.items():
for com in command:
position = [
a.flags[0]
for a in com.args
if (len(a.flags) == 1
and not a.flags[0].startswith("-"))
]
conflict_position = [arg for arg, count in Counter(position).items() if count > 1]
self.assertListEqual([], conflict_position,
f"Command group {group} function {com.name} have conflict "
f"position flags {conflict_position}")
long_option = [a.flags[0]
for a in com.args
if (len(a.flags) == 1
and a.flags[0].startswith("-"))] + \
[a.flags[1]
for a in com.args if len(a.flags) == 2]
conflict_long_option = [arg for arg, count in Counter(long_option).items() if count > 1]
self.assertListEqual([], conflict_long_option,
f"Command group {group} function {com.name} have conflict "
f"long option flags {conflict_long_option}")
short_option = [
a.flags[0]
for a in com.args if len(a.flags) == 2
]
conflict_short_option = [arg for arg, count in Counter(short_option).items() if count > 1]
self.assertEqual([], conflict_short_option,
f"Command group {group} function {com.name} have conflict "
f"short option flags {conflict_short_option}")
def test_falsy_default_value(self):
arg = cli_parser.Arg(("--test",), default=0, type=int)
parser = argparse.ArgumentParser()
arg.add_to_parser(parser)
args = parser.parse_args(['--test', '10'])
self.assertEqual(args.test, 10)
args = parser.parse_args([])
self.assertEqual(args.test, 0)
def test_commands_and_command_group_sections(self):
parser = cli_parser.get_parser()
with contextlib.redirect_stdout(io.StringIO()) as stdout:
with self.assertRaises(SystemExit):
parser.parse_args(['--help'])
stdout = stdout.getvalue()
self.assertIn("Commands", stdout)
self.assertIn("Groups", stdout)
|
Python
| 0
|
@@ -6462,20 +6462,692 @@
n(%22Groups%22, stdout)%0A
+%0A def test_should_display_helps(self):%0A parser = cli_parser.get_parser()%0A%0A all_command_as_args = %5B%0A command_as_args%0A for top_commaand in cli_parser.airflow_commands%0A for command_as_args in (%0A %5B%5Btop_commaand.name%5D%5D%0A if isinstance(top_commaand, cli_parser.ActionCommand)%0A else %5B%0A %5Btop_commaand.name, nested_command.name%5D for nested_command in top_commaand.subcommands%0A %5D%0A )%0A %5D%0A for cmd_args in all_command_as_args:%0A with self.assertRaises(SystemExit):%0A parser.parse_args(%5B*cmd_args, '--help'%5D)%0A
|
9cc45f750c0860715e66c085895611984531c48c
|
update standalone disclosure url
|
paying_for_college/config/urls.py
|
paying_for_college/config/urls.py
|
from django.conf.urls import url, include
from django.conf import settings
from paying_for_college.views import LandingView, StandAloneView
from django.contrib import admin
from django.conf import settings
try:
STANDALONE = settings.STANDALONE
except AttributeError: # pragma: no cover
STANDALONE = False
urlpatterns = [
url(r'^$',
LandingView.as_view(), name='pfc-landing'),
url(r'^understanding-financial-aid-offers/',
include('paying_for_college.disclosures.urls',
namespace='disclosures')),
url(r'^repaying-student-debt/$',
StandAloneView.as_view(template_name='repay_student_debt.html'),
name='pfc-repay'),
url(r'^choosing-a-student-loan/$',
StandAloneView.as_view(template_name='choose_a_loan.html'),
name='pfc-choose'),
url(r'^managing-college-money/$',
StandAloneView.as_view(template_name='manage_your_money.html'),
name='pfc-manage'),
]
if STANDALONE:
urlpatterns += [
url(r'^paying-for-college/admin/', include(admin.site.urls)),
url(r'^paying-for-college/$',
LandingView.as_view(), name='standalone:pfc-landing'),
url(r'^paying-for-college/compare-financial-aid-and-college-cost/',
include('paying_for_college.disclosures.urls',
namespace='standalone-disclosures')),
url(r'^paying-for-college/repaying-student-debt/',
StandAloneView.as_view(template_name='repay_student_debt.html'),
name='standalone-pfc-repay'),
url(r'^paying-for-college/choosing-a-student-loan/$',
StandAloneView.as_view(template_name='choose_a_loan.html'),
name='standalone-pfc-choose'),
url(r'^paying-for-college/managing-college-money/$',
StandAloneView.as_view(template_name='manage_your_money.html'),
name='standalone-pfc-manage'),
]
|
Python
| 0
|
@@ -1185,15 +1185,21 @@
ege/
-compare
+understanding
-fin
@@ -1213,24 +1213,14 @@
aid-
-and-college-cost
+offers
/',%0A
|
d9fc68431c2ff8be94d2e2b13a5c8c80e67dacb2
|
Update folder name.
|
openrcv_setup/utils.py
|
openrcv_setup/utils.py
|
import glob
import logging
import os
from pathlib import Path
from subprocess import check_output
import webbrowser
from setuptools import Command
DOCS_PATH = "docs"
DOCS_BUILD_PATH = os.path.join(DOCS_PATH, "build")
ENCODING = 'utf-8'
LONG_DESCRIPTION_PATH = "setup_long_description.rst"
README_PATH = "README.md"
# We do not need to actually import the pandoc filters.
PANDOC_FILTER_DIR = "pandocfilters"
PANDOC_HTML_FILTER = "htmlfilter.py"
PANDOC_RST_FILTER = "rstfilter.py"
log = logging.getLogger(os.path.basename(__name__))
def ensure_dir(path):
if not os.path.exists(path):
log.info("creating dir: %s" % path)
os.makedirs(path)
def read(path, encoding=None):
if encoding is None:
encoding = ENCODING
# This implementation was chosen to be compatible across Python 2/3.
with open(path, 'r', encoding=ENCODING) as f:
text = f.read()
return text
def write(text, path, description=None):
"""Write a string to a file."""
desc = ('%s ' % description) if description else ''
log.info("writing %sto: %s" % (desc, path))
with open(path, 'w', encoding=ENCODING) as f:
f.write(text)
def run_pandoc(args):
args = ['pandoc'] + args
log.info("running pandoc in a subprocess: %r" % " ".join(args))
try:
stdout = check_output(args)
except FileNotFoundError as err:
msg = ("pandoc not found:\n"
" -->%s\n"
" Did you install pandoc? See the documentation for more info." % err)
raise Exception(msg)
return stdout
def run_pandoc_filter(filter_name, output_format, source_path, target_path):
"""
Example:
$ pandoc --filter pandocfilters/htmlfilter.py --write=html \
--output docs/build/README.html README.md
"""
filter_path = os.path.join(PANDOC_FILTER_DIR, filter_name)
return run_pandoc(["--filter", filter_path, "--write=%s" % output_format,
"--output", target_path, source_path])
def html_target_path(rel_path):
return os.path.join(DOCS_BUILD_PATH, rel_path)
def md2html(md_path):
opath = Path(md_path)
target_path = html_target_path(str(opath.with_suffix(".html")))
run_pandoc_filter(PANDOC_HTML_FILTER, "html", md_path, target_path)
return target_path
def build_html():
ensure_dir(DOCS_BUILD_PATH)
target_readme_path = md2html(README_PATH)
ensure_dir(html_target_path(DOCS_PATH))
md_paths = glob.glob(os.path.join(DOCS_PATH, "*.md"))
for md_path in md_paths:
md2html(md_path)
readme_opath = Path(target_readme_path)
uri = readme_opath.resolve().as_uri()
log.info("opening web browser to: %s\n-->%s" % (target_readme_path, uri))
webbrowser.open(uri)
def update_long_description():
return run_pandoc_filter(PANDOC_RST_FILTER, "rst", "README.md",
"setup_long_description.rst")
class CommandBase(Command):
description = None
# The following three must all be present to avoid errors.
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
self._run()
except FileNotFoundError as err:
# Raise a new exception because distutils/setuptools does
# not display the stack trace for these types of errors.
raise Exception("error occurred during setuptools command")
class BuildHtmlCommand(CommandBase):
description = "Build HTML documentation from markdown files."
def _run(self):
build_html()
class LongDescriptionCommand(CommandBase):
description = "Update the reST long_description file."
def _run(self):
update_long_description()
|
Python
| 0
|
@@ -390,24 +390,25 @@
IR = %22pandoc
+_
filters%22%0APAN
|
43b4910e004e7096addb3d50e8a0a6c307a669c6
|
Remove dead get_body_parameter_name_override
|
lepo/apidef/operation/openapi.py
|
lepo/apidef/operation/openapi.py
|
from lepo.apidef.operation.base import Operation
from lepo.apidef.parameter.openapi import OpenAPI3BodyParameter, OpenAPI3Parameter
from lepo.utils import maybe_resolve
class OpenAPI3Operation(Operation):
parameter_class = OpenAPI3Parameter
body_parameter_class = OpenAPI3BodyParameter
def _get_body_parameter(self):
for source in (
self.path.mapping.get('requestBody'),
self.data.get('requestBody'),
):
if source:
source = maybe_resolve(source, self.api.resolve_reference)
body_parameter = self.body_parameter_class(data=source, operation=self, api=self.api)
# TODO: Document x-lepo-body-name
body_parameter.name = self.data.get('x-lepo-body-name', body_parameter.name)
return body_parameter
def get_body_parameter_name_override(self):
return
def get_parameter_dict(self):
parameter_dict = super().get_parameter_dict()
for parameter in parameter_dict.values():
if parameter.in_body: # pragma: no cover
raise ValueError('Regular parameter declared to be in body while parsing OpenAPI 3')
body_parameter = self._get_body_parameter()
if body_parameter:
parameter_dict[body_parameter.name] = body_parameter
return parameter_dict
|
Python
| 0.000064
|
@@ -838,72 +838,8 @@
er%0A%0A
- def get_body_parameter_name_override(self):%0A return%0A%0A
|
b5a6d540f5fdef37b1d58fc45921737e3c77ae96
|
fix user autocomplete
|
let_me_app/views/autocomplete.py
|
let_me_app/views/autocomplete.py
|
from dal import autocomplete
from slugify import slugify
from let_me_auth.models import User
from let_me_app.models import Equipment, StaffRole
class UserAutocomplete(autocomplete.Select2QuerySetView):
def get_queryset(self):
# Don't forget to filter out results depending on the visitor !
if not self.request.user.is_authenticated():
return User.objects.none()
qs = User.objects.all()
if self.q:
qs = User.objects.filter(first_name__istartswith=self.q)
qs = qs | User.objects.filter(last_name__istartswith=self.q)
qs = qs | User.objects.filter(email__istartswith=self.q)
return qs
class EquipmentAutocomplete(autocomplete.Select2QuerySetView):
def get_queryset(self):
# Don't forget to filter out results depending on the visitor !
if not self.request.user.is_authenticated():
return Equipment.objects.none()
qs = Equipment.objects.all()
if self.q:
qs = Equipment.objects.filter(name__istartswith=self.q)
return qs
class StaffRoleAutocomplete(autocomplete.Select2QuerySetView):
def get_queryset(self):
# Don't forget to filter out results depending on the visitor !
if not self.request.user.is_authenticated():
return StaffRole.objects.none()
qs = StaffRole.objects.all()
if self.q:
qs = StaffRole.objects.filter(name__istartswith=self.q)
return qs
|
Python
| 0.000066
|
@@ -142,67 +142,1046 @@
ole%0A
-%0A%0Aclass UserAutocomplete(autocomplete.Select2QuerySetView):
+from let_me_auth.social.pipeline import ABSENT_MAIL_HOST%0Aimport re%0A%0A%0Aclass UserAutocomplete(autocomplete.Select2QuerySetView):%0A create_field = 'username'%0A%0A def create_object(self, text):%0A cell_phone = re.findall(r'%5C+?(%5Cd%7B9,12%7D)', text)%0A if cell_phone:%0A cell_phone = cell_phone%5B0%5D%0A text = re.sub(r'%5C+?(%5Cd%7B9,12%7D)', '', text).strip()%0A parts = text.split(' ', 1)%0A first_name = parts%5B0%5D.strip()%0A email_parts = %5Bslugify(first_name)%5D%0A defaults = %7B'first_name': first_name%7D%0A if len(parts) %3E 1:%0A last_name = parts%5B1%5D.strip()%0A defaults%5B'last_name'%5D = last_name%0A email_parts.append(slugify(last_name))%0A email = '@'.join(%5B'.'.join(email_parts), ABSENT_MAIL_HOST%5D)%0A if cell_phone:%0A required = %7B'cell_phone': cell_phone%7D%0A defaults.update(%7B'email': email%7D)%0A else:%0A required = %7B'email': email%7D%0A user, _ = User.objects.get_or_create(defaults=defaults, **required)%0A return user%0A
%0A
|
060576768e02c0499282770dd22e35048d62b12e
|
Improve clarity of session finish function
|
tests/conftest.py
|
tests/conftest.py
|
from __future__ import print_function
import os
import boto
import pytest
from boto.s3.key import Key as S3Key
from boto.exception import NoAuthHandlerFound
from os.path import join, isfile
s3_bucket = "bokeh-travis"
s3 = "https://s3.amazonaws.com/%s" % s3_bucket
build_id = os.environ.get("TRAVIS_BUILD_ID")
# Can we make this not hard coded and read in the report location from pytest?
report_file = "tests/pytest-report.html"
def pytest_sessionfinish(session, exitstatus):
try_upload = os.environ.get("UPLOAD_PYTEST_HTML", "False") == "True"
report_ready = isfile(report_file)
if try_upload and report_ready:
try:
conn = boto.connect_s3()
bucket = conn.get_bucket(s3_bucket)
upload = True
except NoAuthHandlerFound:
print("Upload was requested but could not connect to S3.")
upload = False
if upload is True:
with open(report_file, "r") as f:
html = f.read()
filename = join(build_id, "report.html")
key = S3Key(bucket, filename)
key.set_metadata("Content-Type", "text/html")
key.set_contents_from_string(html, policy="public-read")
print("\n%s Access report at: %s" % ("---", join(s3, filename)))
@pytest.fixture(scope="session")
def capabilities(capabilities):
capabilities["browserName"] = "firefox"
capabilities["tunnel-identifier"] = os.environ.get("TRAVIS_JOB_NUMBER")
return capabilities
|
Python
| 0
|
@@ -179,16 +179,8 @@
join
-, isfile
%0A%0As3
@@ -469,24 +469,15 @@
s):%0A
+%0A
-try_upload =
+if
os.
@@ -523,17 +523,17 @@
e%22)
-=
+!
= %22True%22
%0A
@@ -532,83 +532,140 @@
rue%22
-%0A report_ready = isfile(report_file)%0A if try_upload and report_
+:%0A return%0A%0A if hasattr(session.config, 'slaveinput'):%0A # when slave nodes (xdist) finish, the report won't be
ready
-:
%0A
@@ -673,17 +673,25 @@
+return%0A%0A
try:%0A
-
@@ -727,20 +727,16 @@
-
bucket =
@@ -767,199 +767,9 @@
et)%0A
- upload = True%0A except NoAuthHandlerFound:%0A print(%22Upload was requested but could not connect to S3.%22)%0A upload = False%0A%0A if upload is True:%0A
+%0A
@@ -818,20 +818,16 @@
-
html = f
@@ -834,25 +834,25 @@
.read()%0A
-
+%0A
filen
@@ -843,19 +843,16 @@
-
filename
@@ -892,20 +892,16 @@
-
key = S3
@@ -922,36 +922,32 @@
lename)%0A
-
-
key.set_metadata
@@ -972,28 +972,24 @@
text/html%22)%0A
-
key.
@@ -1049,20 +1049,16 @@
-
-
print(%22%5C
@@ -1115,16 +1115,204 @@
ame)))%0A%0A
+ except NoAuthHandlerFound:%0A print(%22Upload was requested but could not connect to S3.%22)%0A%0A except OSError:%0A print(%22Upload was requested but report was not generated.%22)%0A%0A
%0A@pytest
|
e7ddc72505057326c94f608551a08583836f1043
|
fix typo.
|
rio/utils/http.py
|
rio/utils/http.py
|
# -*- coding: utf-8 -*-
"""
rio.utils.http
~~~~~~~~~~~~~~
"""
import warnings
from urllib import urlencode
import six
import requests
from requests.exceptions import SSLError
from flask import current_app
from rio.core import celery
from rio.signals import webhook_ran
# In case SSL is unavailable (light builds) we can't import this here.
try:
from OpenSSL.SSL import ZeroReturnError
except ImportError:
class ZeroReturnError(Exception):
pass
def get_user_agent():
return 'sentry/%s' % current_app.config.get('RIO_VERSION')
def build_session():
"""
Rio requests session will attach rio identity in header.
"""
session = requests.Session()
session.headers.update({'User-Agent': get_user_agent()})
return session
def raven_context(url, method=None, params=None, data=None, json=None,
headers=None, allow_redirects=False, timeout=30,
verify_ssl=True, user_agent=None):
headers = {
'User-Agent': get_user_agent(),
}
if json:
headers.setdefault('Content-Type', 'application/json')
if params:
query_string = urlencode(params)
else:
query_string = None
if json:
data = json
if not method:
method = 'POST' if (data or json) else 'GET'
return {
'method': method,
'url': url,
'query_string': query_string,
'data': data,
'headers': headers,
}
def urlopen(url, method=None, params=None, data=None, json=None,
headers=None, allow_redirects=False, timeout=30,
verify_ssl=True, user_agent=None):
"""
A slightly safer version of ``urlib2.urlopen`` which prevents redirection
and ensures the URL isn't attempting to hit a blacklisted IP range.
"""
if user_agent is not None:
warnings.warn('user_agent is no longer used with safe_urlopen')
session = build_session()
kwargs = {}
if json:
kwargs['json'] = json
if not headers:
headers = {}
headers.setdefault('Content-Type', 'application/json')
if data:
kwargs['data'] = data
if params:
kwargs['params'] = params
if headers:
kwargs['headers'] = headers
if method is None:
method = 'POST' if (data or json) else 'GET'
try:
response = session.request(
method=method,
url=url,
allow_redirects=allow_redirects,
timeout=timeout,
verify=verify_ssl,
**kwargs
)
# Our version of requests does not transform ZeroReturnError into an
# appropriately generically catchable exception
except ZeroReturnError as exc:
import sys
exc_tb = sys.exc_info()[2]
six.reraise(SSLError, exc, exc_tb)
del exc_tb
# requests' attempts to use chardet internally when no encoding is found
# and we want to avoid that slow behavior
if not response.encoding:
response.encoding = 'utf-8'
return response
def urlread(response):
return response.content
def is_success_response(response):
return 200 <= response.status_code < 300
def is_failure_response(response):
return 500 <= response.status_code < 600
def is_invalid_response(response):
return 400 <= response.status_code < 500
class FailureWebhookError(Exception):
pass
class InvalidResponseError(FailureWebhookError):
"""The remote server gave an invalid response."""
class RemoteExecuteError(FailureWebhookError):
"""The remote task gave a custom error."""
class UnknownStatusError(FailureWebhookError):
"""The remote server gave an unknown status."""
def extract_response(raw_response):
"""Extract requests response object.
only extract those status_code in [200, 300).
:param raw_response: a requests.Resposne object.
:return: content of response.
"""
data = urlread(raw_response)
if is_success_response(raw_response):
return data
elif is_failure_response(raw_response):
raise RemoteExecuteError(data)
elif is_invalid_response(raw_response):
raise InvalidResponseError(data)
else:
raise UnknownStatusError(data)
def dispatch_webhook_request(url=None, method='GET', params=None,
json=None, headers=None, timeout=5):
"""Task dispatching to an URL.
:param url: The URL location of the HTTP callback task.
:param method: Method to use when dispatching the callback. Usually
`GET` or `POST`.
:param params: Keyword arguments to pass on to the HTTP callback.
:param json: JSON as body to pass on to the POST HTTP callback.
:param headers: HTTP headers applied to callback.
"""
if method == 'GET':
resp = urlopen(url, method, params=params, headers=headers)
elif method == 'POST':
resp = urlopen(url, method, json=json, headers=headers)
else:
raise NotImplementedError
return extract_response(resp)
|
Python
| 0.00016
|
@@ -496,14 +496,11 @@
rn '
-sentry
+rio
/%25s'
|
705ff08853542140b8f3e2a575cb73ee3a5db017
|
support put and delete in task.
|
rio/utils/http.py
|
rio/utils/http.py
|
# -*- coding: utf-8 -*-
"""
rio.utils.http
~~~~~~~~~~~~~~
"""
import warnings
from urllib import urlencode
import six
import requests
from requests.exceptions import SSLError
from flask import current_app
from rio.core import celery
from rio.signals import webhook_ran
# In case SSL is unavailable (light builds) we can't import this here.
try:
from OpenSSL.SSL import ZeroReturnError
except ImportError:
class ZeroReturnError(Exception):
pass
def get_user_agent():
return 'rio/%s' % current_app.config.get('RIO_VERSION')
def build_session():
"""
Rio requests session will attach rio identity in header.
"""
session = requests.Session()
session.headers.update({'User-Agent': get_user_agent()})
return session
def raven_context(url, method=None, params=None, data=None, json=None,
headers=None, allow_redirects=False, timeout=30,
verify_ssl=True, user_agent=None):
headers = {
'User-Agent': get_user_agent(),
}
if json:
headers.setdefault('Content-Type', 'application/json')
if params:
query_string = urlencode(params)
else:
query_string = None
if json:
data = json
if not method:
method = 'POST' if (data or json) else 'GET'
return {
'method': method,
'url': url,
'query_string': query_string,
'data': data,
'headers': headers,
}
def urlopen(url, method=None, params=None, data=None, json=None,
headers=None, allow_redirects=False, timeout=30,
verify_ssl=True, user_agent=None):
"""
A slightly safer version of ``urlib2.urlopen`` which prevents redirection
and ensures the URL isn't attempting to hit a blacklisted IP range.
"""
if user_agent is not None:
warnings.warn('user_agent is no longer used with safe_urlopen')
session = build_session()
kwargs = {}
if json:
kwargs['json'] = json
if not headers:
headers = {}
headers.setdefault('Content-Type', 'application/json')
if data:
kwargs['data'] = data
if params:
kwargs['params'] = params
if headers:
kwargs['headers'] = headers
if method is None:
method = 'POST' if (data or json) else 'GET'
try:
response = session.request(
method=method,
url=url,
allow_redirects=allow_redirects,
timeout=timeout,
verify=verify_ssl,
**kwargs
)
# Our version of requests does not transform ZeroReturnError into an
# appropriately generically catchable exception
except ZeroReturnError as exc:
import sys
exc_tb = sys.exc_info()[2]
six.reraise(SSLError, exc, exc_tb)
del exc_tb
# requests' attempts to use chardet internally when no encoding is found
# and we want to avoid that slow behavior
if not response.encoding:
response.encoding = 'utf-8'
return response
def urlread(response):
return response.content
def is_success_response(response):
return 200 <= response.status_code < 300
def is_failure_response(response):
return 500 <= response.status_code < 600
def is_invalid_response(response):
return 400 <= response.status_code < 500
class FailureWebhookError(Exception):
pass
class InvalidResponseError(FailureWebhookError):
"""The remote server gave an invalid response."""
class RemoteExecuteError(FailureWebhookError):
"""The remote task gave a custom error."""
class UnknownStatusError(FailureWebhookError):
"""The remote server gave an unknown status."""
def extract_response(raw_response):
"""Extract requests response object.
only extract those status_code in [200, 300).
:param raw_response: a requests.Resposne object.
:return: content of response.
"""
data = urlread(raw_response)
if is_success_response(raw_response):
return data
elif is_failure_response(raw_response):
raise RemoteExecuteError(data)
elif is_invalid_response(raw_response):
raise InvalidResponseError(data)
else:
raise UnknownStatusError(data)
def dispatch_webhook_request(url=None, method='GET', params=None,
json=None, data=None, headers=None, timeout=5):
"""Task dispatching to an URL.
:param url: The URL location of the HTTP callback task.
:param method: Method to use when dispatching the callback. Usually
`GET` or `POST`.
:param params: Keyword arguments to pass on to the HTTP callback.
:param json: JSON as body to pass on to the POST HTTP callback.
:param headers: HTTP headers applied to callback.
"""
if method == 'GET':
resp = urlopen(url, method, params=params, headers=headers)
elif method == 'POST':
resp = urlopen(url, method, json=json, data=data, headers=headers)
else:
raise NotImplementedError
return extract_response(resp)
|
Python
| 0
|
@@ -4863,17 +4863,36 @@
hod
-==
+in (
'POST'
+, 'DELETE', 'PUT')
:%0A
|
36a00bd6ece27b89843a856cd2b99d25a1d0e4d3
|
Modify conftest.py to support Python 3.5 only
|
tests/conftest.py
|
tests/conftest.py
|
# -*- coding: utf-8 -*-
"""Used by pytest to do some preparation work before running tests."""
#
# (C) Pywikibot team, 2016-2018
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, division, unicode_literals
import sys
def pytest_configure(config):
"""Set the sys._test_runner_pytest flag to True, if pytest is used."""
sys._test_runner_pytest = True
|
Python
| 0.000002
|
@@ -123,10 +123,10 @@
6-20
-18
+20
%0A#%0A#
@@ -180,76 +180,8 @@
.%0A#%0A
-from __future__ import absolute_import, division, unicode_literals%0A%0A
impo
|
d3fbe9934329df1b1c5f752e4a43981b4fc8beae
|
Use pathlib.Path
|
tests/conftest.py
|
tests/conftest.py
|
import pytest
from _pytest.compat import LEGACY_PATH
from libvcs.shortcuts import create_repo_from_pip_url
from libvcs.util import run
@pytest.fixture(scope="function")
def tmpdir_repoparent(tmpdir_factory):
"""Return temporary directory for repository checkout guaranteed unique."""
fn = tmpdir_factory.mktemp("repo")
return fn
@pytest.fixture
def git_repo_kwargs(tmpdir_repoparent, git_dummy_repo_dir):
"""Return kwargs for :func:`create_repo_from_pip_url`."""
repo_name = "repo_clone"
return {
"url": "git+file://" + git_dummy_repo_dir,
"parent_dir": str(tmpdir_repoparent),
"name": repo_name,
}
@pytest.fixture
def git_repo(git_repo_kwargs):
"""Create an git repository for tests. Return repo."""
git_repo = create_repo_from_pip_url(**git_repo_kwargs)
git_repo.obtain(quiet=True)
return git_repo
@pytest.fixture
def create_git_dummy_repo(tmpdir_repoparent):
def fn(repo_name, testfile_filename="testfile.test"):
repo_path = str(tmpdir_repoparent.join(repo_name))
run(["git", "init", repo_name], cwd=str(tmpdir_repoparent))
run(["touch", testfile_filename], cwd=repo_path)
run(["git", "add", testfile_filename], cwd=repo_path)
run(["git", "commit", "-m", "test file for %s" % repo_name], cwd=repo_path)
return repo_path
yield fn
@pytest.fixture
def git_dummy_repo_dir(tmpdir_repoparent, create_git_dummy_repo):
"""Create a git repo with 1 commit, used as a remote."""
return create_git_dummy_repo("dummyrepo")
@pytest.fixture
def config_dir(tmpdir: LEGACY_PATH):
conf_dir = tmpdir.join(".vcspull")
conf_dir.ensure(dir=True)
return conf_dir
|
Python
| 0.000003
|
@@ -1,12 +1,28 @@
+import pathlib%0A%0A
import pytes
@@ -207,27 +207,35 @@
rent(tmp
-dir_factory
+_path: pathlib.Path
):%0A %22
@@ -325,34 +325,13 @@
tmp
-dir_factory.mktemp(%22repo%22)
+_path
%0A
@@ -396,16 +396,30 @@
poparent
+: pathlib.Path
, git_du
@@ -1049,22 +1049,19 @@
poparent
-.join(
+ /
repo_nam
@@ -1062,17 +1062,16 @@
po_name)
-)
%0A%0A
|
08537185e2bbc7790dc0b7cd45b03b9ce0392bc6
|
fix bug
|
tds/parser.py
|
tds/parser.py
|
import struct
from StringIO import StringIO
from io import BytesIO
from .encrypt import decrypt
class PreLoginPacket(object):
def __init__(self, buff):
pass
class LoginPacket(object):
FIELDS = ('client_name', 'username', 'password', 'app_name', 'server_name',
'unknown1', 'lib_name', 'locale', 'database')
def __init__(self, buf):
"""
:param BytesIO buf:
"""
params = struct.unpack('<LLLLLLBBBBLL', buf.read(36))
self.packet_length = params[0]
self.tds_version = params[1]
self.tds_version = params[2]
self.client_version = params[3]
self.client_pid = params[4]
self.connection_id = params[5]
self.option_flags1 = params[6]
self.option_flags2 = params[7]
self.sql_type_flags = params[8]
self.reserved_flags = params[9]
self.time_zone = params[10]
self.collation = params[11]
fields = []
for field_name in self.FIELDS:
offset, length = struct.unpack('<HH', buf.read(4))
fields.append((field_name, offset, length))
for field_name, offset, length in fields:
if length:
buf.seek(offset)
value = buf.read(length * 2)
value = ''.join([c for c in value if c != '\x00'])
object.__setattr__(self, field_name, value)
self.password = decrypt(self.password)
class Parser(object):
PACKET_TYPES = {
0x12: PreLoginPacket,
0x10: LoginPacket
}
def __init__(self, conn):
self.conn = conn # type: BytesIO
def parse(self):
"""
:rtype:
"""
header, data = self.parse_message_header()
if header.packet_type in self.PACKET_TYPES:
packet_class = self.PACKET_TYPES.get(header.packet_type)
packet = packet_class(data)
print(packet)
def parse_message_header(self):
"""
:rtype: (PacketHeader, str)
"""
header = self.conn.read(8)
packet_header = PacketHeader(header)
data = self.conn.read(packet_header.length)
return packet_header, StringIO(data)
class PacketHeader(object):
FMT = "!BBHHBB"
def __init__(self, content):
"""
:param str content:
"""
packet_type, status, length, pid, packet_id, window = struct.unpack(self.FMT, content)
self.packet_type = packet_type
self.status = status
self.length = length
self.pid = pid
self.packet_id = packet_id
self.window = window
def __repr__(self):
return str(self)
def __str__(self):
return struct.pack(self.FMT, self.packet_type, self.status, self.length,
self.pid, self.packet_id, self.window)
def mock():
data = """
10 01 00 f6 00 00 00 00 ee 00 00 00 01 00 00 71
00 10 00 00 06 83 f2 f8 e0 23 00 00 00 00 00 00
f0 01 00 00 88 ff ff ff 36 04 00 00 56 00 08 00
66 00 06 00 72 00 0a 00 86 00 0d 00 a0 00 10 00
00 00 00 00 c0 00 0a 00 d4 00 0a 00 e8 00 03 00
00 00 00 00 00 00 ee 00 00 00 ee 00 00 00 57 00
43 00 4d 00 49 00 53 00 30 00 33 00 35 00 43 00
54 00 49 00 44 00 62 00 6f 00 e1 a5 f3 a5 c2 a5
a1 a5 91 a5 e0 a5 31 a5 e3 a5 83 a5 a6 a5 70 00
79 00 6d 00 73 00 73 00 71 00 6c 00 3d 00 32 00
2e 00 31 00 2e 00 33 00 53 00 31 00 44 00 53 00
51 00 4c 00 30 00 34 00 5c 00 45 00 48 00 49 00
53 00 53 00 51 00 4c 00 44 00 42 00 2d 00 4c 00
69 00 62 00 72 00 61 00 72 00 79 00 75 00 73 00
5f 00 65 00 6e 00 67 00 6c 00 69 00 73 00 68 00
43 00 54 00 49 00
"""
stream = data.split()
stream = ''.join([chr(int(c, 16)) for c in stream])
return StringIO(stream)
|
Python
| 0
|
@@ -3876,10 +3876,8 @@
tream)%0D%0A
-%0D%0A
|
0c712dddf2d0906c5b9444ebcbaa131f6bce1c62
|
Simplify the datasets example.
|
examples/mayavi/advanced_visualization/datasets.py
|
examples/mayavi/advanced_visualization/datasets.py
|
"""
A Mayavi example to show the different data sets. See
:ref:`data-structures-used-by-mayavi` for a discussion.
The following images are created:
.. hlist::
* **ImageData**
.. image:: ../image_data.jpg
:scale: 50
* **RectilinearGrid**
.. image:: ../rectilinear_grid.jpg
:scale: 50
* **StructuredGrid**
.. image:: ../structured_grid.jpg
:scale: 50
* **UnstructuredGrid**
.. image:: ../unstructured_grid.jpg
:scale: 50
"""
# Author: Gael Varoquaux <gael dot varoquaux at normalesup.org>
# Copyright (c) 2008, Enthought, Inc.
# License: BSD style.
from numpy import array, random, linspace, pi, ravel, cos, sin, empty
from enthought.tvtk.api import tvtk
from enthought.mayavi.sources.vtk_data_source import VTKDataSource
from enthought.mayavi import mlab
def image_data():
data = random.random((3, 3, 3))
i = tvtk.ImageData(spacing=(1, 1, 1), origin=(0, 0, 0))
i.point_data.scalars = data.ravel()
i.point_data.scalars.name = 'scalars'
i.dimensions = data.shape
return i
def rectilinear_grid():
data = random.random((3, 3, 3))
r = tvtk.RectilinearGrid()
r.point_data.scalars = data.ravel()
r.point_data.scalars.name = 'scalars'
r.dimensions = data.shape
r.x_coordinates = array((0, 0.7, 1.4))
r.y_coordinates = array((0, 1, 3))
r.z_coordinates = array((0, .5, 2))
return r
def generate_annulus(r, theta, z):
""" Generate points for structured grid for a cylindrical annular
volume. This method is useful for generating a unstructured
cylindrical mesh for VTK (and perhaps other tools).
"""
# Find the x values and y values for each plane.
x_plane = (cos(theta)*r[:,None]).ravel()
y_plane = (sin(theta)*r[:,None]).ravel()
# Allocate an array for all the points. We'll have len(x_plane)
# points on each plane, and we have a plane for each z value, so
# we need len(x_plane)*len(z) points.
points = empty([len(x_plane)*len(z),3])
# Loop through the points for each plane and fill them with the
# correct x,y,z values.
start = 0
for z_plane in z:
end = start+len(x_plane)
# slice out a plane of the output points and fill it
# with the x,y, and z values for this plane. The x,y
# values are the same for every plane. The z value
# is set to the current z
plane_points = points[start:end]
plane_points[:,0] = x_plane
plane_points[:,1] = y_plane
plane_points[:,2] = z_plane
start = end
return points
def structured_grid():
# Make the data.
dims = (3, 4, 3)
r = linspace(5, 15, dims[0])
theta = linspace(0, 0.5*pi, dims[1])
z = linspace(0, 10, dims[2])
pts = generate_annulus(r, theta, z)
sgrid = tvtk.StructuredGrid(dimensions=(dims[1], dims[0], dims[2]))
sgrid.points = pts
s = random.random((dims[0]*dims[1]*dims[2]))
sgrid.point_data.scalars = ravel(s.copy())
sgrid.point_data.scalars.name = 'scalars'
return sgrid
def unstructured_grid():
points = array([[0,1.2,0.6], [1,0,0], [0,1,0], [1,1,1], # tetra
[1,0,-0.5], [2,0,0], [2,1.5,0], [0,1,0],
[1,0,0], [1.5,-0.2,1], [1.6,1,1.5], [1,1,1], # Hex
], 'f')
# The cells
cells = array([4, 0, 1, 2, 3, # tetra
8, 4, 5, 6, 7, 8, 9, 10, 11 # hex
])
# The offsets for the cells, i.e. the indices where the cells
# start.
offset = array([0, 5])
tetra_type = tvtk.Tetra().cell_type # VTK_TETRA == 10
hex_type = tvtk.Hexahedron().cell_type # VTK_HEXAHEDRON == 12
cell_types = array([tetra_type, hex_type])
# Create the array of cells unambiguously.
cell_array = tvtk.CellArray()
cell_array.set_cells(2, cells)
# Now create the UG.
ug = tvtk.UnstructuredGrid(points=points)
# Now just set the cell types and reuse the ug locations and cells.
ug.set_cells(cell_types, offset, cell_array)
scalars = random.random(points.shape[0])
ug.point_data.scalars = scalars
ug.point_data.scalars.name = 'scalars'
return ug
def polydata():
# The numpy array data.
points = array([[0,-0.5,0], [1.5,0,0], [0,1,0], [0,0,0.5],
[-1,-1.5,0.1], [0,-1, 0.5], [-1, -0.5, 0],
[1,0.8,0]], 'f')
triangles = array([[0,1,3], [1,2,3], [1,0,5],
[2,3,4], [3,0,4], [0,5,4], [2, 4, 6],
[2, 1, 7]])
scalars = random.random(points.shape)
# The TVTK dataset.
mesh = tvtk.PolyData(points=points, polys=triangles)
mesh.point_data.scalars = scalars
mesh.point_data.scalars.name = 'scalars'
return mesh
def view(dataset):
""" Open up a mayavi scene and display the dataset in it.
"""
engine = mlab.get_engine()
fig = mlab.figure(bgcolor=(1, 1, 1), fgcolor=(0, 0, 0),
figure=dataset.class_name[3:])
src = VTKDataSource(data=dataset)
engine.add_source(src)
mlab.pipeline.surface(src, opacity=0.1)
mlab.pipeline.surface(mlab.pipeline.extract_edges(src),
color=(0, 0, 0), )
@mlab.show
def main():
view(image_data())
view(rectilinear_grid())
view(structured_grid())
view(unstructured_grid())
view(polydata())
if __name__ == '__main__':
main()
|
Python
| 0.000001
|
@@ -4908,39 +4908,8 @@
%22%22%22%0A
- engine = mlab.get_engine()%0A
@@ -5027,72 +5027,13 @@
s
-rc = VTKDataSource(data=dataset)%0A engine.add_source(src) %0A
+urf =
mla
@@ -5051,19 +5051,23 @@
surface(
-src
+dataset
, opacit
@@ -5128,18 +5128,19 @@
_edges(s
-rc
+urf
),%0A
|
8913f5d6a06e0f25d1c8c1a45e0f5b4da8cbf421
|
bump version
|
rodeo/__init__.py
|
rodeo/__init__.py
|
__version__ = "0.0.2"
|
Python
| 0
|
@@ -1,6 +1,4 @@
-%0A%0A
__ve
@@ -14,9 +14,9 @@
%220.
-0.2
+1.0
%22%0A
|
56b1ef461cfce11ad5e08a031abf175ed73c2081
|
Add radius2fov and imagexy_to_pixelXY functions. Clean import.
|
coordinate_transformations.py
|
coordinate_transformations.py
|
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 21 22:35:50 2017
@author: lauri.kangas
"""
import numpy as np
from numpy import sin,cos,arccos,arctan2,mod,pi
from projections import stereographic
def rotate_RADEC(RAs, DECs, center_RA, center_DEC, output='xyz'):
# rotate RA,DEC coordinates to turn center_RA,center_DEC to origin
# RA can be rotated first
RArotated_RAs = mod(RAs - center_RA, 2*pi)
# convert to rectangular coordinates
RArotated_x, \
RArotated_y, \
RArotated_z = RADEC_to_xyz(RArotated_RAs, DECs)
# now we can rotate by center_DEC.
RADECrotated_x, \
RADECrotated_y, \
RADECrotated_z = tilt_xyz_y(RArotated_x, \
RArotated_y, \
RArotated_z, center_DEC)
if output.lower() == 'xyz':
return RADECrotated_x, RADECrotated_y, RADECrotated_z
elif output.lower() == 'radec':
# calculate RA/DEC again
return None
def RADEC_to_xyz(RA, DEC):
x = cos(RA)*cos(DEC)
y = sin(RA)*cos(DEC)
z = sin(DEC)
return x,y,z
def tilt_xyz_y(x, y, z, angle, x_only=False):
# tilt xyz coordinates along y_axis by amount angle
# x_only: if only radius matters, (for gsc region selection),
# don't calculate y and z
xx = x*cos(angle)+z*sin(angle)
if x_only:
return xx
yy = y
zz = -x*sin(angle)+z*cos(angle)
return xx,yy,zz
def xyz_radius_from_origin(x, *args):
return arccos(x)
def fov_radius(fov, projection=stereographic):
# return half-diagonal radius of rectangular fov of given width/height
# with given projection
fov = np.radians(np.array(fov)) # if fov wasn't already array
half_fov_angle = fov/2
half_fov_imageplane = projection(half_fov_angle)
half_diagonal_imageplane = np.hypot(*half_fov_imageplane)
half_diagonal_radians = projection(half_diagonal_imageplane, inverse=True)
return np.degrees(half_diagonal_radians)
def xyz_to_imagexy(x, y, z, \
rotation=0, projection=stereographic, include_R=False):
# project xyz coordinates on a sphere to image plane
# R can be returned for filtering GSR regions
# calculate angular distance from image center along sphere
R = xyz_radius_from_origin(x)
r = projection(R)
# polar angle of region coordinates in image plane
T = arctan2(z, y)
T += rotation
image_x = -r * cos(T)
image_y = r * sin(T)
if include_R:
return image_x, image_y, R
return image_x, image_y
|
Python
| 0
|
@@ -152,20 +152,22 @@
,mod,pi%0A
-from
+import
project
@@ -174,29 +174,8 @@
ions
- import stereographic
%0A%0Ade
@@ -1546,32 +1546,44 @@
fov, projection=
+projections.
stereographic):%0A
@@ -2026,16 +2026,740 @@
radians)
+%0A%0Adef radius2fov(radius, aspect_ratio, projection=projections.stereographic):%0A # aspect_ratio = height/width%0A half_diagonal_radians = np.radians(radius)%0A half_diagonal_imageplane = projection(half_diagonal_radians)%0A diagonal_imageplane = 2 * half_diagonal_imageplane%0A %0A width_imageplane = diagonal_imageplane**2 / (1 + aspect_ratio**2)%0A height_imageplane = aspect_ratio*width_imageplane%0A %0A fov_imageplane = np.array(%5Bwidth_imageplane, height_imageplane%5D)%0A half_fov_imageplane = fov_imageplane/2%0A half_fov_radians = projection(half_fov_imageplane, inverse=True)%0A fov_radians = half_fov_radians*2%0A %0A return np.degrees(fov_radians), np.array(%5Bwidth_imageplane, height_imageplane%5D)%0A
%0A %0A%0Ad
@@ -2829,16 +2829,28 @@
jection=
+projections.
stereogr
@@ -3360,9 +3360,628 @@
mage_y%0A%0A
+# transform X/Y star locations from image plane coordinates to pixel coordinates (non-integer)%0A# in: X/Y stars, sensor dimensions, pixel counts%0A%0Adef imagexy_to_pixelXY(xy, sensor_size=None, resolution=None, pixel_scale=None, axis='ij'):%0A # x,y star locations on image plane to X,Y pixel coordinates (non-integer)%0A %0A x, y = xy%0A %0A if axis == 'ij':%0A y *= -1%0A else: # 'xy'%0A pass%0A %0A sensor_width, sensor_height = sensor_size%0A pixels_x, pixels_y = resolution%0A %0A X = (x+sensor_width)/sensor_width*pixels_x/2%0A Y = (y+sensor_height)/sensor_height*pixels_y/2%0A %0A return X, Y
%0A
|
7eb10376b585e56faad4672959f6654f2500a38d
|
Add `one` as shortcut to `dimensionless_unscaled`
|
astropy/units/__init__.py
|
astropy/units/__init__.py
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This subpackage contains classes and functions for defining and converting
between different physical units.
This code is adapted from the `pynbody
<http://code.google.com/p/pynbody/>`_ units module written by Andrew
Pontzen, who has granted the Astropy project permission to use the
code under a BSD license.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from .core import *
from .quantity import *
from . import si
from . import cgs
from . import astrophys
from .si import *
from .astrophys import *
from .cgs import *
from .physical import *
from .equivalencies import *
del bases
# Enable the set of default units. This notably does *not* include
# Imperial units.
set_enabled_units([si, cgs, astrophys])
|
Python
| 0.999976
|
@@ -694,16 +694,47 @@
bases%0A%0A
+%0Aone = dimensionless_unscaled%0A%0A
# Enable
|
865940bd126c7c45b7c615f751244a46176aca4d
|
Update version to 2.3b2-dev
|
openslides/__init__.py
|
openslides/__init__.py
|
__author__ = 'OpenSlides Team <support@openslides.org>'
__description__ = 'Presentation and assembly system'
__version__ = '2.3b1'
__license__ = 'MIT'
__url__ = 'https://openslides.org'
args = None
|
Python
| 0
|
@@ -125,9 +125,13 @@
2.3b
-1
+2-dev
'%0A__
|
d2eb134115eb0b35a96f8d494dd2a397eb06e4a6
|
Add channel_name on filter ArticleBoxAdmin
|
opps/articles/admin.py
|
opps/articles/admin.py
|
# -*- coding: utf-8 -*-
from django.contrib import admin
from django import forms
from django.utils.translation import ugettext_lazy as _
from .models import Post, Album, Article, Link, ArticleSource, ArticleImage
from .models import ArticleBox, ArticleBoxArticles, ArticleConfig
from opps.core.admin import PublishableAdmin
from opps.core.admin import apply_opps_rules
from redactor.widgets import RedactorEditor
from django_thumbor import generate_url
class ArticleImageInline(admin.TabularInline):
model = ArticleImage
fk_name = 'article'
raw_id_fields = ['image']
actions = None
extra = 1
fieldsets = [(None, {'fields': ('image', 'order')})]
class ArticleSourceInline(admin.TabularInline):
model = ArticleSource
fk_name = 'article'
raw_id_fields = ['source']
actions = None
extra = 1
fieldsets = [(None, {
'classes': ('collapse',),
'fields': ('source', 'order')})]
class ArticleBoxArticlesInline(admin.TabularInline):
model = ArticleBoxArticles
fk_name = 'articlebox'
raw_id_fields = ['article']
actions = None
extra = 1
fieldsets = [(None, {
'classes': ('collapse',),
'fields': ('article', 'order')})]
class PostAdminForm(forms.ModelForm):
class Meta:
model = Post
widgets = {'content': RedactorEditor()}
class ArticleAdmin(PublishableAdmin):
prepopulated_fields = {"slug": ["title"]}
readonly_fields = ['get_http_absolute_url', 'short_url']
raw_id_fields = ['main_image', 'channel']
@apply_opps_rules('articles')
class PostAdmin(ArticleAdmin):
form = PostAdminForm
inlines = [ArticleImageInline, ArticleSourceInline]
raw_id_fields = ['main_image', 'channel', 'albums']
fieldsets = (
(_(u'Identification'), {
'fields': ('site', 'title', 'slug', 'get_http_absolute_url',
'short_url')}),
(_(u'Content'), {
'fields': ('short_title', 'headline', 'content', 'main_image',
'tags')}),
(_(u'Relationships'), {
'fields': ('channel', 'albums',)}),
(_(u'Publication'), {
'classes': ('extrapretty'),
'fields': ('published', 'date_available')}),
)
class AlbumAdminForm(forms.ModelForm):
class Meta:
model = Album
@apply_opps_rules('articles')
class AlbumAdmin(ArticleAdmin):
form = AlbumAdminForm
inlines = [ArticleImageInline]
fieldsets = (
(_(u'Identification'), {
'fields': ('site', 'title', 'slug', 'get_http_absolute_url',
'short_url',)}),
(_(u'Content'), {
'fields': ('short_title', 'headline', 'main_image', 'tags')}),
(_(u'Relationships'), {
'fields': ('channel',)}),
(_(u'Publication'), {
'classes': ('extrapretty'),
'fields': ('published', 'date_available')}),
)
@apply_opps_rules('articles')
class LinkAdmin(ArticleAdmin):
raw_id_fields = ['articles', 'channel', 'main_image']
fieldsets = (
(_(u'Identification'), {
'fields': ('site', 'title', 'slug', 'get_http_absolute_url',
'short_url',)}),
(_(u'Content'), {
'fields': ('short_title', 'headline', 'url', 'articles',
'main_image', 'tags')}),
(_(u'Relationships'), {
'fields': ('channel',)}),
(_(u'Publication'), {
'classes': ('extrapretty'),
'fields': ('published', 'date_available')}),
)
class ArticleBoxAdmin(PublishableAdmin):
prepopulated_fields = {"slug": ["name"]}
list_display = ['name', 'date_available', 'published']
list_filter = ['date_available', 'published']
inlines = [ArticleBoxArticlesInline]
raw_id_fields = ['channel', 'article', 'queryset']
search_fields = ['name', 'slug']
fieldsets = (
(_(u'Identification'), {
'fields': ('site', 'name', 'slug')}),
(_(u'Relationships'), {
'fields': ('channel', 'article', 'queryset')}),
(_(u'Publication'), {
'classes': ('extrapretty'),
'fields': ('published', 'date_available')}),
)
class HideArticleAdmin(PublishableAdmin):
list_display = ['image_thumb', 'title', 'channel_name', 'date_available',
'published']
readonly_fields = ['image_thumb']
def image_thumb(self, obj):
if obj.main_image:
return u'<img width="60px" height="60px" src="{0}" />'.format(
generate_url(obj.main_image.image.url, width=60, height=60))
return _(u'No Image')
image_thumb.short_description = _(u'Thumbnail')
image_thumb.allow_tags = True
def get_model_perms(self, *args, **kwargs):
return {}
def has_add_permission(self, request):
return False
class ArticleConfigAdmin(PublishableAdmin):
list_display = ['key', 'key_group', 'channel', 'date_insert',
'date_available', 'published']
list_filter = ["key", 'key_group', "channel", "published"]
search_fields = ["key", "key_group", "value"]
admin.site.register(Article, HideArticleAdmin)
admin.site.register(Post, PostAdmin)
admin.site.register(Album, AlbumAdmin)
admin.site.register(Link, LinkAdmin)
admin.site.register(ArticleBox, ArticleBoxAdmin)
admin.site.register(ArticleConfig, ArticleConfigAdmin)
|
Python
| 0
|
@@ -3874,16 +3874,32 @@
, 'slug'
+, 'channel_name'
%5D%0A%0A f
|
8a9fa06c36a89e3fde93059cfbe827506d5b8b62
|
Disable exception logging of status code 500 during testing.
|
orchard/errors/e500.py
|
orchard/errors/e500.py
|
# -*- coding: utf-8 -*-
"""
This module sets up the view for handling ``500 Internal Server Error`` errors.
"""
import datetime
import flask
import flask_classful
from orchard.errors import blueprint
class Error500View(flask_classful.FlaskView):
"""
View for ``500 Internal Server Error`` errors.
"""
trailing_slash = False
@blueprint.app_errorhandler(500)
@blueprint.app_errorhandler(Exception)
def index(self) -> str:
"""
Display the error page for internal errors and send a mail to all administrators
information them of this error.
:return: A page explaining the error.
"""
message = ('Time: {time}\n' +
'Request: {method} {path}\n' +
'Agent: {agent_platform} | {agent_browser} {agent_browser_version}\n' +
'Raw Agent: {agent}\n\n'
).format(time = datetime.datetime.now(),
method = flask.request.method,
path = flask.request.path,
agent_platform = flask.request.user_agent.platform,
agent_browser = flask.request.user_agent.browser,
agent_browser_version = flask.request.user_agent.version,
agent = flask.request.user_agent.string)
flask.current_app.logger.exception(message)
return flask.render_template('errors/500.html')
Error500View.register(blueprint)
|
Python
| 0
|
@@ -1390,16 +1390,83 @@
tring)%0A%0A
+ if not flask.current_app.testing: # pragma: no cover.%0A
@@ -1509,16 +1509,17 @@
essage)%0A
+%0A
|
b8d693a8fd2e0fb9fa8592b9672bc71e874547d3
|
Bump version to 0.1.1
|
fancypages/__init__.py
|
fancypages/__init__.py
|
import os
__version__ = (0, 1, 0, 'alpha', 1)
def get_fancypages_paths(path):
""" Get absolute paths for *path* relative to the project root """
return [os.path.join(os.path.dirname(os.path.abspath(__file__)), path)]
def get_apps():
return (
'django_extensions',
# used for image thumbnailing
'sorl.thumbnail',
# framework used for the internal API
'rest_framework',
# provides a convenience layer around model inheritance
# that makes lookup of nested models easier. This is used
# for the content block hierarchy.
'model_utils',
# static file compression and collection
'compressor',
# migration handling
'south',
# package used for twitter block
'twitter_tag',
# actual apps provided by fancypages
'fancypages.assets',
'fancypages',
)
|
Python
| 0.000002
|
@@ -25,17 +25,17 @@
(0, 1,
-0
+1
, 'alpha
|
667f1861c31dc878bc194143dfa52a998afbe1b1
|
Simplify form class init parameters
|
organizations/forms.py
|
organizations/forms.py
|
from django import forms
from django.contrib.auth.models import User
from django.contrib.sites.models import get_current_site
from django.utils.translation import ugettext_lazy as _
from organizations.models import Organization, OrganizationUser
from organizations.utils import create_organization
from organizations.backends import invitation_backend
class OrganizationForm(forms.ModelForm):
"""Form class for updating Organizations"""
owner = forms.ModelChoiceField(OrganizationUser.objects.all())
def __init__(self, request, data=None, files=None, auto_id='id_%s',
prefix=None, initial=None, error_class=forms.util.ErrorList,
label_suffix=':', empty_permitted=False, instance=None):
self.request = request
super(OrganizationForm, self).__init__(data=data, files=files,
auto_id=auto_id, prefix=prefix, initial=initial,
error_class=error_class, label_suffix=label_suffix,
empty_permitted=empty_permitted, instance=instance)
self.fields['owner'].queryset = self.instance.organization_users.filter(
is_admin=True, user__is_active=True)
self.fields['owner'].initial = self.instance.owner.organization_user
class Meta:
model = Organization
exclude = ('users', 'is_active')
def save(self, commit=True):
if self.instance.owner.organization_user != self.cleaned_data['owner']:
self.instance.owner = self.cleaned_data['owner']
self.instance.owner.save()
return super(OrganizationForm, self).save(commit=commit)
def clean_owner(self):
owner = self.cleaned_data['owner']
if owner != self.instance.owner.organization_user:
if self.request.user != self.instance.owner.organization_user.user:
raise forms.ValidationError(_("Only the organization owner can change ownerhip"))
return owner
class OrganizationUserForm(forms.ModelForm):
"""Form class for updating OrganizationUsers"""
class Meta:
model = OrganizationUser
exclude = ('organization', 'user')
def clean_is_admin(self):
is_admin = self.cleaned_data['is_admin']
if self.instance.organization.owner.organization_user == self.instance and not is_admin:
raise forms.ValidationError(_("The organization owner must be an admin"))
return is_admin
class OrganizationUserAddForm(forms.ModelForm):
"""Form class for adding OrganizationUsers to an existing Organization"""
email = forms.EmailField(max_length=75)
def __init__(self, request, organization, data=None, files=None, initial=None,
instance=None):
self.request = request
self.organization = organization
super(OrganizationUserAddForm, self).__init__(data=data, initial=initial,
instance=instance)
class Meta:
model = OrganizationUser
exclude = ('user', 'organization')
def save(self, *args, **kwargs):
"""
The save method should create a new OrganizationUser linking the User
matching the provided email address. If not matching User is found it
should kick off the registration process. It needs to create a User in
order to link it to the Organization.
"""
try:
user = User.objects.get(email=self.cleaned_data['email'])
except User.MultipleObjectsReturned:
raise forms.ValidationError(_("This email address has been used multiple times."))
except User.DoesNotExist:
user = invitation_backend().invite_by_email(
self.cleaned_data['email'],
**{'domain': get_current_site(self.request),
'organization': self.organization})
return OrganizationUser.objects.create(user=user,
organization=self.organization,
is_admin=self.cleaned_data['is_admin'])
def clean_email(self):
email = self.cleaned_data['email']
if self.organization.users.filter(email=email):
raise forms.ValidationError(_("There is already an organization member with this email address!"))
return email
class OrganizationAddForm(forms.ModelForm):
"""
Form class for creating a new organization, complete with new owner, including a
User instance, OrganizationUser instance, and OrganizationOwner instance.
"""
email = forms.EmailField(max_length=30)
def __init__(self, request, data=None, files=None, initial=None,
instance=None):
self.request = request
super(OrganizationAddForm, self).__init__(data=data, initial=initial,
instance=instance)
class Meta:
model = Organization
exclude = ('users', 'is_active')
def save(self):
"""
Create the organization, then get the user, then make the owner.
"""
try:
user = User.objects.get(email=self.cleaned_data['email'])
except User.DoesNotExist:
user = invitation_backend().invite_by_email(
self.cleaned_data['email'],
**{'domain': get_current_site(self.request),
'organization': self.cleaned_data['name'],
'sender': self.request.user, 'created': True})
return create_organization(user, self.cleaned_data['name'])
|
Python
| 0.000002
|
@@ -541,491 +541,119 @@
st,
-data=None, files=None, auto_id='id_%25s',%0A prefix=None, initial=None, error_class=forms.util.ErrorList,%0A label_suffix=':', empty_permitted=False, instance=None):%0A self.request = request%0A super(OrganizationForm, self).__init__(data=data, files=files,%0A auto_id=auto_id, prefix=prefix, initial=initial,%0A error_class=error_class, label_suffix=label_suffix,%0A empty_permitted=empty_permitted, instance=instance
+*args, **kwargs):%0A self.request = request%0A super(OrganizationForm, self).__init__(*args, **kwargs
)%0A
@@ -2258,70 +2258,23 @@
on,
-data=None, files=None, initial=None,%0A instance=None
+*args, **kwargs
):%0A
@@ -2402,69 +2402,23 @@
t__(
-data=data, initial=initial,%0A instance=instance
+*args, **kwargs
)%0A%0A
@@ -4065,70 +4065,23 @@
st,
-data=None, files=None, initial=None,%0A instance=None
+*args, **kwargs
):%0A
@@ -4164,69 +4164,23 @@
t__(
-data=data, initial=initial,%0A instance=instance
+*args, **kwargs
)%0A%0A
|
b72f3ce27034ba3f810f205d133445267847f667
|
fix CSRF get request
|
mpweb_core/rester.py
|
mpweb_core/rester.py
|
# coding: utf-8
# https://github.com/materialsproject/pymatgen/blob/1eb2f2f/pymatgen/matproj/rest.py
from __future__ import division, unicode_literals
import os, requests, json, warnings, urlparse
class MPResterBase(object):
"""
A base class to conveniently interface with a REST interface in the style of
the Materials Project. For your own "rester", inherit from MPResterBase and
add convenience functions which return the result of HTTP requests via
`MPResterBase._make_request(<URL>, ..)`. The recommended way to use the
resulting `MPCustomRester` is with the "with" context manager to ensure that
sessions are properly closed after usage::
with MPCustomRester("API_KEY") as m:
m.do_something()
MPResterBase uses the "requests" package, which provides for HTTP connection
pooling.
Args:
api_key (str): A String API key for accessing the REST interface. If
this is None, the code will check if there is a "MAPI_KEY"
environment variable set. If so, it will use that environment
variable. This makes it easier for heavy users to simply add this
environment variable to their setups and MPResterBase can then be
called without any arguments.
endpoint (str): URL of endpoint to access the REST interface. Defaults
to the standard Materials Project REST address, but can be changed
to other urls implementing a similar interface.
"""
def __init__(self, api_key=None,
endpoint="https://www.materialsproject.org/rest/v2"):
if api_key is not None:
self.api_key = api_key
else:
self.api_key = os.environ.get("MAPI_KEY", "")
self.preamble = endpoint
self.session = requests.Session()
self.session.headers = {"x-api-key": self.api_key}
def __enter__(self):
"""Support for "with" context."""
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""Support for "with" context."""
self.session.close()
def _make_request(self, sub_url, payload=None, method="GET"):
response = None
url = self.preamble + sub_url
try:
if self.session.cookies.get('csrftoken') is None:
from django.core.urlresolvers import reverse
uri = urlparse.urlparse(self.preamble)
domain = '{uri.scheme}://{uri.netloc}/'.format(uri=uri)
domain += uri.path.split('/')[1] # test_site/
domain += reverse('browserid.csrf')
self.session.get(domain)
headers = {"X-CSRFToken": self.session.cookies.get('csrftoken')}
response = self.session.post(url, data=payload, headers=headers) \
if method == "POST" else self.session.get(url, params=payload)
if response.status_code in [200, 400]:
data = json.loads(response.text)
if data["valid_response"]:
if data.get("warning"):
warnings.warn(data["warning"])
return data["response"]
else:
raise MPResterError(data["error"])
raise MPResterError(
"REST query returned with error status code {}"
.format(response.status_code)
)
except Exception as ex:
msg = "{}. Content: {}".format(str(ex), repr(response.content)) \
if hasattr(response, "content") else str(ex)
raise MPResterError(msg)
class MPResterError(Exception):
"""
Exception class for MPResterBase.
Raised when the query has problems, e.g., bad query format.
"""
pass
|
Python
| 0
|
@@ -2459,17 +2459,16 @@
.netloc%7D
-/
'.format
@@ -2493,24 +2493,25 @@
-domain +
+site_url
= uri.pa
@@ -2560,16 +2560,23 @@
-domain +
+browserid_csrf
= re
@@ -2599,16 +2599,158 @@
.csrf')%0A
+ if site_url%5B:-1%5D not in browserid_csrf:%0A domain += '/' + site_url%0A domain += browserid_csrf%0A
|
4ec73ee0272d904700c7ae126f6d3ef0d8a5e762
|
Work around moz url's busted URL joining
|
nanospider/spider.py
|
nanospider/spider.py
|
from gevent import monkey, queue, pool, spawn
monkey.patch_all()
import requests, traceback, sqlite3, itertools
import url as moz_url
from lxml import etree
from scrapelib import Scraper
from scrapelib.cache import SQLiteCache
def is_html(response):
return 'html' in response.headers.get('content-type', 'text/html').lower()
class SpiderScraper(Scraper):
def __init__(self, cache_path, allowed_hosts=(), requests_per_minute=0, **kwargs):
kwargs['requests_per_minute'] = requests_per_minute
super(SpiderScraper, self).__init__(**kwargs)
self.cache_storage = SQLiteCache(cache_path)
self.cache_write_only = False
self._allowed_hosts = set(allowed_hosts)
def should_cache_response(self, response):
return response.status_code == 200 and \
moz_url.parse(response.url)._host in self._allowed_hosts and \
is_html(response)
class Spider(object):
def __init__(self, domain, cache_path, workers=2, try_sitemap=True, **kwargs):
self.domain = domain
self._queue = queue.JoinableQueue()
self._workers = []
self._worker_count = workers
self._allowed_hosts = set()
self._allowed_hosts.add(domain)
self._scraper = SpiderScraper(cache_path, self._allowed_hosts, **kwargs)
self._build_table()
self._resume_queue()
self._add_to_queue(moz_url.parse("http://%s/" % domain))
def _build_table(self):
self._scraper.cache_storage._conn.execute("""CREATE TABLE IF NOT EXISTS seen
(key text UNIQUE, processed integer)""")
def _add_to_queue(self, url):
uurl = url.utf8()
if url._host in self._allowed_hosts:
# insert it into sqlite, or not
try:
with self._scraper.cache_storage._conn as conn:
conn.execute("INSERT INTO seen values (?, 0)", (uurl,))
self._queue.put(url)
except sqlite3.IntegrityError:
# we've already seen this one
pass
def _resume_queue(self):
# if there's already stuff in the database, repopulate from the queue
for row in self._scraper.cache_storage._conn.execute("SELECT * FROM seen WHERE processed = 0"):
self._queue.put(moz_url.parse(row[0]))
def _scrape_page(self, url):
uurl = url.utf8()
print "Scraping %s..." % uurl
response = self._scraper.get(uurl)
if is_html(response):
parsed = etree.HTML(response.content)
links = parsed.xpath("//a[@href]")
for link in links:
new_link = url.relative(link.attrib['href'])
new_link._fragment = None
self._add_to_queue(new_link.canonical())
# mark this one as processed
with self._scraper.cache_storage._conn as conn:
conn.execute("UPDATE seen SET processed = 1 WHERE key = ?", (uurl,))
def _crawl_worker(self):
while True:
item = self._queue.get()
try:
self._scrape_page(item)
except:
traceback.print_exc()
finally:
self._queue.task_done()
def crawl(self):
for i in range(self._worker_count):
self._workers.append(spawn(self._crawl_worker))
self._queue.join()
# clean up workers
for worker in self._workers:
worker.kill()
self._workers = []
@property
def urls(self):
return itertools.imap(
lambda r: r[0],
self._scraper.cache_storage._conn.execute("SELECT key FROM cache WHERE status = 200")
)
# proxy the scraper's get for convenience
def get(self, *args, **kwargs):
return self._scraper.get(*args, **kwargs)
if __name__ == "__main__":
import sys
s = Spider(sys.argv[1], sys.argv[1] + ".db", workers=4)
s.crawl()
|
Python
| 0
|
@@ -105,16 +105,26 @@
tertools
+, urlparse
%0Aimport
@@ -2657,21 +2657,51 @@
k =
-url.relative(
+moz_url.parse(urlparse.urljoin(url.utf8(),
link
@@ -2716,16 +2716,17 @@
'href'%5D)
+)
%0A
@@ -2756,24 +2756,66 @@
ment = None%0A
+ new_link._userinfo = None%0A
@@ -4005,16 +4005,34 @@
orkers=4
+, retry_attempts=2
)%0A s.
|
1506dda66814b8f51ec2dcbf2e632bdafa98bf75
|
add root node info to form
|
arches/app/views/graph.py
|
arches/app/views/graph.py
|
'''
ARCHES - a program developed to inventory and manage immovable cultural heritage.
Copyright (C) 2013 J. Paul Getty Trust and World Monuments Fund
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from django.views.decorators.csrf import csrf_exempt
from django.db import transaction
from django.shortcuts import render
from django.db.models import Q
from django.utils.translation import ugettext as _
from django.http import HttpResponseNotFound
from arches.app.utils.betterJSONSerializer import JSONSerializer, JSONDeserializer
from arches.app.utils.JSONResponse import JSONResponse
from arches.app.models.resource_graphs import ResourceGraph
from arches.app.models import models
@csrf_exempt
def manager(request, nodeid):
graph = ResourceGraph(nodeid)
branches = JSONSerializer().serializeToPython(models.BranchMetadata.objects.all())
branch_nodes = models.Node.objects.filter(~Q(branchmetadata=None), istopnode=True)
for branch in branches:
branch['graph'] = ResourceGraph(branch_nodes.get(branchmetadata_id=branch['branchmetadataid']))
datatypes = models.DDataType.objects.all()
return render(request, 'graph-manager.htm', {
'main_script': 'graph-manager',
'graph': JSONSerializer().serialize(graph),
'branches': JSONSerializer().serialize(branches),
'datatypes': JSONSerializer().serialize(datatypes),
'node_list': {
'title': _('Node List'),
'search_placeholder': _('Find a node in the graph')
},
'permissions_list': {
'title': _('Permissions'),
'search_placeholder': _('Find a group or user account')
},
'branch_list': {
'title': _('Branch Library'),
'search_placeholder': _('Find a graph branch')
}
})
@csrf_exempt
def node(request, nodeid):
if request.method == 'POST':
data = JSONDeserializer().deserialize(request.body)
if data:
with transaction.atomic():
node = models.Node.objects.get(nodeid=nodeid)
node.name = data.get('name', '')
node.description = data.get('description','')
node.istopnode = data.get('istopnode','')
node.crmclass = data.get('crmclass','')
node.datatype = data.get('datatype','')
node.status = data.get('status','')
node.save()
return JSONResponse(node)
if request.method == 'DELETE':
data = JSONDeserializer().deserialize(request.body)
if data:
with transaction.atomic():
node = models.Node.objects.get(nodeid=nodeid)
edge = models.Edge.objects.get(rangenode=node)
edge.delete()
graph = ResourceGraph(nodeid)
for edge in graph.edges:
edge.delete()
for node in graph.nodes:
node.delete()
return JSONResponse({})
return HttpResponseNotFound
|
Python
| 0
|
@@ -1592,40 +1592,19 @@
-branch%5B'graph'%5D = ResourceGraph(
+rootnode =
bran
@@ -1661,17 +1661,108 @@
ataid'%5D)
-)
+%0D%0A branch%5B'rootnode'%5D = rootnode%0D%0A branch%5B'graph'%5D = ResourceGraph(rootnode)%0D%0A
%0D%0A da
@@ -3754,28 +3754,754 @@
eturn HttpResponseNotFound%0D%0A
+%0D%0A%0D%0A@csrf_exempt%0D%0Adef appendbranch(request, nodeid, branchid):%0D%0A if request.method == 'POST':%0D%0A data = JSONDeserializer().deserialize(request.body)%0D%0A if data:%0D%0A with transaction.atomic():%0D%0A node = models.Node.objects.get(nodeid=nodeid)%0D%0A node.name = data.get('name', '')%0D%0A node.description = data.get('description','')%0D%0A node.istopnode = data.get('istopnode','')%0D%0A node.crmclass = data.get('crmclass','')%0D%0A node.datatype = data.get('datatype','')%0D%0A node.status = data.get('status','')%0D%0A node.save()%0D%0A return JSONResponse(node)%0D%0A%0D%0A return HttpResponseNotFound%0D%0A
|
b699f950eebbe10c400e9867ce8bead02d2f651c
|
Remove another thing.
|
src/txacme/interfaces.py
|
src/txacme/interfaces.py
|
# -*- coding: utf-8 -*-
"""
Interface definitions for txacme.
"""
from zope.interface import Interface
class ITLSSNI01Responder(Interface):
"""
Configuration for a tls-sni-01 challenge responder.
The actual responder may exist somewhere else, this interface is merely for
an object that knows how to configure it.
"""
def start_responding(server_name):
"""
Start responding for a particular challenge.
.. seealso:: `txacme.util.generate_tls_sni_01_cert`
:param str server_name: The server name to respond to: ie.
`u'<hex>.<hex>.acme.invalid'`.
:rtype: `~twisted.internet.defer.Deferred`
:return: A deferred firing when the given hostname is ready to respond
with the given authorization.
"""
def stop_responding(server_name):
"""
Stop responding for a particular challenge.
May be a noop if a particular responder does not need or implement
explicit cleanup; implementations should not rely on this method always
being called.
:param str server_name: The server name to stop responding for: ie.
`u'<hex>.<hex>.acme.invalid'`.
"""
class ICertificateStore(Interface):
"""
A store of certificate/keys/chains.
"""
def get(self, server_name):
"""
Retrieve the current PEM objects for the given server name.
:param str server_name: The server name.
:raises KeyError: if the given name does not exist in the store.
:return: ``Deferred[List[:ref:`pem-objects`]]``
"""
def store(self, server_name, pem_objects):
"""
Store PEM objects for the given server name.
Implementations do not have to permit invoking this with a server name
that was not already present in the store.
:param str server_name: The server name to update.
:param pem_objects: A list of :ref:`pem-objects`; must contain exactly
one private key, a certificate corresponding to that private key,
and zero or more chain certificates.
:rtype: ``Deferred``
"""
def as_dict(self):
"""
Get all certificates in the store.
:rtype: ``Deferred[Dict[str, List[:ref:`pem-objects`]]]``
:return: A deferred firing with a dict mapping server names to
:ref:`pem-objects`.
"""
__all__ = ['ITLSSNI01Responder']
|
Python
| 0.000001
|
@@ -1220,1225 +1220,8 @@
%22%0A%0A%0A
-class ICertificateStore(Interface):%0A %22%22%22%0A A store of certificate/keys/chains.%0A %22%22%22%0A def get(self, server_name):%0A %22%22%22%0A Retrieve the current PEM objects for the given server name.%0A%0A :param str server_name: The server name.%0A%0A :raises KeyError: if the given name does not exist in the store.%0A%0A :return: %60%60Deferred%5BList%5B:ref:%60pem-objects%60%5D%5D%60%60%0A %22%22%22%0A%0A def store(self, server_name, pem_objects):%0A %22%22%22%0A Store PEM objects for the given server name.%0A%0A Implementations do not have to permit invoking this with a server name%0A that was not already present in the store.%0A%0A :param str server_name: The server name to update.%0A :param pem_objects: A list of :ref:%60pem-objects%60; must contain exactly%0A one private key, a certificate corresponding to that private key,%0A and zero or more chain certificates.%0A%0A :rtype: %60%60Deferred%60%60%0A %22%22%22%0A%0A def as_dict(self):%0A %22%22%22%0A Get all certificates in the store.%0A%0A :rtype: %60%60Deferred%5BDict%5Bstr, List%5B:ref:%60pem-objects%60%5D%5D%5D%60%60%0A :return: A deferred firing with a dict mapping server names to%0A :ref:%60pem-objects%60.%0A %22%22%22%0A%0A%0A
__al
|
358729ade26e9a8a101bd77d574d9f5e1f065b0d
|
Delete single question
|
relier/api/question.py
|
relier/api/question.py
|
from flask import abort, request, make_response
from relier.models import Event, Question, Answer
from relier.api import AuthenticatedResource
from datetime import datetime
from flask import g
class QuestionResource(AuthenticatedResource):
def post(self, event_id):
if not g.user.can_ask:
abort(403)
event = None
try:
body = request.json
content = body['content'].encode('utf-8')
except Exception:
abort(400)
if not content:
abort(400)
event = Event.get(Event.id == event_id)
if not event:
abort(404)
try:
question = Question.create(created=datetime.now(),
content=content, event=event)
except Exception:
abort(500)
response = make_response('', 201)
response.headers['Location'] = '/events/{id_}/questions/{question_id_}'.format(id_ = event.id, question_id_ = question.id)
return response
class QuestionInstance(AuthenticatedResource):
# Retrieve single Question
def get(self, event_id, question_id):
if Question.select().where(Question.id == question_id).count() == 0:
abort(404)
question = Question.get(Question.id == question_id)
return QuestionInstance.question_to_json(question)
def delete(self, event_id, question_id):
pass
@staticmethod
def question_to_json(question):
answer_json = ''
try:
answer = Answer.get(Answer.question == question)
answer_json = answer.JSON()
except Exception:
pass
return {
'id': question.id,
'content': question.content,
'created': question.created.strftime('%Y-%m-%d %H:%M'),
'updated': question.updated.strftime('%Y-%m-%d %H:%M') if question.updated else '',
'answer': answer_json
}
class AnswerResource(AuthenticatedResource):
def post(self, event_id, question_id):
if not g.user.can_answer:
abort(403)
try:
body = request.json
content = body['content'].encode('utf-8')
except Exception as e:
print e
abort(400)
question = Question.get(Question.id == question_id)
if not question:
abort(400)
answer = Answer.create( question = question,
created = datetime.now(),
content = content)
response = make_response('', 201)
response.headers['Location'] = '/events/{id_}/questions/{question_id_}/answers/{answer_id_}'.format(id_ = event_id, question_id_ = question.id, answer_id_ = answer.id)
return response
|
Python
| 0.999999
|
@@ -1421,24 +1421,25 @@
on_id):%0A
+%0A
pass%0A%0A
@@ -1430,20 +1430,450 @@
-pass
+if not g.user.is_admin: %0A abort(403)%0A%0A question = None%0A try:%0A question = Question.get(Question.id == question_id)%0A except Question.DoesNotExist:%0A abort(404)%0A%0A answer_delete_query = Answer.delete().where(Answer.question == question)%0A answer_delete_query.execute()%0A%0A question.delete_instance();%0A response = make_response('', 204)%0A return response
%0A%0A @s
|
8112440223e2e8e4f5d8cb93b28fd846dd59418b
|
Add logout view.
|
repocracy/repo/urls.py
|
repocracy/repo/urls.py
|
from django.conf.urls.defaults import *
from django.conf import settings
import os
urlpatterns = patterns('repocracy.repo.views',
url(r'^$', 'home', name='home'),
url(r'^claim/(?P<pk>\d+)/(?P<claim_hash>[a-fA-F\d]{40})/$', 'repo_claim', name='repo_claim'),
url(r'^users/(?P<name>[\-_\d\w\\\.]+)/$', 'repo_owner', name='repo_owner'),
url(r'^repos/(?P<name>[/\-_\d\w\\\.]+)/$', 'repo_detail', name='repo_detail'),
url(r'^post-receive/(?P<pk>\d+)/$', 'post_receive', name='post_receive'),
url(r'^status/(?P<pk>\d+)/$', 'repo_status', name='repo_status'),
)
urlpatterns += patterns('',
# Not a smart way to serve repos (very slow).
# Serve with nginx using static http, or preferably the CGI hgwebdir script
url(r'^hg(?P<path>.*)$', 'django.views.static.serve',
{'show_indexes': True, 'document_root': os.path.join(settings.REPOCRACY_BASE_REPO_PATH, 'public_hg')}),
)
|
Python
| 0
|
@@ -901,10 +901,102 @@
hg')%7D),%0A
+ url(r'%5Elogout/$', 'django.contrib.auth.views.logout', %7B'redirect_field_name': 'next'%7D),%0A
)%0A
|
4fba4af394f657918efe7bdd3c091f06d13892a6
|
Fix failures induced by MyCapytain 0.1.0
|
nautilus/response.py
|
nautilus/response.py
|
# -*- coding: utf-8 -*-
"""
Response generator for the queries
"""
from __future__ import unicode_literals
from six import text_type as str
import json
from collections import OrderedDict
from copy import copy
from MyCapytain.resources.inventory import TextInventory
from lxml import etree
JSON = "application/text"
XML = "text/xml"
CTS_XML = "text/xml:CTS"
MY_CAPYTAIN = "MyCapytain"
def getcapabilities(texts, page=None, count=None, format=XML, **kwargs):
""" Transform a list of texts into a string representation
:param texts: List of Text objects
:return: String representation of the Inventory
"""
inventory = TextInventory()
for text in texts:
tg_urn = str(text.parents[1].urn)
wk_urn = str(text.parents[0].urn)
txt_urn = str(text.urn)
if tg_urn not in inventory.textgroups:
# Use another variable to avoid pointer ?
# Try to see what is most optimized
inventory.textgroups[tg_urn] = copy(text.parents[1])
inventory.textgroups[tg_urn].works = OrderedDict()
if wk_urn not in inventory.textgroups[tg_urn].works:
inventory.textgroups[tg_urn].works[wk_urn] = copy(text.parents[0])
inventory.textgroups[tg_urn].works[wk_urn].parents = tuple(
[inventory, inventory.textgroups[tg_urn]]
)
inventory.textgroups[tg_urn].works[wk_urn].texts = OrderedDict()
__text = copy(text)
inventory.textgroups[tg_urn].works[wk_urn].texts[txt_urn] = __text
__text.parents = tuple([
inventory,
inventory.textgroups[tg_urn],
inventory.textgroups[tg_urn].works[wk_urn]
])
if format == JSON:
inventory_str = ""
elif format == CTS_XML:
return str(inventory)
else:
return """
<GetCapabilities xmlns="http://chs.harvard.edu/xmlns/cts">
<request>
<requestName>GetInventory</requestName>
<requestFilters>{filters}</requestFilters>
</request>
<reply>
{inventory}
</reply>
</GetCapabilities>""".format(
inventory=str(inventory),
filters=", ".join("{0}={1}".format(key, value) for key, value in kwargs.items() if value is not None)
)
def getpassage(passage, metadata, request_urn, format=XML):
if format == XML:
return """
<GetPassage xmlns:tei="http://www.tei-c.org/ns/1.0" xmlns="http://chs.harvard.edu/xmlns/cts">
<request>
<requestName>GetPassage</requestName>
<requestUrn>{request_urn}</requestUrn>
</request>
<reply>
<urn>{full_urn}</urn>
<passage>
<TEI xmlns="http://www.tei-c.org/ns/1.0">
<text>
<body>
<div type="{category}" n="{urn}" xml:lang="{lang}">{passage}</div>
</body>
</text>
</TEI>
</passage>
</reply>
</GetPassage>""".format(
request_urn=request_urn,
full_urn=str(passage.urn),
category=metadata.subtype.lower(),
urn=str(metadata.urn),
lang=metadata.lang,
passage=passage.tostring(encoding=str)
)
def getpassageplus(passage, metadata, request_urn, format=XML):
if format == XML:
return """
<GetPassage xmlns:tei="http://www.tei-c.org/ns/1.0" xmlns="http://chs.harvard.edu/xmlns/cts">
<request>
<requestName>GetPassage</requestName>
<requestUrn>{request_urn}</requestUrn>
</request>
<reply>
<urn>{full_urn}</urn>
<passage>
<TEI xmlns="http://www.tei-c.org/ns/1.0">
<text>
<body>
<div type="{category}" n="{urn}" xml:lang="{lang}">{passage}</div>
</body>
</text>
</TEI>
</passage>
<prevnext>
<prev><urn>{prev}</urn></prev>
<next><urn>{next}</urn></next>
</prevnext>
<label>
</label>
</reply>
</GetPassage>""".format(
request_urn=request_urn,
full_urn=str(passage.urn),
category=metadata.subtype.lower(),
urn=str(metadata.urn),
lang=metadata.lang,
passage=passage.tostring(encoding=str),
prev=passage.prev or "",
next=passage.next or ""
)
def getvalidreff(reffs, level, request_urn, format=XML):
if format == XML:
return """
<GetValidReff xmlns:tei="http://www.tei-c.org/ns/1.0" xmlns="http://chs.harvard.edu/xmlns/cts">
<request>
<requestName>GetValidReff</requestName>
<requestUrn>{request_urn}</requestUrn>
</request>
<reply>
<reff level="{level}">{reffs}</reff>
</reply>
</GetValidReff>""".format(
request_urn=request_urn,
reffs="".join(["<urn>{}</urn>".format(reff) for reff in reffs]),
level=level
)
|
Python
| 0.000001
|
@@ -138,17 +138,16 @@
as str%0A%0A
-%0A
import j
@@ -265,16 +265,60 @@
ventory%0A
+from MyCapytain.common.reference import URN%0A
from lxm
@@ -3558,32 +3558,261 @@
n, format=XML):%0A
+ _prev = None%0A _next = None%0A%0A if passage.prev:%0A _prev = URN(%22%7B%7D:%7B%7D%22.format(passage.urn%5B%22text%22%5D, str(passage.prev)))%0A if passage.next:%0A _next = URN(%22%7B%7D:%7B%7D%22.format(passage.urn%5B%22text%22%5D, str(passage.next)))%0A
if format ==
@@ -5052,24 +5052,17 @@
prev=
-passage.
+_
prev or
@@ -5082,24 +5082,17 @@
next=
-passage.
+_
next or
|
8b008968e92cabf1022dff6edb37f38c3aaa5214
|
Update merge_filter.py
|
uf_examples/courses/merge_filter.py
|
uf_examples/courses/merge_filter.py
|
#!/usr/bin/env/python
"""
merge_filter.py -- find the courses in VIVO, and match them to the courses in the source. They
must match on ccn
There are two inputs:
1. Courses in VIVO. Keyed by ccn
2. UF courses in the source. Keyed the same.
There are three cases
1. Course in VIVO and in Source => Update VIVO from source
1. Course in VIVO, not in source => nothing to do
1. Course not in VIVO, is in source => Add to VIVO
See CHANGELOG.md for history
"""
__author__ = "Michael Conlon"
__copyright__ = "Copyright 2015 (c) Michael Conlon"
__license__ = "New BSD License"
__version__ = "0.02"
import sys
from pump.vivopump import read_csv_fp, write_csv_fp, get_vivo_ccn, get_parms
parms = get_parms()
data_in = read_csv_fp(sys.stdin)
print >>sys.stderr, len(data_in)
data_out = {}
vivo_courses = get_vivo_ccn(parms) # get dictionary of course uri keyed by ccn
print >>sys.stderr, 'VIVO courses', len(vivo_courses)
for row, data in data_in.items():
new_data = dict(data)
if data['ccn'] in vivo_courses: # ccn is in vivo and source
new_data['uri'] = vivo_courses[data['ccn']]
else: # key is in source, not in vivo
new_data['uri'] = ''
data_out[row] = new_data
print >>sys.stderr, 'data out', len(data_out)
write_csv_fp(sys.stdout, data_out)
|
Python
| 0.000001
|
@@ -559,9 +559,9 @@
201
-5
+6
(c)
|
672876c172d9bba9e2f29707f9fdd95e0ff10f9f
|
put data early in Redis at hourly recache
|
hortiradar/website/refresh_cache.py
|
hortiradar/website/refresh_cache.py
|
import argparse
from datetime import datetime
import flask
import ujson as json
from app import app, get_period
from hortiradar import time_format
from processing import get_cache_key, get_process_top_params, process_details, process_top, redis
def main():
parser = argparse.ArgumentParser(description="Refresh the cache for hortiradar analytics.")
parser.add_argument("--verbose", "-v", action="store_true")
args = parser.parse_args()
# bigger than usual time for when the hourly recache is too slow
cache_time = 120 * 60
groups = ["bloemen", "groente_en_fruit"]
get_time = lambda: datetime.now().strftime("%H:%M")
start_time = get_time()
max_amount = 10
group_data = []
for group in groups:
if args.verbose:
print("Caching group: {}".format(group))
arguments = (group, max_amount, get_process_top_params(group))
key = get_cache_key(process_top, *arguments)
data = process_top(*arguments, force_refresh=True, cache_time=cache_time)
group_data.append((key, data))
with app.test_request_context("/?period=week"):
_, start, end, _ = get_period(flask.request, "week")
params = {"start": start.strftime(time_format), "end": end.strftime(time_format)}
keyword_data = []
for (_, group) in group_data:
for keyword in group:
prod = keyword["label"]
if args.verbose:
print("Caching keyword: {}".format(prod))
key = get_cache_key(process_details, prod, params)
data = process_details(prod, params, force_refresh=True, cache_time=cache_time)
keyword_data.append((key, data))
end_time = get_time()
# Now populate the cache with the new data
for (key, data) in group_data + keyword_data:
redis.set(key, json.dumps(data), ex=cache_time)
sync_time = "{} - {}".format(start_time, end_time) if start_time != end_time else start_time
redis.set("sync_time", sync_time)
if __name__ == "__main__":
main()
|
Python
| 0
|
@@ -1059,16 +1059,72 @@
, data))
+%0A redis.set(key, json.dumps(data), ex=cache_time)
%0A%0A wi
@@ -1320,30 +1320,8 @@
t)%7D%0A
- keyword_data = %5B%5D%0A
@@ -1666,173 +1666,8 @@
- keyword_data.append((key, data))%0A end_time = get_time()%0A%0A # Now populate the cache with the new data%0A for (key, data) in group_data + keyword_data:%0A
@@ -1715,24 +1715,50 @@
ache_time)%0A%0A
+ end_time = get_time()%0A
sync_tim
|
e77243ebd39eea6033b14d53ddeea870893548ae
|
Create tomato.py
|
tomato.py
|
tomato.py
|
#!/bin/python2.7
import argparse
import re
import random
import struct
from itertools import chain
#################
### ARGUMENTS ###
#################
parser = argparse.ArgumentParser(add_help=True)
parser.add_argument("file", help="input file")
parser.add_argument("-o", "--output", help="output file")
parser.add_argument('-m', "--mode", action='store', dest='modevalue',help='choose mode')
#parser.add_argument("-m", "--mode", type=string, choices=["ikill","iswap","bloom","pulse","shuffle"],help="defines script mode", default=False)
args = parser.parse_args()
filein = args.file
fileout = args.output
mode = args.modevalue
####################
### OPENING FILE ###
####################
#open .avi file as binary
with open (filein, 'rb') as f:
## split the content at "idx1"
a = f.read().split('idx1', 1)
a1 = a[1]
## get the length of the index and store it
a1, idxl = a1[4:], a1[:4]
## get the first iframe and store it
n = 16
iframe, a1 = a1[:n], a1[n:]
## put all frames in array
b = [a1[i:i+n] for i in range(0, len(a1), n)]
## take out all of the sound frames cuz who gives a fuck
sframeregex = re.compile(b'01wb\x10\x00\x00\x00.{8}')
b = [x for x in b if not re.match(sframeregex,x)]
## calculate number of frames
c = len(b)
#########################
### OPERATIONS TO IDX ###
#########################
### MODE - SHUFFLE
#####################
if mode == "shuffle":
g = random.sample(b,c)
### MODE - DELETE IFRAMES
###########################
if mode == "ikill":
iframeregex = re.compile(b'00dc\x10\x00\x00\x00.{8}')
g = [x for x in b if not re.match(iframeregex,x)]
### MODE - BLOOM
##################
if mode == "bloom":
## bloom options
frame = 150
repeat = 500
## split list
lista = b[:frame]
listb = b[frame:]
## rejoin list with bloom
g = lista + ([b[frame]]*repeat) + listb
### MODE - P PULSE
##################
if mode == "pulse":
pulselen = 20
pulseryt = 100
d = [[x for j in range(pulselen)] if not i%pulseryt else x for i,x in enumerate(b)]
e = [item for sublist in d for item in sublist]
f = ''.join(e)
g = [f[i:i+n] for i in range(0, len(f), n)]
##just having fun by adding this at the end of the bloom
#d = random.sample(d,c + repeat)
########################
### FIX INDEX LENGTH ###
########################
print "old index size : " + str(c + 1) + " frames"
hey = len(g)*16
print "new index size : " + str((hey/16) + 1) + " frames"
## convert it to packed data
idxl = struct.pack('<I',hey)
###################
### SAVING FILE ###
###################
## rejoin the whole thing
data = ''.join(a[0] + "idx1" + idxl + iframe + ''.join(g))
f = open(fileout, 'wb')
f.write(data)
f.close()
|
Python
| 0.999536
|
@@ -391,17 +391,16 @@
mode')%0A
-#
parser.a
@@ -415,131 +415,270 @@
ent(
-%22-m%22, %22--mode%22, type=string, choices=%5B%22ikill%22,%22iswap%22,%22bloom%22,%22pulse%22,%22shuffle%22%5D,help=%22defines script mode%22, default=False)
+'-c', action='store', dest='countframes',help='var1', default=1)%0Aparser.add_argument('-n', action='store', dest='positframes',help='var2', default=1)%0Aparser.add_argument('-s', action='store', dest='simple_value',%0A help='Store a simple value')%0A
%0Aarg
@@ -764,16 +764,78 @@
odevalue
+%0Acountframes = args.countframes%0Apositframes = args.positframes
%0A%0A######
@@ -1905,38 +1905,64 @@
ions
-%09
%0A%09%09
-frame = 150%0A%09%09repeat = 500
+repeat = int(countframes)%09%0A%09%09frame = int(positframes)
%0A%09%0A%09
@@ -2168,27 +2168,54 @@
n =
-20%0A%09%09pulseryt = 100
+int(countframes)%0A%09%09pulseryt = int(positframes)
%0A%09%0A%09
|
2e39edbfab0d1d70ca527a024210a15d357842b7
|
Fix non-existent import
|
elaboratecharts/__init__.py
|
elaboratecharts/__init__.py
|
from os import path
from shutil import copytree, rmtree
from flask.ext.assets import Environment, Bundle
from . import assets
from .views import elaboratecharts
def rel(p):
return path.join(path.dirname(__file__), p)
class ElaborateCharts(object):
def __init__(self, app=None, url_prefix=None):
if app is not None:
self.init_app(app, url_prefix=url_prefix)
def init_app(self, app, url_prefix=None):
app.register_blueprint(elaboratecharts, url_prefix=url_prefix)
self.init_assets(app, url_prefix=url_prefix)
def init_assets(self, app, url_prefix=None):
blueprint = app.blueprints['elaboratecharts']
env = Environment(app)
env.url = (url_prefix or '') + blueprint.static_url_path
env.directory = blueprint.static_folder
env.load_path = map(rel, [
'scss',
'coffee',
'bower_components',
])
js_bundle = Bundle(
Bundle(
'jquery/dist/jquery.js',
'bootstrap-sass-official/assets/javascripts/bootstrap.js',
'moment/min/moment-with-locales.js',
'highstock-release/highstock.src.js',
'ladda-bootstrap/dist/spin.js',
'ladda-bootstrap/dist/ladda.js',
'bluebird/js/browser/bluebird.js',
'lodash/dist/lodash.js',
('history.js/scripts/bundled-uncompressed/html4+html5/'
'jquery.history.js'),
'jquery.finger/dist/jquery.finger.js',
output='js_requirements.js'),
Bundle(
'index.coffee',
filters=['coffeescript'],
output='js_index.js'))
css_bundle = Bundle(
'all.scss',
filters=['scss'],
output='css_all.css')
env.config['sass_load_paths'] = map(rel, [
'bower_components/bootstrap-sass-official/assets/stylesheets/',
'bower_components/ladda-bootstrap/css/',
'bower_components/font-awesome/scss/',
])
# Copy fonts to static folder
static_fonts = path.join(env.directory, 'fonts')
try:
rmtree(static_fonts)
except OSError:
pass
copytree(rel('bower_components/font-awesome/fonts'),
static_fonts)
env.register('js_all', js_bundle)
env.register('css_all', css_bundle)
|
Python
| 0.999866
|
@@ -104,29 +104,8 @@
le%0A%0A
-from . import assets%0A
from
|
cba7a02effc41e212bcefa22d918d8c4728b3fe8
|
fix example formatting in docstrings
|
src/uptime_report/cli.py
|
src/uptime_report/cli.py
|
# -*- coding: utf-8 -*-
"""Uptime report CLI.
This module contains all CLI entrypoints. Command line argument parsing and
execution is implemented via `clize`_.
Examples::
$ python -m uptime_report.cli --version
.. _clize:
https://github.com/epsy/clize
"""
from __future__ import print_function, unicode_literals
import json
import logging
import sys
from enum import Enum
from operator import attrgetter
import arrow
from clize import errors, parameters, parser, run
from sigtools import modifiers, wrappers
from uptime_report._version import get_versions
from uptime_report.backends import get_backend, list_backends
from uptime_report.config import read_config, write_config
from uptime_report.outage import (encode_outage, get_downtime_in_seconds,
get_outages, print_outages,
write_outages_csv)
try:
import requests_cache
except ImportError:
requests_cache = None
DEFAULT_BACKEND = 'pingdom'
"""str: name of default backend module."""
@parser.value_converter
class Format(Enum):
"""Enumeration of existing output format types."""
TEXT = 'txt'
CSV = 'csv'
JSON = 'json'
DEFAULT_FORMAT = Format.TEXT
class TimeUnits(Enum):
"""Enumeration of time division abbreviations.
Attributes:
minutes (str): ``m``
hours (str): ``h``
days (str): ``d``
months (str): ``mo``
years (str): ``y``
"""
minutes = 'm'
hours = 'h'
days = 'd'
months = 'mo'
years = 'y'
@parser.value_converter
def get_time(value, now=None):
"""Convert a parameter to a timestamp.
Based on the passed value create a timestamp that represents the
value. Both absolute and relative forms are supported::
Example:
>>> get_time('2017-06-03')
1496448000
>>> now = arrow.utcnow().replace(microsecond=0).timestamp
>>> get_time('+2d') == now + 2*60*60*24
True
Valid time units for relative values are described in :class:`TimeUnits`.
If a time unit is not provided the default is :py:attr:`TimeUnits.days`.
Additionally, for relative values, the current time can be specified by
passing an :class:`~arrow.arrow.Arrow` instance as the ``now``
argument::
Example:
>>> today = get_time('2017-06-03')
>>> get_time('+1d', arrow.get(today))
1496534400
Args:
value (str): the value to convert
now (:obj:`~arrow.arrow.Arrow`, optional): the base time to use
for relative values.
Returns:
int: a timestamp
Raises:
clize.errors.CliValueError: if the value cannot be converted.
"""
now = arrow.utcnow() if not now else now.replace(microsecond=0)
if not value:
return now.timestamp
if value.startswith('-') or value.startswith('+'):
op = value[0]
val = value[1:]
try:
num = ''.join([c for c in val if c.isdigit()])
if len(num) < len(val):
unit = TimeUnits(val[len(num):])
else:
unit = TimeUnits('d')
d = now.replace(**{unit.name: int(op + num)})
return d.timestamp
except ValueError:
pass
try:
return arrow.get(value).timestamp
except arrow.parser.ParserError as e:
raise errors.CliValueError(e)
@parser.value_converter
def get_log_level(level):
"""Convert a value to a log level.
Converts a case-insensitive log level name to the corresponding
integer value from Python's :mod:`logging` package::
Example:
>>> assert logging.DEBUG == get_log_level('debug')
Args:
level (str): the value to convert
Returns:
int: a log level from the :mod:`logging` package.
Raises:
clize.errors.CliValueError: if the value cannot be converted.
"""
try:
return getattr(logging, level.upper())
except AttributeError:
raise errors.CliValueError(
'Invalid log level: {}'.format(level))
@wrappers.decorator
@modifiers.autokwoargs
@modifiers.annotate(log_level=get_log_level)
def with_common_args(
wrapped, log_level=None, use_cache=False, *args, **kwargs):
logging.basicConfig(level=log_level or logging.ERROR)
if use_cache:
if requests_cache:
requests_cache.install_cache()
else:
print("Cache disabled, missing requests-cache module.")
return wrapped(*args, **kwargs)
@wrappers.decorator
@modifiers.autokwoargs
def with_backend(wrapped, backend=DEFAULT_BACKEND, *args, **kwargs):
try:
config = read_config()[backend]
except KeyError:
raise errors.CliValueError(
"Missing configuration for backend {}".format(backend))
impl = get_backend(backend).from_config(config)
return wrapped(backend=impl, *args, **kwargs)
@wrappers.decorator
@modifiers.autokwoargs
@modifiers.annotate(start=get_time, finish=get_time)
def with_filters(
wrapped, start, finish, overlap=0, minlen=300,
*args, **kwargs):
filters = {
'start': start,
'finish': finish,
'overlap': overlap,
'minlen': minlen
}
return wrapped(filters=filters, *args, **kwargs)
@with_common_args
@with_filters
@with_backend
@modifiers.annotate(fmt=parameters.one_of(*map(attrgetter('value'), Format)))
def outages(filters=None, backend=None, fmt=DEFAULT_FORMAT):
"""List outages."""
outages = get_outages(backend, **filters)
if fmt == Format.JSON:
print(json.dumps(list(outages), indent=4, default=encode_outage))
elif fmt == Format.CSV:
write_outages_csv(sys.stdout, outages)
else:
print_outages(outages)
@with_common_args
@with_filters
@with_backend
def uptime(filters=None, backend=None):
"""Do the uptime reporting stuff."""
outages = get_outages(backend, **filters)
downtime = get_downtime_in_seconds(outages)
print(downtime)
def version():
"""Get the version of this program."""
return get_versions().get('version', 'unknown')
def backends():
"""Print supported backends."""
return "\n".join(list_backends())
def main(**kwargs):
"""Run the CLI application."""
run([uptime, outages, write_config], alt=[version, backends], **kwargs)
if __name__ == '__main__':
main()
|
Python
| 0.000009
|
@@ -1759,18 +1759,17 @@
upported
-::
+.
%0A%0A Ex
@@ -2256,20 +2256,16 @@
%60%60now%60%60
-%0A
argumen
@@ -2265,18 +2265,17 @@
argument
-::
+.
%0A%0A Ex
@@ -3587,18 +3587,17 @@
package
-::
+.
%0A%0A Ex
|
b6098d5b4578547fea192fe96998dbc43ef9dcb0
|
upgrade values check
|
http_lazy_headers/fields/upgrade.py
|
http_lazy_headers/fields/upgrade.py
|
# -*- coding: utf-8 -*-
from ..shared.utils import constraints
from ..shared import bases
def upgrade(name, version=None):
return name, version
class ProtocolName:
# http://www.iana.org/assignments/http-upgrade-tokens/http-upgrade-tokens.xml
http = 'HTTP'
tls = 'TLS'
web_socket = 'WebSocket'
h2c = 'h2c'
class Upgrade(bases.MultiHeaderBase):
"""
The ``Upgrade`` header field is intended to\
provide a simple mechanism for transitioning\
from HTTP/1.1 to some other protocol on the\
same connection. A client MAY send a list of\
protocols in the Upgrade header field of a\
request to invite the server to switch to\
one or more of those protocols, in order of\
descending preference, before sending the\
final response. A server MAY ignore a\
received Upgrade header field if it wishes\
to continue using the current protocol on\
that connection. Upgrade cannot be used to\
insist on a protocol change.
Example::
Upgrade([
upgrade(ProtocolName.http, '2.0')
])
Upgrade([
upgrade(ProtocolName.web_socket)
])
Upgrade([
('HTTP', '2.0'),
('SHTTP', '1.3'),
('IRC', '6.9'),
('RTA', 'x11')
])
`Ref. <http://httpwg.org/specs/rfc7230.html#header.upgrade>`_
"""
name = 'upgrade'
def value_str(self, value):
protocol, version = value
if version:
return '{}/{}'.format(protocol, version)
return protocol
def values_str(self, values):
return ', '.join(
self.value_str(v)
for v in values)
def clean_value(self, raw_value):
try:
protocol_name, protocol_version = raw_value.split('/', 1)
except ValueError:
constraints.must_be_token(raw_value) # Just name
return raw_value, None
else:
constraints.must_be_token(protocol_name)
constraints.must_be_token(protocol_version)
return protocol_name, protocol_version
|
Python
| 0.000001
|
@@ -57,16 +57,54 @@
traints%0A
+from ..shared.utils import assertions%0A
from ..s
@@ -1431,16 +1431,235 @@
grade'%0A%0A
+ def check_value(self, value):%0A assertions.must_be_tuple_of(value, 2)%0A protocol, version = value%0A assertions.must_be_token(protocol)%0A version is None or assertions.must_be_token(version)%0A%0A
def
|
471e0f4e91eb4513315193ce2b2b0f13e2c9724c
|
remove stray "
|
corehq/util/datadog/gauges.py
|
corehq/util/datadog/gauges.py
|
from functools import wraps
from celery.task import periodic_task
from corehq.util.datadog import statsd
from corehq.util.soft_assert import soft_assert
def datadog_gauge_task(name, fn, run_every, enforce_prefix='commcare'):
""""
helper for easily registering datadog gauges to run periodically
To update a datadog gauge on a schedule based on the result of a function
just add to your app's tasks.py:
my_calculation = datadog_gauge_task('my.datadog.metric', my_calculation_function,
run_every=crontab(minute=0))
"""
soft_assert(fail_if_debug=True).call(
not enforce_prefix or name.split('.')[0] == enforce_prefix,
"Did you mean to call your gauge 'commcare.{}'? "
"If you're sure you want to forgo the prefix, you can "
"pass enforce_prefix=None".format(name))
datadog_gauge = _DatadogGauge(name, fn, run_every)
return datadog_gauge.periodic_task()
class _DatadogGauge(object):
def __init__(self, name, fn, run_every):
self.name = name
self.fn = fn
self.run_every = run_every
def periodic_task(self):
@periodic_task('background_queue', run_every=self.run_every,
acks_late=True, ignore_result=True)
@wraps(self.fn)
def inner(*args, **kwargs):
statsd.gauge(self.name, self.fn(*args, **kwargs))
return inner
|
Python
| 0.000042
|
@@ -227,17 +227,16 @@
%0A %22%22%22
-%22
%0A hel
|
213ddc9ffbb171c17c051c6394baa0499abfc820
|
fix UnboundLocalError
|
corehq/util/tests/test_log.py
|
corehq/util/tests/test_log.py
|
from __future__ import absolute_import
from __future__ import unicode_literals
import six
from django.test import SimpleTestCase
from ..log import clean_exception
class TestLogging(SimpleTestCase):
def test_bad_traceback(self):
result = "JJackson's SSN: 555-55-5555"
try:
# copied from couchdbkit/client.py
assert isinstance(result, dict), 'received an invalid ' \
'response of type %s: %s' % (type(result), repr(result))
except AssertionError as e:
pass
self.assertIn(result, six.text_type(e))
self.assertNotIn(result, six.text_type(clean_exception(e)))
def test_that_I_didnt_break_anything(self):
exception = AssertionError("foo")
cleaned_exception = clean_exception(exception)
self.assertEqual(exception.__class__, cleaned_exception.__class__)
self.assertEqual(six.text_type(exception), six.text_type(cleaned_exception))
|
Python
| 0.000002
|
@@ -276,16 +276,41 @@
5-5555%22%0A
+ exception = None%0A
@@ -556,12 +556,21 @@
-pass
+exception = e
%0A
@@ -611,16 +611,24 @@
t_type(e
+xception
))%0A
@@ -686,16 +686,24 @@
eption(e
+xception
)))%0A%0A
|
24b85059dcc5c17d21011bc7d1975f519e09837d
|
Improve formatting
|
netsecus/__init__.py
|
netsecus/__init__.py
|
#!/usr/bin/env python
from __future__ import unicode_literals
import imaplib
import logging
import time
import helper
import rules
# useful for debugging: $ openssl s_client -crlf -connect imap.gmail.com:993
#
# core functions
#
def main():
# patching imaplib
imaplib.Commands["MOVE"] = ("SELECTED",)
imaplib.Commands["IDLE"] = ("AUTH", "SELECTED",)
imaplib.Commands["DONE"] = ("AUTH", "SELECTED",)
helper.setupLogging()
imapmail = loginIMAP(helper.getConfigValue("login", "imapmail_server"), helper.getConfigValue(
"login", "mail_address"), helper.getConfigValue("login", "mail_password"))
imapmail._command("IDLE")
if "idling" in imapmail.readline().decode("utf-8"):
logging.debug("Server supports IDLE.")
firstRun = True
while True:
if firstRun or "EXISTS" in imapmail.readline().decode("utf-8"):
imapmail._command("DONE")
imapmail.readline()
ruleLoop(imapmail)
imapmail._command("IDLE")
logging.debug("Entering IDLE state.")
firstRun = False
else:
logging.debug("Server lacks support for IDLE... Falling back to delay.")
while True:
ruleLoop(imapmail)
time.sleep(helper.getConfigValue("settings", "delay"))
def ruleLoop(imapmail):
for rule in helper.getConfigValue("rules"):
processRule(imapmail, rule)
def processRule(imapmail, rule):
logging.debug("**** rule: '%s'" % rule["title"])
mails = []
for step in rule["steps"]:
logging.debug("* exec: %s" % step[0])
mails = getattr(rules, step[0])(imapmail, mails, *step[1:])
if not isinstance(mails, list):
mails = [mails]
if not mails:
logging.debug("* ret no mails")
break
logging.debug("* ret %d mail(s)" % len(mails))
logging.debug("**** done: '%s'" % rule["title"])
def loginIMAP(server, address, password):
imapmail = imaplib.IMAP4_SSL(server)
imapmail.login(address, password)
imapmail.select()
logging.info("IMAP login (%s on %s)" % (address, server))
return imapmail
if __name__ == "__main__":
main()
|
Python
| 0.99985
|
@@ -469,16 +469,25 @@
ginIMAP(
+%0A
helper.g
@@ -528,16 +528,24 @@
erver%22),
+%0A
helper.
@@ -559,25 +559,16 @@
igValue(
-%0A
%22login%22,
@@ -584,16 +584,24 @@
dress%22),
+%0A
helper.
|
10f3daa2a32b238e67a3ed380aaeec7f3c61cfb7
|
Fix for issue 84: po files not found when there's a two-letters named directory in the project's path.
|
rosetta/poutil.py
|
rosetta/poutil.py
|
import re, os, django
from django.conf import settings
from rosetta.conf import settings as rosetta_settings
from django.core.cache import cache
try:
set
except NameError:
from sets import Set as set # Python 2.3 fallback
def find_pos(lang, project_apps = True, django_apps = False, third_party_apps = False):
"""
scans a couple possible repositories of gettext catalogs for the given
language code
"""
paths = []
# project/locale
parts = settings.SETTINGS_MODULE.split('.')
project = __import__(parts[0], {}, {}, [])
abs_project_path = os.path.normpath(os.path.abspath(os.path.dirname(project.__file__)))
if project_apps:
paths.append(os.path.abspath(os.path.join(os.path.dirname(project.__file__), 'locale')))
# django/locale
if django_apps:
django_paths = cache.get('rosetta_django_paths')
if django_paths is None:
django_paths = []
for root,dirnames,filename in os.walk(os.path.abspath(os.path.dirname(django.__file__))):
if 'locale' in dirnames:
django_paths.append(os.path.join(root , 'locale'))
continue
cache.set('rosetta_django_paths', django_paths, 60*60)
paths = paths + django_paths
# settings
for localepath in settings.LOCALE_PATHS:
if os.path.isdir(localepath):
paths.append(localepath)
# project/app/locale
for appname in settings.INSTALLED_APPS:
if rosetta_settings.EXCLUDED_APPLICATIONS and appname in rosetta_settings.EXCLUDED_APPLICATIONS:
continue
p = appname.rfind('.')
if p >= 0:
app = getattr(__import__(appname[:p], {}, {}, [appname[p+1:]]), appname[p+1:])
else:
app = __import__(appname, {}, {}, [])
apppath = os.path.normpath(os.path.abspath(os.path.join(os.path.dirname(app.__file__), 'locale')))
# django apps
if 'contrib' in apppath and 'django' in apppath and not django_apps:
continue
# third party external
if not third_party_apps and abs_project_path not in apppath:
continue
# local apps
if not project_apps and abs_project_path in apppath:
continue
if os.path.isdir(apppath):
paths.append(apppath)
ret = set()
rx=re.compile(r'(\w+)/../\1')
langs = (lang,)
if u'-' in lang:
_l,_c = map(lambda x:x.lower(),lang.split(u'-'))
langs += (u'%s_%s' %(_l, _c), u'%s_%s' %(_l, _c.upper()), )
elif u'_' in lang:
_l,_c = map(lambda x:x.lower(),lang.split(u'_'))
langs += (u'%s-%s' %(_l, _c), u'%s-%s' %(_l, _c.upper()), )
for path in paths:
for lang_ in langs:
dirname = rx.sub(r'\1', '%s/%s/LC_MESSAGES/' %(path,lang_))
for fn in ('django.po','djangojs.po',):
if os.path.isfile(dirname+fn):
ret.add(os.path.abspath(dirname+fn))
return list(ret)
def pagination_range(first,last,current):
r = []
r.append(first)
if first + 1 < last: r.append(first+1)
if current -2 > first and current -2 < last: r.append(current-2)
if current -1 > first and current -1 < last: r.append(current-1)
if current > first and current < last: r.append(current)
if current + 1 < last and current+1 > first: r.append(current+1)
if current + 2 < last and current+2 > first: r.append(current+2)
if last-1 > first: r.append(last-1)
r.append(last)
r = list(set(r))
r.sort()
prev = 10000
for e in r[:]:
if prev + 1 < e:
try:
r.insert(r.index(e), '...')
except ValueError:
pass
prev = e
return r
|
Python
| 0
|
@@ -3,12 +3,8 @@
port
- re,
os,
@@ -2517,42 +2517,8 @@
t()%0A
- rx=re.compile(r'(%5Cw+)/../%5C1')%0A
@@ -2833,24 +2833,65 @@
)%0A %0A
+ paths = map(os.path.normpath, paths)%0A
for path
@@ -2955,56 +2955,47 @@
e =
-rx.sub(r'%5C1', '%25s/%25s/LC_MESSAGES/' %25(path,lang_)
+os.path.join(path, lang_, 'LC_MESSAGES'
)%0A
@@ -3044,16 +3044,69 @@
.po',):%0A
+ filename = os.path.join(dirname, fn)%0A
@@ -3131,26 +3131,24 @@
.isfile(
-dir
+file
name
-+fn
):%0A
@@ -3190,18 +3190,16 @@
ath(
-dir
+file
name
-+fn
))%0A
|
c492c42639f7a487dc27a95a5a785dd9c62ecdb7
|
Change project status formatting
|
clowder/utility/print_utilities.py
|
clowder/utility/print_utilities.py
|
"""Print utilities"""
import os
from termcolor import colored
from clowder.utility.git_utilities import (
git_current_sha,
git_current_branch,
git_is_detached,
git_is_dirty
)
def print_project_status(root_directory, path, name):
"""Print repo status"""
repo_path = os.path.join(root_directory, path)
git_path = os.path.join(repo_path, '.git')
if not os.path.isdir(git_path):
return
if git_is_dirty(repo_path):
color = 'red'
symbol = '*'
else:
color = 'green'
symbol = ''
project_output = colored(symbol + name, color)
if git_is_detached(repo_path):
current_ref = git_current_sha(repo_path)
current_ref_output = colored('(HEAD @ ' + current_ref + ')', 'magenta')
else:
current_branch = git_current_branch(repo_path)
current_ref_output = colored('(' + current_branch + ')', 'magenta')
path_output = colored(path, 'cyan')
print(project_output + ' @ ' + path_output)
print(current_ref_output)
|
Python
| 0
|
@@ -973,30 +973,8 @@
tput
- + ' @ ' + path_output
)%0A
@@ -995,14 +995,34 @@
t_ref_output
+ + ' ' + path_output
)%0A
|
b82cc5adba91610093621fefc5121393d7a8bd35
|
Split ignored_paths
|
coalib/parsing/DefaultArgParser.py
|
coalib/parsing/DefaultArgParser.py
|
"""
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import argparse
from coalib.misc.i18n import _
default_arg_parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, description=__doc__)
default_arg_parser.add_argument('-f', '--files', nargs='+', metavar='FILE', dest='allowed_files',
help=_('Files that should be checked'))
default_arg_parser.add_argument('-D', '--flat-dirs', nargs='+', metavar='DIR', dest='flat_directories',
help=_('Directories of files that should be checked, excluding sub directories'))
default_arg_parser.add_argument('-d', '--rec-dirs', nargs='+', metavar='DIR', dest='recursive_directories',
help=_('Directories of files that should be checked, including sub directories'))
default_arg_parser.add_argument('-t', '--allowed', nargs='+', metavar='TYPE', dest='allowed_file_types',
help=_('File types of files to be checked'))
default_arg_parser.add_argument('-F', '--forbidden', nargs='+', metavar='TYPE', dest='forbidden_file_types',
help=_('File types not to be checked'))
default_arg_parser.add_argument('-i', '--ignored', nargs='+', metavar='PATH', dest='ignored_paths',
help=_('Files or directories that should be ignored'))
default_arg_parser.add_argument('-B', '--bear-dirs', nargs='+', metavar='DIR', dest='bear-directories',
help=_('Directories to look in for bears'))
default_arg_parser.add_argument('-b', '--bears', nargs='+', metavar='NAME', dest='bears',
help=_('Names of bears to use'))
default_arg_parser.add_argument('-I', '--ignored_bears', nargs='+', metavar='REGEX', dest='ignored_bears',
help=_('Names of bears not to use'))
default_arg_parser.add_argument('-r', '--regex-bears', nargs='+', metavar='REGEX', dest='regex_bears',
help=_('Regular expressions matching bears to use'))
default_arg_parser.add_argument('-l', '--log', nargs=1, choices=['CONSOLE', 'TXT', 'HTML'], metavar='ENUM',
dest='log_type', help=_("Enum('CONSOLE','TXT','HTML') to determine type of logging"))
default_arg_parser.add_argument('-L', '--log_level', nargs=1, choices=['ERR', 'WARN', 'INFO', 'DEBUG'],
metavar='ENUM', dest='log_level',
help=_("Enum('ERR','WARN','INFO','DEBUG') to set level of log output"))
default_arg_parser.add_argument('-o', '--output', nargs=1, metavar='FILE', dest='output',
help=_('Location of lot output'))
default_arg_parser.add_argument('-c', '--config', nargs='+', metavar='FILE', dest='config',
help=_('Configuration file to be used'))
default_arg_parser.add_argument('-s', '--save', nargs='?', const=True, metavar='FILE', dest='save',
help=_('Filename of file to be saved to, defaults to config file'))
default_arg_parser.add_argument('-j', '--job-count', nargs=1, type=int, metavar='INT', dest='job_count',
help=_('Number of processes to be allowed to run at once'))
default_arg_parser.add_argument('-C', '--apply-changes', nargs=1, choices=['YES', 'NO', 'ASK'], metavar='ENUM',
dest='apply_changes', help=_("Enum('YES','NO','ASK') to set whether to apply changes"))
|
Python
| 0
|
@@ -1799,16 +1799,22 @@
-ignored
+-files
', nargs
@@ -1853,12 +1853,12 @@
red_
-path
+file
s',%0A
@@ -1907,12 +1907,179 @@
les
-or d
+that should be ignored'))%0Adefault_arg_parser.add_argument('-p', '--ignored-dirs', nargs='+', metavar='PATH', dest='ignored_dirs',%0A help=_('D
irec
|
d4f86c8b9ced020f842a4321b4108c9372d7b4ec
|
Add the staticfiles app.
|
coda/coda_project/settings/base.py
|
coda/coda_project/settings/base.py
|
# Base settings for coda_project
import os
import json
from datetime import timedelta
from django.core.exceptions import ImproperlyConfigured
# Absolute path to the settings module
SETTINGS_ROOT = os.path.dirname(__file__)
# Absolute path to the project
PROJECT_ROOT = os.path.dirname(SETTINGS_ROOT)
# Absolute path to the site directory
SITE_ROOT = os.path.dirname(PROJECT_ROOT)
# Compose a path from the project root
project_path = lambda path: os.path.join(PROJECT_ROOT, path)
# Compose path from the site root
site_path = lambda path: os.path.join(SITE_ROOT, path)
# Get our secrets from a file outside of version control.
# This helps to keep the settings files generic.
with open(os.path.join(SETTINGS_ROOT, "secrets.json")) as f:
secrets = json.loads(f.read())
def get_secret(setting, secrets=secrets):
try:
return secrets[setting]
except KeyError:
error_msg = "The {0} secret is not set.".format(setting)
raise ImproperlyConfigured(error_msg)
DEBUG = True
TEMPLATE_DEBUG = DEBUG
MAINTENANCE_MSG = get_secret("MAINTENANCE_MSG")
TIME_ZONE = 'America/Chicago'
LANGUAGE_CODE = 'en-us'
USE_I18N = True
MEDIA_ROOT = site_path('media')
MEDIA_URL = '/media/'
ADMIN_MEDIA_PREFIX = '/media/admin/'
SECRET_KEY = get_secret("SECRET_KEY")
DATABASES = {
'default': {
'NAME': get_secret("DB_NAME"),
'USER': get_secret("DB_USER"),
'ENGINE': 'django.db.backends.mysql',
'PASSWORD': get_secret("DB_PASSWORD"),
'HOST': get_secret("DB_HOST"),
'PORT': get_secret("DB_PORT"),
}
}
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',)
ROOT_URLCONF = 'coda_project.urls'
TEMPLATE_DIRS = (
site_path('templates'),)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.request',)
# Let the view know if we are in "proxy mode" or not.
# this uses the coda instance as a reverse proxy for the archival storage nodes
# setting to false sends requests directly to the archival servers.
CODA_PROXY_MODE = False
DJANGO_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.sitemaps',
'django.contrib.admindocs',
'django.contrib.admin',
'django.contrib.humanize',)
THIRD_PARTY_APPS = (
'premis_event_service',
)
LOCAL_APPS = (
'coda_mdstore',
'coda_replication',
'coda_oaipmh',
'coda_validate',)
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
VALIDATION_PERIOD = timedelta(days=365)
|
Python
| 0
|
@@ -248,17 +248,16 @@
project
-
%0APROJECT
@@ -1153,100 +1153,93 @@
ue%0A%0A
-MEDIA_ROOT = site_path('media')%0A%0AMEDIA_URL = '/media/'%0A%0AADMIN_MEDIA_PREFIX = '/media/admin/'
+STATIC_URL = '/static/'%0A%0ASTATICFILES_DIRS = %5B%0A os.path.join(SITE_ROOT, 'static')%5D%0A
%0A%0ASE
@@ -2523,24 +2523,58 @@
.sessions',%0A
+ 'django.contrib.staticfiles',%0A
'django.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.