| commit (string, len 40) | subject (string, len 1-3.25k) | old_file (string, len 4-311) | new_file (string, len 4-311) | old_contents (string, len 0-26.3k) | lang (string, 3 classes) | proba (float64, 0-1) | diff (string, len 0-7.82k) |
|---|---|---|---|---|---|---|---|
9b2cc65a792eb850d982653100ac948990904125
|
Display microseconds in integer decimal
|
appstats/filters.py
|
appstats/filters.py
|
# encoding: utf-8
import json
def json_filter(value):
return json.dumps(value)
def count_filter(value):
if value is None:
return ""
count = float(value)
base = 1000
prefixes = [
('K'),
('M'),
('G'),
('T'),
('P'),
('E'),
('Z'),
('Y')
]
if count < base:
return "%.1f" % count
else:
for i, prefix in enumerate(prefixes):
unit = base ** (i + 2)
if count < unit:
return "%.1f %s" % ((base * count / unit), prefix)
return "%.1f %s" % ((base * count / unit), prefix)
def time_filter(value):
if value is None:
return ""
# Transform secs into microseconds
time = float(value) * 1000000
if time < 1000:
return u"%.1f µs" % time
else:
time /= 1000
if time < 1000:
return "%.1f ms" % time
else:
time /= 1000
if time < 60:
return "%.1f s" % time
else:
time /= 60
if time < 60:
return "%.1f m" % time
else:
time /= 60
if time < 24:
return "%.1f h" % time
else:
time /= 24
return "%.1f d" % time
def default_filter(value):
if value is None:
return ""
return "%.1f" % float(value)
|
Python
| 0.99978
|
@@ -803,19 +803,17 @@
turn u"%
-.1f
+d
µs" % t
|
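Decoded, the hunk above changes the microsecond branch of `time_filter` from `u"%.1f µs"` to `u"%d µs"`, so sub-millisecond durations print as whole microseconds. A minimal runnable sketch of just that branch (the function name and sample value are illustrative, not from the commit):

```python
# -*- coding: utf-8 -*-
def format_us(seconds):
    """Sketch of the changed branch; the real filter continues on to ms/s/m/h/d."""
    time = float(seconds) * 1000000  # seconds -> microseconds
    if time < 1000:
        return u"%d µs" % time       # after the commit; was u"%.1f µs" % time
    return u"%.1f ms" % (time / 1000)

print(format_us(0.0000423))  # -> 42 µs
```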
f07a05f6a6edd0ef481dd9a24c1556b345fe7686
|
Remove attempt to import module that no longer exists
|
iati/tests/conftest.py
|
iati/tests/conftest.py
|
"""Configuration to exist in the global scope for pytest."""
import collections
import pytest
import iati.default
import iati.resources
import iati.tests.utilities
import iati
pytest_plugins = [ # name required by pytest # pylint: disable=invalid-name
'iati.tests.fixtures.comparison',
'iati.tests.fixtures.utility',
'iati.tests.fixtures.versions'
]
def _check_latest_version_mark(item):
"""Check that functions marked as supporting the latest version of the IATI Standard have been updated."""
latest_version_marker = item.get_marker('latest_version')
if latest_version_marker is not None:
latest_version = iati.Version(latest_version_marker.args[0])
assert latest_version == iati.version.STANDARD_VERSION_LATEST
def pytest_runtest_call(item):
"""Run operations that are called when tests are run."""
_check_latest_version_mark(item)
@pytest.fixture(params=[
('2.02', 62), # There are 38 embedded codelists at v2.02, plus 24 non-embedded codelists (which are valid for any version)
('2.01', 61), # There are 37 embedded codelists at v2.01, plus 24 non-embedded codelists (which are valid for any version)
('1.05', 59), # There are 35 embedded codelists at v1.05, plus 24 non-embedded codelists (which are valid for any version)
('1.04', 59) # There are 35 embedded codelists at v1.04, plus 24 non-embedded codelists (which are valid for any version)
])
def codelist_lengths_by_version(request): # latest_version fixture used to perform checks when adding new versions # pylint: disable=unused-argument
"""Return a tuple containing versions of the Standard, and the number of Codelists for that version.
Format: `(version=[standardVersion], expected_length=[numCodelists])`
"""
request.applymarker(pytest.mark.latest_version('2.02'))
output = collections.namedtuple('output', 'version expected_length')
return output(version=request.param[0], expected_length=request.param[1])
@pytest.fixture
def schema_ruleset():
"""Return a schema with the Standard Ruleset added.
Returns:
A valid Activity Schema with the Standard Ruleset added.
Todo:
Stop this being fixed to 2.02.
"""
schema = iati.default.activity_schema('2.02', False)
ruleset = iati.default.ruleset('2.02')
schema.rulesets.add(ruleset)
return schema
|
Python
| 0.000195
|
@@ -291,43 +291,8 @@
n',\n
- 'iati.tests.fixtures.utility',\n
|
57451e92f368a7150f66ccfd138be8bd6a0c43fa
|
disable filtering bug
|
ielex/lexicon/views.py
|
ielex/lexicon/views.py
|
from textwrap import dedent
import time
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
# from django.shortcuts import render_to_response
from ielex.lexicon.models import *
from ielex.shortcuts import render_template
from ielex.views import get_sort_order
from ielex.views import ChooseNexusOutputForm
def list_nexus(request):
form = ChooseNexusOutputForm()
return render_template(request, "nexus_list.html", {"form":form})
@login_required
def write_nexus(request): #, language_list=None):
# TODO this still ignores the reliability rating
start_time = time.time()
assert request.method == 'POST'
# Create the HttpResponse object with the appropriate header.
response = HttpResponse(mimetype='text/plain')
# response['Content-Disposition'] = 'attachment; filename=ielex.nex'
# get data together
#form = ChooseNexusOutputForm(request.POST)
language_list_id = request.POST["language_list"]
languages = Language.objects.filter(
id__in=LanguageList.objects.get(
id=language_list_id).language_id_list).order_by(get_sort_order(request))
language_names = ["'"+name+"'" for name in
languages.values_list("ascii_name", flat=True)]
meaning_list_id = request.POST["meaning_list"]
meanings = Meaning.objects.filter(id__in=MeaningList.objects.get(
id=meaning_list_id).meaning_id_list)
max_len = max([len(l) for l in language_names])
reliability = request.POST.getlist["reliability"]
cognate_class_ids = CognateSet.objects.all().values_list("id", flat=True)
data = {}
for cc in cognate_class_ids:
language_ids = CognateSet.objects.get(id=cc).lexeme_set.filter(
meaning__in=meanings).values_list('language', flat=True)
# something like the following gets reliability ratings too:
# [cj.lexeme.id for cj in cs.cognatejudgement_set.all() if
# (cj.reliability_ratings & reliability) and (cj.lexeme.meaning in
# meanings)]
if language_ids:
data[cc] = language_ids
# print out response
print>>response, dedent("""\
#NEXUS
[ Citation: ]
[ Dunn, Michael; Ludewig, Julia. 2009. IELex (Indo-European ]
[ Lexicon) Database. Max Planck Institute for Psycholinguistics, ]
[ Nijmegen. ]
""")
print>>response, "[ Language list: %s ]" % LanguageList.objects.get(
id=language_list_id).name
print>>response, "[ Meaning list: %s ]" % MeaningList.objects.get(
id=meaning_list_id).name
print>>response, "[ Reliability: %s ]" %", ".join(
request.POST.getlist("reliability"))
print>>response, "[ File generated: %s ]\n" % time.strftime("%Y-%m-%d %H:%M:%S",
time.localtime())
print>>response, dedent("""\
begin taxa;
dimensions ntax=%s;
taxlabels %s;
end;
""" % (len(languages), " ".join(language_names)))
print>>response, dedent("""\
begin characters;
dimensions nchar=%s;
format symbols="01";
matrix""" % len(data))
for language in languages:
row = []
for cc in sorted(data):
if language.id in data[cc]:
row.append("1")
else:
row.append("0")
print>>response, " '%s'%s%s" % (language.ascii_name,
" "*(max_len - len(language.ascii_name)), "".join(row))
print>>response, " ;\nend;"
# timing
seconds = int(time.time() - start_time)
minutes = seconds // 60
seconds %= 60
print>>response, "[ Processing time: %02d:%02d ]" % (minutes, seconds)
return response
|
Python
| 0.000001
|
@@ -1484,16 +1484,18 @@
s])\n\n
+ #
reliabi
@@ -1521,17 +1521,17 @@
.getlist
-[
+(
"reliabi
@@ -1535,17 +1535,17 @@
ability"
-]
+)
\n\n co
@@ -2711,32 +2711,34 @@
ist_id).name\n
+ #
print>>response
@@ -2772,24 +2772,26 @@
".join(\n
+ #
req
|
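Decoded, the hunks show the underlying bug: `getlist` is a method on Django's `QueryDict`, so subscripting it with `[...]` raises a `TypeError`; the commit rewrites it as a call and then comments the reliability block out entirely. A stand-in class (hypothetical, not Django's) reproduces the failure mode:

```python
class FakeQueryDict(object):
    """Minimal stand-in for django.http.QueryDict."""
    def getlist(self, key):
        return ['high', 'medium']

post = FakeQueryDict()
try:
    post.getlist["reliability"]          # buggy form from the old file
except TypeError as exc:
    print("subscripting a method fails:", exc)
print("calling it works:", post.getlist("reliability"))
```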
44687242b3c99a08c903ef3d30685814241eb964
|
add clean up method to remove test images.
|
imager_images/tests.py
|
imager_images/tests.py
|
import factory
from django.test import TestCase
from imager_images.models import Album, Photo
from django.contrib.auth.models import User
from django.core.files.uploadedfile import SimpleUploadedFile
import datetime
import os
THE_FILE = SimpleUploadedFile('test.png', 'a photo')
class UserFactory(factory.django.DjangoModelFactory):
class Meta:
model = User
django_get_or_create = ('username', )
username = 'john'
class PhotoTestCase(TestCase):
def setUp(self):
UserFactory()
def test_photo_has_user(self):
user_john = User.objects.get(username='john')
photo1 = Photo()
photo1.user = user_john
photo1.image = THE_FILE
photo1.save()
self.assertEquals(Photo.objects.all()[0].user, user_john)
def test_photo_metadata(self):
user_john = User.objects.get(username='john')
photo1 = Photo()
photo1.user = user_john
photo1.image = THE_FILE
photo1.title = "Image Title"
photo1.description = "An Image"
photo1.published = "pvt"
photo1.save()
the_photo = Photo.objects.all()[0]
self.assertEquals(the_photo.title, "Image Title")
self.assertEquals(the_photo.description, "An Image")
self.assertEquals(the_photo.published, "pvt")
self.assertEquals(the_photo.date_uploaded, datetime.date.today())
self.assertEquals(the_photo.date_modified, datetime.date.today())
class AlbumTestCase(TestCase):
def setUp(self):
UserFactory()
UserFactory(username='jane')
self.file = SimpleUploadedFile('test.png', 'a photo')
def test_album_owner(self):
user_john = User.objects.get(username='john')
album1 = Album()
album1.user = user_john
album1.save()
self.assertEqual(Album.objects.all()[0].user, user_john)
def test_photo_in_album(self):
user_john = User.objects.get(username='john')
photo1 = Photo()
photo1.user = user_john
photo1.image = THE_FILE
album1 = Album()
album1.user = user_john
album1.save()
photo1.save()
photo1.albums.add(album1)
self.assertIn(photo1, album1.photos.all())
def test_photos_in_album(self):
user_john = User.objects.get(username='john')
photo1 = Photo()
photo1.user = user_john
photo1.image = THE_FILE
photo2 = Photo()
photo2.user = user_john
photo2.image = THE_FILE
album1 = Album()
album1.user = user_john
album1.save()
photo1.save()
photo2.save()
photo1.albums.add(album1)
photo2.albums.add(album1)
self.assertIn(photo1, album1.photos.all())
self.assertIn(photo2, album1.photos.all())
def test_photo_in_multiple_albums(self):
user_john = User.objects.get(username='john')
photo1 = Photo()
photo1.user = user_john
photo1.image = THE_FILE
album1 = Album()
album1.user = user_john
album2 = Album()
album2.user = user_john
album1.save()
album2.save()
photo1.save()
photo1.albums.add(album1)
photo1.albums.add(album2)
self.assertIn(photo1, album1.photos.all())
self.assertIn(photo1, album2.photos.all())
def test_album_metadata(self):
user_john = User.objects.get(username='john')
album1 = Album()
album1.user = user_john
album1.title = "Album Title"
album1.description = "An Album"
album1.published = "pvt"
album1.save()
the_photo = Album.objects.all()[0]
self.assertEquals(the_photo.title, "Album Title")
self.assertEquals(the_photo.description, "An Album")
self.assertEquals(the_photo.published, "pvt")
self.assertEquals(the_photo.date_uploaded, datetime.date.today())
self.assertEquals(the_photo.date_modified, datetime.date.today())
# os.remove('media/imager_images/test*')
|
Python
| 0
|
@@ -218,16 +218,28 @@
mport os
+\nimport glob
\n\nTHE_FI
@@ -283,24 +283,126 @@
a photo')\n\n\n
+def clean_up():\n for file in glob.glob("media/imager_images/test*.png"):\n os.remove(file)\n\n\n
class UserFa
@@ -884,32 +884,51 @@
user, user_john)
+\n clean_up()
\n\n def test_p
@@ -1587,16 +1587,35 @@
oday())\n
+ clean_up()\n
\n\nclass
@@ -2011,16 +2011,35 @@
er_john)
+\n clean_up()
\n\n de
@@ -2387,32 +2387,51 @@
m1.photos.all())
+\n clean_up()
\n\n def test_p
@@ -2968,32 +2968,51 @@
m1.photos.all())
+\n clean_up()
\n\n def test_p
@@ -3534,16 +3534,35 @@
s.all())
+\n clean_up()
\n\n de
@@ -4209,45 +4209,15 @@
-# os.remove('media/imager_images/test*'
+clean_up(
)\n
|
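Decoded, the diff adds `import glob`, a module-level `clean_up()` helper, and a call to it at the end of each test, replacing the commented-out `os.remove('media/imager_images/test*')` (which would fail anyway, since `os.remove` takes a literal path and does not expand wildcards). The helper, reassembled from the hunk with conventional indentation:

```python
import glob
import os

def clean_up():
    # os.remove() cannot expand wildcards, so glob does the matching.
    for file in glob.glob("media/imager_images/test*.png"):
        os.remove(file)
```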
0c160c8e787a9019571f358b70633efa13cad466
|
Support for inbox.util.eas in the /inbox-eas repo; this is where EAS-specific util code would live.
|
inbox/util/__init__.py
|
inbox/util/__init__.py
|
""" Non-server-specific utility modules. These shouldn't depend on any code
from the inbox module tree!
Don't add new code here! Find the relevant submodule, or use misc.py if
there's really no other place.
"""
|
Python
| 0
|
@@ -217,8 +217,115 @@
ce.\n"""\n
+# Allow out-of-tree submodules.\nfrom pkgutil import extend_path\n__path__ = extend_path(__path__, __name__)\n
|
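The appended lines make `inbox.util` extensible: `pkgutil.extend_path` scans `sys.path` for other distributions that also provide an `inbox/util` directory (such as the `/inbox-eas` repo named in the subject) and merges them into the package's `__path__`. Reassembled from the hunk; note it is only meaningful inside a package's `__init__.py`, where `__path__` is defined:

```python
# Allow out-of-tree submodules (e.g. inbox.util.eas) to attach here.
# Only valid inside inbox/util/__init__.py, where __path__ exists.
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
```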
6c28b693fdcf6a1dc481b486c6c6233ae08d72e1
|
exclude thread itself from duplicates search when saving edits
|
askapp/forms.py
|
askapp/forms.py
|
from snowpenguin.django.recaptcha2.fields import ReCaptchaField
from snowpenguin.django.recaptcha2.widgets import ReCaptchaWidget
from registration.forms import RegistrationFormTermsOfService
from django.utils.translation import ugettext_lazy as _
from django import forms
from .models import Profile, Thread, Post
class RecaptchaRegistrationForm(RegistrationFormTermsOfService):
captcha = ReCaptchaField(widget=ReCaptchaWidget())
form_control = {'class': 'form-control'}
class ProfileForm(forms.ModelForm):
is_active = forms.BooleanField(required=False)
class Meta:
model = Profile
fields = ('avatar', 'country', 'city', 'about')
widgets = {
'country': forms.Select(attrs=form_control),
'city': forms.TextInput(attrs={'class': 'form-control', 'placeholder': _('Enter your city')}),
'about': forms.Textarea(attrs={'class': 'form-control', 'rows': 3}),
}
class AskappClearableFileInput(forms.widgets.ClearableFileInput):
template_with_initial = (
'%(clear_template)s<br />%(input_text)s: %(input)s'
)
class ThreadForm(forms.ModelForm):
class Meta:
model = Thread
fields = ('thread_type', 'original', 'link', 'title', 'text', 'tags', 'image')
widgets = {
'original': forms.TextInput(),
'image': AskappClearableFileInput()
}
error_messages = {
'original': {
'invalid_choice': _('This thread is not found'),
},
}
def __init__(self, user, *args, **kwargs):
super(ThreadForm, self).__init__(*args, **kwargs)
if self.instance and not self.instance.id:
self.instance.user = user
elif not user.is_staff:
self.fields.pop('thread_type')
def clean(self):
cleaned_data = super(ThreadForm, self).clean()
link = cleaned_data.get("link")
thread_type = cleaned_data.get("thread_type")
#if thread_type and self.initial.get('thread_type', thread_type) != thread_type and not self.user.is_staff and not self.has_error('title'):
# self.add_error('title', 'You are not allowed to change the thread type')
if thread_type in Thread.TYPES_WITH_LINK and not self.has_error('link'):
if not link:
msg = _("This field is required")
self.add_error('link', msg)
else:
youtube_info = Thread(link=link).parse_youtube_url()
if youtube_info:
exists = Thread.objects.filter(link__contains=youtube_info['id'])
if len(exists):
msg = _("Sorry, someone has already posted this video")
self.add_error('link', msg)
elif thread_type == Thread.YOUTUBE:
msg = _("This is not a Youtube URL")
self.add_error('link', msg)
elif self.has_error('link') and thread_type not in Thread.TYPES_WITH_LINK:
del self.errors['link']
class ReplyForm(forms.ModelForm):
is_answer = forms.BooleanField(required=False)
class Meta:
model = Post
fields = ('text', 'is_answer')
widgets = {
'text': forms.Textarea(attrs={'class': 'form-control', 'rows': 5}),
}
|
Python
| 0
|
@@ -2602,16 +2602,45 @@
o['id'])
+.exclude(id=self.instance.id)
\n
|
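Decoded, the fix appends `.exclude(id=self.instance.id)` to the duplicate-video queryset, so a thread being edited no longer collides with its own link. The same self-exclusion pattern without the ORM (the sample data is invented for illustration):

```python
threads = [
    {'id': 1, 'link': 'https://youtu.be/abc123'},
    {'id': 2, 'link': 'https://youtu.be/abc123'},  # the thread being edited
]
editing_id = 2
duplicates = [t for t in threads
              if 'abc123' in t['link'] and t['id'] != editing_id]
print(duplicates)  # only thread 1 remains a duplicate candidate
```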
933a082a76c6c9b72aaf275f45f0d155f66eeacf
|
Fix Python 3.3 calling another virtualenv as a subprocess.
|
asv/__init__.py
|
asv/__init__.py
|
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
|
Python
| 0.000004
|
@@ -191,8 +191,289 @@
terals)\n
+\nimport sys\n\nif sys.version_info >= (3, 3):\n # OS X framework builds of Python 3.3 can not call other 3.3\n # virtualenvs as a subprocess because `__PYENV_LAUNCHER__` is\n # inherited.\n if os.environ.get('__PYVENV_LAUNCHER__'):\n os.unsetenv('__PYVENV_LAUNCHER__')\n
|
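Decoded, the hunk adds the guard below to `asv/__init__.py`. As committed it imports only `sys` but also uses `os`, so `os` must already be available elsewhere in the file; this standalone version adds both imports:

```python
import os
import sys

if sys.version_info >= (3, 3):
    # OS X framework builds of Python 3.3 can not call other 3.3
    # virtualenvs as a subprocess because `__PYVENV_LAUNCHER__` is
    # inherited by the child and points it back at the parent env.
    if os.environ.get('__PYVENV_LAUNCHER__'):
        os.unsetenv('__PYVENV_LAUNCHER__')
```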
3eddbd56328e245a1f952dccbe38d121657640c3
|
Add ends_with flag to fabfile if we want it to end early.
|
auto/fabfile.py
|
auto/fabfile.py
|
from fabric.api import *
from ssh_util import *
from collections import OrderedDict
import os, sys, json
VERBOSE = False
TASKS = [
('local', ['dump_api']),
('remote', ['parse_api', '-m 8']),
('remote', ['scrape', '-m 8']),
('local', ['download']),
('remote', ['extract']),
('local', ['create_dockets']),
('remote', ['scrape_dockets', '-m 8'])
]
ADMINS = []
EMAIL_SENDER = ''
EMAIL_API_KEY = ''
LOCK_DIR = '/tmp'
try:
from local_settings import *
except:
pass
def send_email(recipients, subject, message):
from postmark import PMMail
message = PMMail(
to = ','.join(recipients),
subject = '[regs] %s' % subject,
text_body = message,
api_key = EMAIL_API_KEY,
sender = EMAIL_SENDER
)
message.send(test=False)
def run_local(command):
os.chdir(os.path.expanduser('~/regulations-scraper/regscrape'))
out = local(' '.join([sys.executable, command]), capture=True)
return out
def run_remote(command):
with cd('~/regulations-scraper/regscrape'):
with prefix('source ~/.virtualenvs/scraper/bin/activate'):
return run(command)
def handle_completion(message, results):
output = '%s\nComplete results:\n%s' % (message, json.dumps(results, indent=4))
print output
if ADMINS:
send_email(ADMINS, message, output)
def acquire_lock():
lock_path = os.path.join(LOCK_DIR, 'regs.lock')
if os.path.exists(lock_path):
raise RuntimeError("Can't acquire lock.")
else:
lock = open(lock_path, 'w')
lock.write(str(os.getpid()))
lock.close()
def release_lock():
lock_path = os.path.join(LOCK_DIR, 'regs.lock')
os.unlink(lock_path)
@hosts(ssh_config('scraper'))
def run_regs(start_with='dump_api'):
try:
# use a lock file to keep multiple instances from trying to run simultaneously, which, among other things, consumes all of the memory on the high-CPU instance
acquire_lock()
except:
print 'Unable to acquire lock.'
if ADMINS:
send_email(ADMINS, "Aborting: can't acquire lock", "Can't start processing due to inability to acquire lock.")
sys.exit(1)
tasks = TASKS[[i for i in range(len(TASKS)) if TASKS[i][1][0] == start_with][0]:] # eep! finds the thing to start with, then takes the subset of TASKS from then on
runners = {
'remote': run_remote,
'local': run_local
}
results = OrderedDict()
for func, command in tasks:
try:
output = runners[func](' '.join(['./run.py'] + command + ['--parsable']))
try:
results[command[0]] = json.loads(output)
except ValueError:
results[command[0]] = 'unable to decode results'
if VERBOSE and ADMINS:
send_email(ADMINS, 'Results of %s' % command[0], 'Results of %s:\n%s' % (command[0], json.dumps(results[command[0]], indent=4)))
except SystemExit:
results[command[0]] = 'failed'
handle_completion('Aborting at step: %s' % command[0], results)
sys.exit(1)
handle_completion('All steps completed.', results)
release_lock()
|
Python
| 0
|
@@ -1775,16 +1775,43 @@
ump_api'
+, end_with='scrape_dockets'
):\n t
@@ -2240,22 +2240,25 @@
-tasks = TASKS[
+first_task_idx =
[i f
@@ -2322,92 +2322,144 @@
][0]
-:] # eep! finds the thing to start with, then takes the subset of TASKS from then on
+\n last_task_idx = [i for i in range(len(TASKS)) if TASKS[i][1][0] == end_with][0]\n tasks = TASKS[first_task_idx:(last_task_idx+1)]
\n
|
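Decoded, the commit replaces the open-ended slice `TASKS[first:]` with two index lookups so a run can also stop early at `end_with`. The selection logic in isolation (`select_tasks` and the trimmed task list are invented for illustration):

```python
TASKS = [
    ('local',  ['dump_api']),
    ('remote', ['parse_api', '-m 8']),
    ('remote', ['scrape', '-m 8']),
    ('local',  ['download']),
]

def select_tasks(start_with, end_with):
    first = [i for i in range(len(TASKS)) if TASKS[i][1][0] == start_with][0]
    last = [i for i in range(len(TASKS)) if TASKS[i][1][0] == end_with][0]
    return TASKS[first:last + 1]  # inclusive of the end_with task

print(select_tasks('parse_api', 'scrape'))  # the two middle tasks
```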
6535495c6bbe17122c86eb657243d675300cc382
|
add visit_Attribute to adjust numpy fns
|
autodiff/ast.py
|
autodiff/ast.py
|
import logging
import meta
import ast
import numpy as np
import theano
import theano.tensor as T
logger = logging.getLogger('pyautodiff')
def istensor(x):
tensortypes = (theano.tensor.TensorConstant,
theano.tensor.TensorVariable)
return isinstance(x, tensortypes)
def isvar(x):
vartypes = (theano.tensor.sharedvar.SharedVariable,
theano.tensor.TensorConstant,
theano.tensor.TensorVariable)
return isinstance(x, vartypes)
def get_ast(func, flags=0):
func_def = meta.decompiler.decompile_func(func)
if isinstance(func_def, ast.Lambda):
func_def = ast.FunctionDef(name='<lambda>', args=func_def.args,
body=[ast.Return(func_def.body)],
decorator_list=[])
assert isinstance(func_def, ast.FunctionDef)
return func_def
def print_ast(ast):
meta.asttools.print_ast(ast)
def print_source(ast):
meta.asttools.python_source(ast)
class TheanoTransformer(ast.NodeTransformer):
def __init__(self):
super(TheanoTransformer, self).__init__()
self.smap = dict()
def ast_wrap(self, node, method_name):
wrapped = ast.Call(args=[node],
func=ast.Attribute(attr=method_name,
ctx=ast.Load(),
value=ast.Name(ctx=ast.Load(),
id='self')),
keywords=[],
kwargs=None,
starargs=None)
return wrapped
def shadow(self, x):
if not isinstance(x, (int, float, np.ndarray)):
return x
# take special care with small ints, because CPYthon caches them.
# This makes it impossible to tell one from the other.
if isinstance(x, int) and -5 <= x <= 256:
x = np.int_(x)
elif isinstance(x, float):
x = np.float_(x)
if getattr(x, 'dtype', None) == bool:
logger.info('Warning: Theano has no bool type; upgrading to int8.')
x = x.astype('int8')
sym_x = theano.shared(x)
return self.smap.setdefault(id(x), sym_x)
def visit_Name(self, node):
self.generic_visit(node)
if isinstance(node.ctx, ast.Load):
node = self.ast_wrap(node, 'shadow')
return node
return node
def test_run(self, f):
a = get_ast(f)
self.visit(a)
a = ast.fix_missing_locations(a)
new_globals = globals()
new_globals.update({'self' : self})
new_f = meta.decompiler.compile_func(a, '<TheanoTransformer-AST>', new_globals)
return new_f
|
Python
| 0
|
@@ -2270,24 +2270,393 @@
x), sym_x)\n\n
+ def switch_numpy_theano(self, func):\n # if the function comes from numpy...\n if ((getattr(func, '__module__', None)\n and func.__module__.startswith('numpy'))\n or isinstance(func, np.ufunc)):\n\n # try to get the theano version...\n return getattr(T, func.__name__, func)\n else:\n return func\n\n
def visi
@@ -2819,16 +2819,145 @@
urn node
+\n\n def visit_Attribute(self, node):\n self.generic_visit(node)\n node = self.ast_wrap(node, 'switch_numpy_theano')
\n
|
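The added `switch_numpy_theano` maps a numpy callable to the Theano tensor function of the same name via `getattr(T, func.__name__, func)`, falling back to the original when Theano has no counterpart. The same name-based dispatch, sketched against `math` so it runs without Theano installed:

```python
import math
import numpy as np

def switch_module(func, target=math):
    # Mirror the commit's test: numpy by module name, or any ufunc.
    from_numpy = ((getattr(func, '__module__', None) or '').startswith('numpy')
                  or isinstance(func, np.ufunc))
    if from_numpy:
        return getattr(target, func.__name__, func)  # fall back to func
    return func

print(switch_module(np.sin))  # math.sin, found by name
print(switch_module(len))     # unchanged: not a numpy callable
```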
91ef2866d14348971326df39d7868ad5c424b64c
|
remove the 10 article limit that was used for testing
|
autoindex_sk.py
|
autoindex_sk.py
|
#!/usr/bin/env python3
import sys
import csv
from bs4 import BeautifulSoup
import autoindex
from rdflib import Graph, URIRef, Literal
from rdflib.namespace import DC, DCTERMS, SKOS, XSD
def autoindex_doc(text, url, title, date, author, place):
g = Graph()
uri = URIRef(url)
g.add((uri, DCTERMS.title, Literal(title, 'fi')))
g.add((uri, DCTERMS.issued, Literal(date, datatype=XSD.date)))
if author:
g.add((uri, DCTERMS.creator, Literal(author, 'fi')))
if place:
g.add((uri, DCTERMS.spatial, Literal(place, 'fi')))
results = autoindex.autoindex(text, 'yso-finna-fi', threshold=0.85, maxhits=3)
for result in results:
g.add((uri, DCTERMS.subject, URIRef(result['uri'])))
g.add((URIRef(result['uri']), SKOS.prefLabel, Literal(result['label'], 'fi')))
return g
def html_to_text(html):
soup = BeautifulSoup(html, 'lxml')
return soup.get_text()
reader = csv.reader(open(sys.argv[1], 'r'), delimiter='|')
n = 0
for row in reader:
id = row[0]
title = html_to_text(row[1])
date = row[2].strip()
author = row[3].strip()
place = row[4].strip()
text = title + " " + html_to_text(row[6])
url = "http://sk.example.com/%s" % id
g = autoindex_doc(text, url, title, date, author, place)
g.serialize(destination=sys.stdout.buffer, format='nt')
n += 1
if n == 10:
break
|
Python
| 0
|
@@ -981,14 +981,8 @@
|')\n
-n = 0\n
for
@@ -1344,47 +1344,6 @@
nt')
-\n n += 1\n if n == 10:\n break
\n\n
|
7b1bcc5114f5fcfab9b5a827529a6388389df690
|
Version bump
|
aws/__init__.py
|
aws/__init__.py
|
__version__ = '0.2.2'
|
Python
| 0.000001
|
@@ -12,11 +12,11 @@
= '0.2.
-2
+3
'\n
|
5d21942823ea21a3c2eb38e43b4b8b4fa2ec2ac1
|
Allow mayday.us for CORS
|
backend/util.py
|
backend/util.py
|
"""General utilities."""
import urlparse
import logging
def ConstantTimeIsEqual(a, b):
"""Securely compare two strings without leaking timing information."""
if len(a) != len(b):
return False
acc = 0
for x, y in zip(a, b):
acc |= ord(x) ^ ord(y)
return acc == 0
# TODO(hjfreyer): Pull into some kind of middleware?
def EnableCors(handler):
"""Inside a request, set the headers to allow being called cross-domain."""
if 'Origin' in handler.request.headers:
origin = handler.request.headers['Origin']
_, netloc, _, _, _, _ = urlparse.urlparse(origin)
if not (netloc == 'mayone.us' or netloc.endswith('.mayone.us')):
logging.warning('Invalid origin: ' + origin)
handler.error(403)
return
handler.response.headers.add_header('Access-Control-Allow-Origin', origin)
handler.response.headers.add_header('Access-Control-Allow-Methods',
'GET, POST')
handler.response.headers.add_header('Access-Control-Allow-Headers',
'content-type, origin')
|
Python
| 0
|
@@ -642,16 +642,86 @@
one.us')
+ or\n netloc == 'mayday.us' or netloc.endswith('.mayday.us')
):\n
|
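Decoded, the condition now also accepts `mayday.us` and its subdomains. The split into an exact match plus `endswith('.domain')` matters: it allows subdomains while rejecting look-alikes such as `evilmayday.us`. The check in isolation (Python 3's `urllib.parse` here; the original file uses Python 2's `urlparse`):

```python
from urllib.parse import urlparse

ALLOWED = ('mayone.us', 'mayday.us')

def origin_allowed(origin):
    netloc = urlparse(origin).netloc
    return any(netloc == d or netloc.endswith('.' + d) for d in ALLOWED)

print(origin_allowed('https://app.mayday.us'))  # True
print(origin_allowed('https://evilmayday.us'))  # False
```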
9b94e34352485ad1054a6fc79ea37bc889949b57
|
print oops
|
bakthat/sync.py
|
bakthat/sync.py
|
# -*- encoding: utf-8 -*-
import logging
import socket
from bakthat.models import Backups, Config
from bakthat.conf import config
import requests
import json
log = logging.getLogger(__name__)
def bakmanager_hook(conf, backup_data, key=None):
"""First version of a hook for monitoring periodic backups with BakManager
(https://bakmanager.io).
:type conf: dict
:param conf: Current profile config
:type backup_data: dict
:param backup_data: Backup data (size)
:type key: str
:param key: Periodic backup identifier
"""
try:
if conf.get("bakmanager_token"):
bak_backup = {"key": key, "host": socket.gethostname(), "size": backup_data["size"]}
bak_payload = {"backup": json.dumps(bak_backup)}
print bak_payload
print conf.get("bakmanager_token")
r = requests.post("https://bakmanager.io/api/backups/", bak_payload, auth=(conf.get("bakmanager_token"), ""))
print r.json()
r.raise_for_status()
else:
log.error("No bakmanager_token setting for the current profile.")
except Exception, exc:
log.error("Error while submitting periodic backup to BakManager.")
log.exception(exc)
class BakSyncer():
"""Helper to synchronize change on a backup set via a REST API.
No sensitive information is transmitted except (you should be using https):
- API user/password
- a hash (hashlib.sha512) of your access_key concatened with
your s3_bucket or glacier_vault, to be able to sync multiple
client with the same configuration stored as metadata for each bakckupyy.
:type conf: dict
:param conf: Config (url, username, password)
"""
def __init__(self, conf=None):
conf = {} if conf is None else conf
sync_conf = dict(url=config.get("sync", {}).get("url"),
username=config.get("sync", {}).get("username"),
password=config.get("sync", {}).get("password"))
sync_conf.update(conf)
self.sync_auth = (sync_conf["username"], sync_conf["password"])
self.api_url = sync_conf["url"]
self.request_kwargs = dict(auth=self.sync_auth)
self.request_kwargs["headers"] = {'content-type': 'application/json', 'bakthat-client': socket.gethostname()}
self.get_resource = lambda x: self.api_url + "/{0}".format(x)
def register(self):
"""Register/create the current host on the remote server if not already registered."""
if not Config.get_key("client_id"):
r_kwargs = self.request_kwargs.copy()
r = requests.post(self.get_resource("clients"), **r_kwargs)
if r.status_code == 200:
client = r.json()
if client:
Config.set_key("client_id", client["_id"])
else:
log.error("An error occured during sync: {0}".format(r.text))
else:
log.debug("Already registered ({0})".format(Config.get_key("client_id")))
def sync(self):
"""Draft for implementing bakthat clients (hosts) backups data synchronization.
Synchronize Bakthat sqlite database via a HTTP POST request.
Backups are never really deleted from sqlite database, we just update the is_deleted key.
It sends the last server sync timestamp along with data updated since last sync.
Then the server return backups that have been updated on the server since last sync.
On both sides, backups are either created if they don't exists or updated if the incoming version is newer.
"""
log.debug("Start syncing")
self.register()
last_sync_ts = Config.get_key("sync_ts", 0)
to_insert_in_mongo = [b._data for b in Backups.search(last_updated_gt=last_sync_ts)]
data = dict(sync_ts=last_sync_ts, new=to_insert_in_mongo)
r_kwargs = self.request_kwargs.copy()
log.debug("Initial payload: {0}".format(data))
r_kwargs.update({"data": json.dumps(data)})
r = requests.post(self.get_resource("backups/sync"), **r_kwargs)
if r.status_code != 200:
log.error("An error occured during sync: {0}".format(r.text))
return
log.debug("Sync result: {0}".format(r.json()))
to_insert_in_bakthat = r.json().get("updated", [])
sync_ts = r.json().get("sync_ts")
for newbackup in to_insert_in_bakthat:
log.debug("Upsert {0}".format(newbackup))
Backups.upsert(**newbackup)
Config.set_key("sync_ts", sync_ts)
log.debug("Sync succcesful")
def reset_sync(self):
log.debug("reset sync")
Config.set_key("sync_ts", 0)
Config.set_key("client_id", None)
def sync_auto(self):
"""Trigger sync if autosync is enabled."""
if config.get("sync", {}).get("auto", False):
self.sync()
|
Python
| 0.000039
|
@@ -764,85 +764,8 @@
p)}\n
- print bak_payload\n print conf.get("bakmanager_token")\n
@@ -886,35 +886,8 @@
"))\n
- print r.json()\n
|
7e742489017bc444f496b1f4cf6ed391caf49ba2
|
allow enter to close change note type diag (#651)
|
aqt/modelchooser.py
|
aqt/modelchooser.py
|
# -*- coding: utf-8 -*-
# Copyright: Damien Elmes <anki@ichi2.net>
# License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
from aqt.qt import *
from anki.hooks import addHook, remHook, runHook
from aqt.utils import shortcut
import aqt
class ModelChooser(QHBoxLayout):
def __init__(self, mw, widget, label=True):
QHBoxLayout.__init__(self)
self.widget = widget
self.mw = mw
self.deck = mw.col
self.label = label
self.setMargin(0)
self.setSpacing(8)
self.setupModels()
addHook('reset', self.onReset)
self.widget.setLayout(self)
def setupModels(self):
if self.label:
self.modelLabel = QLabel(_("Type"))
self.addWidget(self.modelLabel)
# models box
self.models = QPushButton()
#self.models.setStyleSheet("* { text-align: left; }")
self.models.setToolTip(shortcut(_("Change Note Type (Ctrl+N)")))
s = QShortcut(QKeySequence(_("Ctrl+N")), self.widget)
s.connect(s, SIGNAL("activated()"), self.onModelChange)
self.addWidget(self.models)
self.connect(self.models, SIGNAL("clicked()"), self.onModelChange)
# layout
sizePolicy = QSizePolicy(
QSizePolicy.Policy(7),
QSizePolicy.Policy(0))
self.models.setSizePolicy(sizePolicy)
self.updateModels()
def cleanup(self):
remHook('reset', self.onReset)
def onReset(self):
self.updateModels()
def show(self):
self.widget.show()
def hide(self):
self.widget.hide()
def onEdit(self):
import aqt.models
aqt.models.Models(self.mw, self.widget)
def onModelChange(self):
from aqt.studydeck import StudyDeck
current = self.deck.models.current()['name']
# edit button
edit = QPushButton(_("Manage"))
self.connect(edit, SIGNAL("clicked()"), self.onEdit)
def nameFunc():
return sorted(self.deck.models.allNames())
ret = StudyDeck(
self.mw, names=nameFunc,
accept=_("Choose"), title=_("Choose Note Type"),
help="_notes", current=current, parent=self.widget,
buttons=[edit], cancel=False)
if not ret.name:
return
m = self.deck.models.byName(ret.name)
self.deck.conf['curModel'] = m['id']
cdeck = self.deck.decks.current()
cdeck['mid'] = m['id']
self.deck.decks.save(cdeck)
runHook("currentModelChanged")
self.mw.reset()
def updateModels(self):
self.models.setText(self.deck.models.current()['name'])
|
Python
| 0
|
@@ -1083,32 +1083,74 @@
.onModelChange)\n
+ self.models.setAutoDefault(False)\n
self.add
|
122339c5139b570940d9ed396a066b550965a13a
|
Add default affiliation to person
|
dexter/models/person.py
|
dexter/models/person.py
|
# -*- coding: utf-8 -*-
from itertools import groupby
from sqlalchemy import (
Column,
DateTime,
ForeignKey,
Integer,
String,
func,
)
from sqlalchemy.orm import relationship
from wtforms import StringField, validators, SelectField, HiddenField
from .support import db
from ..forms import Form, MultiCheckboxField
class Person(db.Model):
"""
A person, with a bit more info than just the 'person' entity. Multiple 'person' entities
can link to a single person.
"""
__tablename__ = "people"
id = Column(Integer, primary_key=True)
name = Column(String(100), index=True, nullable=False, unique=True)
gender_id = Column(Integer, ForeignKey('genders.id'))
race_id = Column(Integer, ForeignKey('races.id'))
created_at = Column(DateTime(timezone=True), index=True, unique=False, nullable=False, server_default=func.now())
updated_at = Column(DateTime(timezone=True), server_default=func.now(), onupdate=func.current_timestamp())
# Associations
gender = relationship("Gender", lazy=False)
race = relationship("Race", lazy=False)
def entity(self):
""" Get an entity that is linked to this person. Because many entities can be linked, we
try find the one with an exact name match before just returning any old one. """
from . import Entity
last = None
# get all the entities and try to find the one that has an exact
# name match
for e in self.entities:
last = e
if e.name == self.name:
return e
# no exact match, just return the last one
return last
def get_alias_entity_ids(self):
"""
Return a list of entity ids that are aliases for this person.
"""
return [e.id for e in self.entities]
def set_alias_entity_ids(self, ids):
"""
Updated entities linked to this person by setting a list of
entity ids.
"""
from . import Entity
self.entities = Entity.query.filter(Entity.id.in_(ids)).all()
alias_entity_ids = property(get_alias_entity_ids, set_alias_entity_ids)
def json(self):
return {
'id': self.id,
'name': self.name,
'race': self.race.name if self.race else None,
'gender': self.gender.name if self.gender else None,
}
def __repr__(self):
return "<Person id=%s, name=\"%s\">" % (self.id, self.name.encode('utf-8'))
@classmethod
def get_or_create(cls, name, gender=None, race=None):
from . import Entity
p = Person.query.filter(Person.name == name).first()
if not p:
p = Person()
p.name = name
if gender:
p.gender = gender
if race:
p.race = race
# link entities that are similar
for e in Entity.query.filter(Entity.name == name, Entity.group == 'person', Entity.person == None).all():
e.person = p
db.session.add(p)
# force a db write (within the transaction) so subsequent lookups
# find this entity
db.session.flush()
return p
class PersonForm(Form):
gender_id = SelectField('Gender', default='')
race_id = SelectField('Race', default='')
alias_entity_ids = MultiCheckboxField('Aliases')
def __init__(self, *args, **kwargs):
super(PersonForm, self).__init__(*args, **kwargs)
from . import Entity
self.gender_id.choices = [['', '(unknown gender)']] + [[str(g.id), g.name] for g in Gender.query.order_by(Gender.name).all()]
self.race_id.choices = [['', '(unknown race)']] + [[str(r.id), r.name] for r in Race.query.order_by(Race.name).all()]
# we don't care if the entities are in the valid list or not
self.alias_entity_ids.pre_validate = lambda form: True
class Gender(db.Model):
__tablename__ = "genders"
id = Column(Integer, primary_key=True)
name = Column(String(150), index=True, nullable=False, unique=True)
def __repr__(self):
return "<Gender name='%s'>" % (self.name)
def abbr(self):
return self.name[0].upper()
@classmethod
def male(cls):
return Gender.query.filter(Gender.name == 'Male').one()
@classmethod
def female(cls):
return Gender.query.filter(Gender.name == 'Female').one()
@classmethod
def create_defaults(cls):
text = """
Female
Male
Other: Transgender, Transsexual
"""
genders = []
for s in text.strip().split("\n"):
g = Gender()
g.name = s.strip()
genders.append(g)
return genders
class Race(db.Model):
__tablename__ = "races"
id = Column(Integer, primary_key=True)
name = Column(String(50), index=True, nullable=False, unique=True)
def __repr__(self):
return "<Race name='%s'>" % (self.name)
def abbr(self):
return self.name[0].upper()
@classmethod
def create_defaults(self):
text = """
Black
White
Coloured
Asian
Indian
Other
"""
races = []
for s in text.strip().split("\n"):
g = Race()
g.name = s.strip()
races.append(g)
return races
|
Python
| 0.000001
|
@@ -782,16 +782,84 @@
es.id'))
+\n affiliation_id = Column(Integer, ForeignKey('affiliations.id'))
\n\n cr
@@ -1208,16 +1208,62 @@
y=False)
+\n affiliation = relationship("Affiliation")
\n\n de
|
52873e4238a54cb93f403d509d2bebef8971ec9b
|
Work around deprecation warning with new cssutils versions.
|
django_assets/filter/cssutils/__init__.py
|
django_assets/filter/cssutils/__init__.py
|
import logging
import logging.handlers
from django.conf import settings
from django_assets.filter import BaseFilter
__all__ = ('CSSUtilsFilter',)
class CSSUtilsFilter(BaseFilter):
"""Minifies CSS by removing whitespace, comments etc., using the Python
`cssutils <http://cthedot.de/cssutils/>`_ library.
Note that since this works as a parser on the syntax level, so invalid
CSS input could potentially result in data loss.
"""
name = 'cssutils'
def setup(self):
import cssutils
self.cssutils = cssutils
try:
# cssutils logs to stdout by default, hide that in production
if not settings.DEBUG:
log = logging.getLogger('assets.cssutils')
log.addHandler(logging.handlers.MemoryHandler(10))
cssutils.log.setlog(log)
except ImportError:
# During doc generation, Django is not going to be setup and will
# fail when the settings object is accessed. That's ok though.
pass
def apply(self, _in, out):
sheet = self.cssutils.parseString(_in.read())
self.cssutils.ser.prefs.useMinified()
out.write(sheet.cssText)
|
Python
| 0
|
@@ -822,24 +822,26 @@
ndler(10))\r\n
+\r\n
@@ -848,27 +848,291 @@
-cssutils.log.setlog
+# Newer versions of cssutils print a deprecation warning\r\n # for 'setlog'.\r\n if hasattr(cssutils.log, 'setLog'):\r\n func = cssutils.log.setLog\r\n else:\r\n func = cssutils.log.setlog\r\n func
(log
|
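Decoded, the workaround probes for the renamed method with `hasattr` and calls whichever name exists, so newer cssutils releases (`setLog`) stop emitting the deprecation warning while old ones (`setlog`) keep working. The feature-detection pattern in miniature (the `_Logger` class is a stand-in, not cssutils):

```python
class _Logger(object):
    """Stand-in exposing only the new-style name."""
    def setLog(self, log):
        print("new API received", log)

obj = _Logger()
if hasattr(obj, 'setLog'):
    func = obj.setLog
else:
    func = obj.setlog  # old releases only have this spelling
func("my-handler")     # -> new API received my-handler
```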
50f880d4f90e1629c57a89508ce930654a176af3
|
Version bump
|
bucky/__init__.py
|
bucky/__init__.py
|
# -*- coding: utf-8 -
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
#
# Copyright 2011 Cloudant, Inc.
version_info = (0, 2, 5)
__version__ = ".".join(map(str, version_info))
|
Python
| 0.000001
|
@@ -620,9 +620,9 @@
2,
-5
+6
)\n__
|
52d804aac69bceb9dee9c1b21044551b80bcdfdc
|
Fix handling default for `--output` option in `people_search` cmd.
|
linkedin_scraper/commands/people_search.py
|
linkedin_scraper/commands/people_search.py
|
from getpass import getpass
from scrapy.commands.crawl import Command as BaseCommand
def sanitize_query(query):
return query.replace(' ', '+')
class Command(BaseCommand):
def short_desc(self):
return "Scrap people from LinkedIn"
def syntax(self):
return "[options] <query>"
def add_options(self, parser):
super().add_options(parser)
parser.add_option('-u', '--username', help='Name of LinkedIn account')
parser.add_option('-p', '--password',
help='Password for LinkedIn account')
def process_options(self, args, opts):
super().process_options(args, opts)
opts.output = opts.output or 'results.csv'
people_search_options = {
'query': sanitize_query(args[0]),
'username': opts.username or input(
'Please provide your LinkedIn username: '),
'password': opts.password or getpass(
'Please provide password for your LinkedIn account: ')
}
opts.spargs.update(people_search_options)
def run(self, args, opts):
# Run people_search spider
args = ['people_search']
super().run(args, opts)
|
Python
| 0
|
@@ -619,95 +619,95 @@
-super().process_options(args, opts)\n\n opts.output = opts.output or 'results.csv'
+opts.output = opts.output or 'results.csv'\n\n super().process_options(args, opts)
\n\n
|
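The decoded hunk only swaps two statements, but the order is the fix: Scrapy's base `process_options` consumes `opts.output` (to wire up the feed exporter), so the `results.csv` default must exist before the parent runs. A toy reproduction of the ordering (the class and method bodies are illustrative, not Scrapy's):

```python
from types import SimpleNamespace

class Base:
    def process_options(self, args, opts):
        print("base sees output =", opts.output)  # exporter configured here

class Command(Base):
    def process_options(self, args, opts):
        opts.output = opts.output or 'results.csv'  # default first...
        super().process_options(args, opts)         # ...then hand off

Command().process_options([], SimpleNamespace(output=None))
# -> base sees output = results.csv
```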
6f759be609403aadfa0845f5cdad7869d49f7ebb
|
add instructions on how to request to join
|
clans/clans.py
|
clans/clans.py
|
# -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2017 SML
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import os
import re
from collections import defaultdict
import aiohttp
import crapipy
import discord
import yaml
from __main__ import send_cmd_help
from box import Box
from cogs.utils import checks
from cogs.utils.dataIO import dataIO
from discord.ext import commands
PATH = os.path.join("data", "clans")
JSON = os.path.join(PATH, "settings.json")
CONFIG_YAML = os.path.join(PATH, "config.yml")
def nested_dict():
"""Recursively nested defaultdict."""
return defaultdict(nested_dict)
class Clans:
"""Auto parse clan info and display requirements"""
def __init__(self, bot):
"""Init."""
self.bot = bot
self.settings = nested_dict()
self.settings.update(dataIO.load_json(JSON))
@checks.mod_or_permissions()
@commands.group(pass_context=True)
async def clansset(self, ctx):
"""Settings"""
if ctx.invoked_subcommand is None:
await send_cmd_help(ctx)
@checks.mod_or_permissions()
@clansset.command(name="config", pass_context=True, no_pm=True)
async def clansset_config(self, ctx):
"""Upload config yaml file. See config.example.yml for how to format it."""
TIMEOUT = 60.0
await self.bot.say(
"Please upload family config yaml file. "
"[Timeout: {} seconds]".format(TIMEOUT))
attach_msg = await self.bot.wait_for_message(
timeout=TIMEOUT,
author=ctx.message.author)
if attach_msg is None:
await self.bot.say("Operation time out.")
return
if not len(attach_msg.attachments):
await self.bot.say("Cannot find attachments.")
return
attach = attach_msg.attachments[0]
url = attach["url"]
async with aiohttp.ClientSession() as session:
async with session.get(url) as resp:
with open(CONFIG_YAML, "wb") as f:
f.write(await resp.read())
await self.bot.say(
"Attachment received and saved as {}".format(CONFIG_YAML))
self.settings['config'] = CONFIG_YAML
dataIO.save_json(JSON, self.settings)
@property
def clans_config(self):
if os.path.exists(CONFIG_YAML):
with open(CONFIG_YAML) as f:
config = Box(yaml.load(f))
return config
return None
@commands.command(pass_context=True, no_pm=True)
async def clans(self, ctx, *args):
"""Display clan info.
[p]clans -m Disable member count
[p]clans -t Disable clan tag
"""
await self.bot.type()
client = crapipy.AsyncClient()
config = self.clans_config
clan_tags = [clan.tag for clan in config.clans]
clans = await client.get_clans(clan_tags)
color = getattr(discord.Color, config.color)()
em = discord.Embed(
title=config.name,
description="Minimum trophies to join our Clash Royale clans. Current trophies required unless PB (personal best) is specified.",
color=color
)
badge_url = None
show_member_count = "-m" not in args
show_clan_tag = "-t" not in args
for clan in clans:
match = re.search('[\d,O]{4,}', clan.description)
pb_match = re.search('PB', clan.description)
name = clan.name
trophies = 'N/A'
if match is not None:
trophies = match.group(0)
trophies = trophies.replace(',', '')
trophies = trophies.replace('O', '0')
trophies = '{:,}'.format(int(trophies))
pb = ''
if pb_match is not None:
pb = ' PB'
member_count = ''
if show_member_count:
member_count = ', {} / 50'.format(clan.member_count)
clan_tag = ''
if show_clan_tag:
clan_tag = ', #{}'.format(clan.tag)
value = '`{trophies}{pb}{member_count}{clan_tag}`'.format(
clan_tag=clan_tag,
member_count=member_count,
trophies=trophies,
pb=pb)
em.add_field(name=name, value=value, inline=False)
if badge_url is None:
badge_url = 'https://cr-api.github.io/cr-api-assets/badge/{}.png'.format(clan.badge.key)
em.set_thumbnail(url=badge_url)
await self.bot.say(embed=em)
def check_folder():
"""Check folder."""
os.makedirs(PATH, exist_ok=True)
def check_file():
"""Check files."""
if not dataIO.is_valid_json(JSON):
dataIO.save_json(JSON, {})
def setup(bot):
"""Setup."""
check_folder()
check_file()
n = Clans(bot)
bot.add_cog(n)
|
Python
| 0
|
@@ -5507,16 +5507,217 @@
ge_url)\n
+ em.set_footer(\n text='If you meet requirements, please request to join in-app. Let us know when you have been accepted so we can update your membership roles.'\n )\n
|
637b3c36e9a5952fc29ceaa705703e94f9f172d3
|
Update app_settings.py
|
django_project/wms_client/app_settings.py
|
django_project/wms_client/app_settings.py
|
# coding=utf-8
"""Settings file for WMS Client.
"""
from django.conf import settings
# Allow base django project to override settings
default_leaflet_tiles = (
'OpenStreetMap',
'http://{s}.tile.openstreetmap.fr/hot/{z}/{x}/{y}.png',
('© <a hr ef="http://www.openstreetmap.org" target="_parent">OpenStreetMap'
'</a> and contributors, under an <a '
'href="http://www.openstreetmap.org/copyright" target="_parent">open '
'license</a>')
)
LEAFLET_TILES = getattr(settings, 'LEAFLET_TILES', default_leaflet_tiles)
settings.TEMPLATE_CONTEXT_PROCESSORS += (
'django.core.context_processors.media',
)
|
Python
| 0.000002
|
@@ -250,19 +250,16 @@
'© <a hr
-
ef="http
|
cdb4f7088ba49c0e2b590d8b818226e4e59eb45e
|
Fix tests.
|
st2client/tests/unit/test_config_parser.py
|
st2client/tests/unit/test_config_parser.py
|
# coding=utf-8
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import six
import unittest2
from st2client.config_parser import CLIConfigParser
from st2client.config_parser import CONFIG_DEFAULT_VALUES
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
CONFIG_FILE_PATH_FULL = os.path.join(BASE_DIR, '../fixtures/st2rc.full.ini')
CONFIG_FILE_PATH_PARTIAL = os.path.join(BASE_DIR, '../fixtures/st2rc.partial.ini')
CONFIG_FILE_PATH_UNICODE = os.path.join(BASE_DIR, '../fixtures/test_unicode.ini')
class CLIConfigParserTestCase(unittest2.TestCase):
def test_constructor(self):
parser = CLIConfigParser(config_file_path='doesnotexist', validate_config_exists=False)
self.assertTrue(parser)
self.assertRaises(ValueError, CLIConfigParser, config_file_path='doestnotexist',
validate_config_exists=True)
def test_parse(self):
# File doesn't exist
parser = CLIConfigParser(config_file_path='doesnotexist', validate_config_exists=False)
result = parser.parse()
self.assertEqual(CONFIG_DEFAULT_VALUES, result)
# File exists - all the options specified
expected = {
'general': {
'base_url': 'http://127.0.0.1',
'api_version': 'v1',
'cacert': 'cacartpath',
'silence_ssl_warnings': False
},
'cli': {
'debug': True,
'cache_token': False,
'timezone': 'UTC'
},
'credentials': {
'username': 'test1',
'password': 'test1',
'api_key': None
},
'api': {
'url': 'http://127.0.0.1:9101/v1'
},
'auth': {
'url': 'http://127.0.0.1:9100/'
},
'stream': {
'url': 'http://127.0.0.1:9102/v1/stream'
}
}
parser = CLIConfigParser(config_file_path=CONFIG_FILE_PATH_FULL,
validate_config_exists=False)
result = parser.parse()
self.assertEqual(expected, result)
# File exists - missing options, test defaults
parser = CLIConfigParser(config_file_path=CONFIG_FILE_PATH_PARTIAL,
validate_config_exists=False)
result = parser.parse()
self.assertTrue(result['cli']['cache_token'], True)
def test_get_config_for_unicode_char(self):
parser = CLIConfigParser(config_file_path=CONFIG_FILE_PATH_UNICODE,
validate_config_exists=False)
config = parser.parse()
if six.PY3:
self.assertEqual(config['credentials']['password'], u'测试')
else:
self.assertEqual(config['credentials']['password'], u'\u5bc6\u7801')
|
Python
| 0
|
@@ -3528,12 +3528,11 @@
'],
-u'测试
+'密码
')\n
|
4071c77a6e598c27f7a8b2195ff5e68332120615
|
Fix formatting.
|
st2common/st2common/cmd/validate_config.py
|
st2common/st2common/cmd/validate_config.py
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Script for validating a config file against a a particular config schema.
"""
import yaml
import traceback
from oslo_config import cfg
from st2common.constants.system import VERSION_STRING
from st2common.constants.exit_codes import SUCCESS_EXIT_CODE
from st2common.constants.exit_codes import FAILURE_EXIT_CODE
from st2common.util.pack import validate_config_against_schema
__all__ = [
'main'
]
def _do_register_cli_opts(opts, ignore_errors=False):
for opt in opts:
try:
cfg.CONF.register_cli_opt(opt)
except:
if not ignore_errors:
raise
def _register_cli_opts():
cli_opts = [
cfg.StrOpt('schema-path', default=None, required=True,
help='Path to the config schema to use for validation.'),
cfg.StrOpt('config-path', default=None, required=True,
help='Path to the config file to validate.'),
]
for opt in cli_opts:
cfg.CONF.register_cli_opt(opt)
def main():
_register_cli_opts()
cfg.CONF(args=None, version=VERSION_STRING)
schema_path = cfg.CONF.schema_path
config_path = cfg.CONF.config_path
print('Validating config "%s" against schema in "%s"' % (config_path, schema_path))
with open(schema_path, 'r') as fp:
config_schema = yaml.safe_load(fp.read())
with open(config_path, 'r') as fp:
config_object = yaml.safe_load(fp.read())
try:
validate_config_against_schema(config_schema=config_schema, config_object=config_object)
except Exception as e:
print('Failed to validate pack config: %s', str(e))
traceback.print_exc()
return FAILURE_EXIT_CODE
print('Config "%s" successfuly validated against schema in %s.' % (config_path, schema_path))
return SUCCESS_EXIT_CODE
|
Python
| 0.000017
|
@@ -871,25 +871,8 @@
yaml
-\nimport traceback
\n\nfr
@@ -2371,17 +2371,18 @@
fig: %s'
-,
+ %
str(e))
@@ -2386,38 +2386,8 @@
e))\n
- traceback.print_exc()\n
|
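Besides dropping the traceback, the decoded hunk fixes a classic slip: `print('... %s', str(e))` passes two arguments instead of interpolating, so the literal `%s` survives in the output. A quick demonstration:

```python
e = ValueError("boom")
print('Failed to validate pack config: %s', str(e))   # two args: %s stays literal
print('Failed to validate pack config: %s' % str(e))  # interpolated as intended
```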
80f7ec1345ffb971ba2c8de4962e10321543ed93
|
fix flake8 error.
|
django_version_viewer/tests/test_views.py
|
django_version_viewer/tests/test_views.py
|
import mock
from django.test import TestCase
import json
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.test import Client
class TestVersionViewer(TestCase):
url_django_version_viewer = reverse('django_version_viewer')
url_django_version_viewer_csv = reverse('django_version_viewer_csv')
url_django_version_viewer_toolbar = reverse('django_version_viewer_toolbar')
mock_data = [
{"key": "appdirs", "version": "1.4.3"},
{"key": "django", "version": "1.8.18"},
{"key": "six", "version": "1.10.0"}
]
def mocked_pip_get_installed_distributions(self, *args, **kwargs):
class MockPipObject:
def __init__(self, key, version):
self.key = key
self.version = version
class MockResponse:
def __init__(self, data):
self.packages = []
for _dict in data:
mocked_obj = MockPipObject(_dict['key'], _dict['version'])
self.packages.append(mocked_obj)
def get_installed_distributions(self):
return self.packages
return MockResponse(self.mock_data).get_installed_distributions()
def setUp(self):
self.admin = User.objects.create_superuser(
email="adminmail@mail.com",
username="admin_user",
password="password",
)
self.user = User.objects.create(
email="user@usermail.com",
username="regular_user",
password="password"
)
def tearDown(self):
self.admin.delete()
self.user.delete()
def test_django_version_viewer_view__dmin(self):
client = Client()
client.login(username=self.admin.username, password="password")
with mock.patch('pip.get_installed_distributions',
side_effect=self.mocked_pip_get_installed_distributions):
response = client.get(self.url_django_version_viewer)
json_response = json.loads(response.content)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(json_response), 3)
def test_django_version_viewer_view__user(self):
client = Client()
client.login(username=self.user.username, password="password")
response = client.get(self.url_django_version_viewer)
self.assertEqual(response.status_code, 403)
def test_django_version_viewer_csv_view__admin(self):
client = Client()
client.login(username=self.admin.username, password="password")
with mock.patch('pip.get_installed_distributions',
side_effect=self.mocked_pip_get_installed_distributions):
response = client.get(self.url_django_version_viewer_csv)
self.assertEqual(
response.content,
b'Package Name,Package Version\r\nappdirs,1.4.3\r\ndjango,1.8.18\r\nsix,1.10.0\r\n'
)
self.assertEqual(response.status_code, 200)
def test_django_version_viewer_csv_view__user(self):
client = Client()
client.login(username=self.user.username, password="password")
response = client.get(self.url_django_version_viewer_csv)
self.assertEqual(response.status_code, 403)
def test_django_version_viewer_toolbar_view__admin(self):
client = Client()
client.login(username=self.admin.username, password="password")
with mock.patch('pip.get_installed_distributions',
side_effect=self.mocked_pip_get_installed_distributions):
response = client.get(self.url_django_version_viewer_toolbar)
context_pakcages = response.context['packages']
for i in list(range(0, 3)):
self.assertEqual(self.mock_data[i]['key'], context_pakcages[i]['package_name'])
self.assertEqual(self.mock_data[i]['version'], context_pakcages[i]['package_version'])
self.assertEqual(response.status_code, 200)
def test_django_version_viewer_toolbar_view__user(self):
client = Client()
client.login(username=self.user.username, password="password")
response = client.get(self.url_django_version_viewer_toolbar)
self.assertEqual(response.status_code, 403)
|
Python
| 0
|
@@ -3930,32 +3930,53 @@
elf.assertEqual(
+\n
self.mock_data[i
|
1b85f00a162c298970cfbd1dfa4661c60258169e
|
Mark two svnrdump tests as XFail for ra_svn, ra_serf and ra_neon
|
subversion/tests/cmdline/svnrdump_tests.py
|
subversion/tests/cmdline/svnrdump_tests.py
|
#!/usr/bin/env python
#
# svnrdump_tests.py: Tests svnrdump's remote repository dumping capabilities.
#
# Subversion is a tool for revision control.
# See http://subversion.apache.org for more information.
#
# ====================================================================
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
######################################################################
# General modules
import sys, os
# Our testing module
import svntest
from svntest.verify import SVNUnexpectedStdout, SVNUnexpectedStderr
from svntest.verify import SVNExpectedStderr
from svntest.main import write_restrictive_svnserve_conf
from svntest.main import server_has_partial_replay
# (abbreviation)
Skip = svntest.testcase.Skip
SkipUnless = svntest.testcase.SkipUnless
XFail = svntest.testcase.XFail
Item = svntest.wc.StateItem
Wimp = svntest.testcase.Wimp
######################################################################
# Helper routines
def build_repos(sbox):
"""Build an empty sandbox repository"""
# Cleanup after the last run by removing any left-over repository.
svntest.main.safe_rmtree(sbox.repo_dir)
# Create an empty repository.
svntest.main.create_repos(sbox.repo_dir)
def run_dump_test(sbox, dumpfile_name):
"""Load a dumpfile using 'svnadmin load', dump it with 'svnrdump
dump' and check that the same dumpfile is produced"""
# Create an empty sandbox repository
build_repos(sbox)
# This directory contains all the dump files
svnrdump_tests_dir = os.path.join(os.path.dirname(sys.argv[0]),
'svnrdump_tests_data')
# Load the specified dump file into the sbox repository using
# svnadmin load
svnadmin_dumpfile = open(os.path.join(svnrdump_tests_dir,
dumpfile_name),
'rb').readlines()
svntest.actions.run_and_verify_load(sbox.repo_dir, svnadmin_dumpfile)
# Create a dump file using svnrdump
svnrdump_dumpfile = \
svntest.actions.run_and_verify_svnrdump(None, svntest.verify.AnyOutput,
[], 0, '-q', 'dump',
sbox.repo_url)
# Compare the output from stdout
svntest.verify.compare_and_display_lines(
"Dump files", "DUMP", svnadmin_dumpfile, svnrdump_dumpfile)
def run_load_test(sbox, dumpfile_name):
"""Load a dumpfile using 'svnrdump load', dump it with 'svnadmin
dump' and check that the same dumpfile is produced"""
# Create an empty sandbox repository
build_repos(sbox)
# Create the revprop-change hook for this test
svntest.actions.enable_revprop_changes(sbox.repo_dir)
# This directory contains all the dump files
svnrdump_tests_dir = os.path.join(os.path.dirname(sys.argv[0]),
'svnrdump_tests_data')
# Load the specified dump file into the sbox repository using
# svnrdump load
svnrdump_dumpfile = open(os.path.join(svnrdump_tests_dir,
dumpfile_name),
'rb').readlines()
# Set the UUID of the sbox repository to the UUID specified in the
# dumpfile ### RA layer doesn't have a set_uuid functionality
uuid = svnrdump_dumpfile[2].split(' ')[1][:-1]
svntest.actions.run_and_verify_svnadmin2("Setting UUID", None, None, 0,
'setuuid', sbox.repo_dir,
uuid)
svntest.actions.run_and_verify_svnrdump(svnrdump_dumpfile,
svntest.verify.AnyOutput,
[], 0, '-q', 'load',
sbox.repo_url)
# Create a dump file using svnadmin dump
svnadmin_dumpfile = svntest.actions.run_and_verify_dump(sbox.repo_dir, True)
# Compare the output from stdout
svntest.verify.compare_and_display_lines(
"Dump files", "DUMP", svnrdump_dumpfile, svnadmin_dumpfile)
######################################################################
# Tests
def basic_dump(sbox):
"dump the standard sbox repos"
sbox.build(read_only = True, create_wc = False)
out = \
svntest.actions.run_and_verify_svnrdump(None, svntest.verify.AnyOutput,
[], 0, '-q', 'dump',
sbox.repo_url)
if not out[0].startswith('SVN-fs-dump-format-version:'):
raise svntest.Failure('No valid output')
def revision_0_dump(sbox):
"dump revision zero"
run_dump_test(sbox, "revision-0.dump")
def revision_0_load(sbox):
"load revision zero"
run_load_test(sbox, "revision-0.dump")
# skeleton.dump repository layout
#
# Projects/ (Added r1)
# README (Added r2)
# Project-X (Added r3)
# Project-Y (Added r4)
# Project-Z (Added r5)
# docs/ (Added r6)
# README (Added r6)
def skeleton_load(sbox):
"skeleton repository"
run_load_test(sbox, "skeleton.dump")
def copy_and_modify_dump(sbox):
"copy and modify"
run_dump_test(sbox, "copy-and-modify.dump")
def copy_and_modify_load(sbox):
"copy and modify"
run_load_test(sbox, "copy-and-modify.dump")
########################################################################
# Run the tests
# list all tests here, starting with None:
test_list = [ None,
basic_dump,
revision_0_dump,
revision_0_load,
skeleton_load,
copy_and_modify_load,
Wimp("Need to fix headers in RA layer", copy_and_modify_dump),
]
if __name__ == '__main__':
svntest.main.run_tests(test_list)
# NOTREACHED
### End of file.
|
Python
| 0
|
@@ -6237,16 +6237,28 @@
+XFail(XFail(
skeleton
@@ -6259,24 +6259,104 @@
eleton_load,
+ svntest.main.is_ra_type_dav),%0A svntest.main.is_ra_type_svn),
%0A
@@ -6358,16 +6358,28 @@
+XFail(XFail(
copy_and
@@ -6387,24 +6387,104 @@
modify_load,
+ svntest.main.is_ra_type_dav),%0A svntest.main.is_ra_type_svn),
%0A
|
d35aed562b3c9eba6f7de7ac4aa7d6ad7723ec0a
|
Add listener decos
|
cogs/cancer.py
|
cogs/cancer.py
|
from discord.ext import commands
class Cancer(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.ok_list = [198101180180594688, 246291440106340352]
async def on_member_join(self, member):
if member.guild.id not in self.ok_list:
return
await member.guild.system_channel.send("yes " + member.mention)
async def on_member_remove(self, member):
if member.guild.id not in self.ok_list:
return
await member.guild.system_channel.send("no " + member.mention)
async def on_guild_emojis_update(self, guild, before, after):
if guild.id not in self.ok_list:
return
await guild.system_channel.send("the emojis were updated")
def setup(bot):
bot.add_cog(Cancer(bot))
|
Python
| 0
|
@@ -13,24 +13,28 @@
.ext
- import commands
+.commands import Cog
%0A%0A%0Ac
@@ -49,17 +49,8 @@
cer(
-commands.
Cog)
@@ -168,16 +168,34 @@
40352%5D%0A%0A
+ @Cog.listener%0A
asyn
@@ -362,32 +362,50 @@
ember.mention)%0A%0A
+ @Cog.listener%0A
async def on
@@ -569,24 +569,42 @@
r.mention)%0A%0A
+ @Cog.listener%0A
async de
|
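The commit above moves the event handlers onto `Cog.listener` decorators. The hunks add the decorator bare (`@Cog.listener`), but in discord.py it is conventionally applied with parentheses; a sketch of one patched handler under that assumption:

from discord.ext.commands import Cog

class Cancer(Cog):
    def __init__(self, bot):
        self.bot = bot

    @Cog.listener()  # registers the coroutine as an on_member_join event listener
    async def on_member_join(self, member):
        await member.guild.system_channel.send('yes ' + member.mention)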
1b3c9e5f46f48865882f1087ced0ade168233711
|
fix formatting and caching
|
cogs/stonks.py
|
cogs/stonks.py
|
import discord
import json
from datetime import datetime
from discord.ext import commands
from utils.aiohttp_wrap import aio_get_json
class Stonks(commands.Cog):
URL = "https://finnhub.io/api/v1/quote"
def __init__(self, bot):
self.bot = bot
self.session = bot.aio_session
self.redis_client = bot.redis_client
# self.headers = {'X-Finnhub-Token': bot.api_keys["stonks"]}
with open('data/apikeys.json') as f:
self.api_key = json.load(f)["stonks"]
self.headers = {'X-Finnhub-Token': self.api_key}
@commands.command(name="stonk", aliases=["stonks", "stock", "stocks"])
async def stonks(self, ctx: commands.Context, *, symbol: str):
symbol = symbol.upper()
params = {"symbol": symbol}
redis_key = f"stonks:{symbol}"
if await self.redis_client.exists(redis_key):
resp = json.loads(await self.redis_client.get(redis_key))
else:
resp = await aio_get_json(self.session, self.URL, headers=self.headers, params=params)
if resp is None:
return await ctx.error("API Error", description="There was an issue with the stocks API, try again later")
if resp['t'] == 0:
return await ctx.error("Stock error", description=f"Couldn't find any stock information for `{symbol}`")
await self.redis_client.set(redis_key, json.dumps(resp))
em = discord.Embed(color=discord.Color.blurple())
em.set_author(name=symbol, icon_url="https://emojipedia-us.s3.dualstack.us-west-1.amazonaws.com/thumbs/240/twitter/259/chart-increasing_1f4c8.png")
em.add_field(name="Current Price", value=f"${resp['c']}")
em.add_field(name="Previous Close", value=f"${resp['pc']}")
em.add_field(name="% Change today", value=f"{(resp['c'] - resp['pc'])/resp['pc']:.2%}")
em.timestamp = datetime.fromtimestamp(resp['t'])
await ctx.send(embed=em)
def setup(bot):
bot.add_cog(Stonks(bot))
|
Python
| 0.000001
|
@@ -204,16 +204,34 @@
/quote%22%0A
+ TTL = 60 * 15%0A
def
@@ -1456,16 +1456,29 @@
ps(resp)
+, ex=self.TTL
)%0A
@@ -1756,16 +1756,20 @@
esp%5B'c'%5D
+:.2f
%7D%22)%0A
@@ -1828,16 +1828,20 @@
sp%5B'pc'%5D
+:.2f
%7D%22)%0A
@@ -1872,17 +1872,17 @@
Change
-t
+T
oday%22, v
@@ -1933,16 +1933,40 @@
.2%25%7D%22)%0A%0A
+ em.set_footer()%0A
|
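The caching half of the fix above passes an expiry to the Redis SET, so cached quotes go stale after `TTL = 60 * 15` seconds instead of living forever. The cog uses an async client; a minimal sketch of the same pattern with the synchronous redis-py client:

import json
import redis

r = redis.Redis()
TTL = 60 * 15  # 15 minutes, matching the diff
quote = {'c': 123.45, 'pc': 120.00, 't': 1600000000}  # hypothetical API response
r.set('stonks:AAPL', json.dumps(quote), ex=TTL)  # ex= expires the key after TTL seconds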
168c936a094a2f726c430f72b1595bae78d80c02
|
Fix crash on passing -h, -he or any substring of help as an argument. (#244)
|
src/invoice2data/main.py
|
src/invoice2data/main.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import shutil
import os
from os.path import join
import logging
from .input import pdftotext
from .input import pdfminer_wrapper
from .input import tesseract
from .input import tesseract4
from .input import gvision
from invoice2data.extract.loader import read_templates
from .output import to_csv
from .output import to_json
from .output import to_xml
logger = logging.getLogger(__name__)
input_mapping = {
'pdftotext': pdftotext,
'tesseract': tesseract,
'tesseract4': tesseract4,
'pdfminer': pdfminer_wrapper,
'gvision': gvision,
}
output_mapping = {'csv': to_csv, 'json': to_json, 'xml': to_xml, 'none': None}
def extract_data(invoicefile, templates=None, input_module=pdftotext):
"""Extracts structured data from PDF/image invoices.
This function uses the text extracted from a PDF file or image and
pre-defined regex templates to find structured data.
Reads template if no template assigned
Required fields are matched from templates
Parameters
----------
invoicefile : str
path of electronic invoice file in PDF,JPEG,PNG (example: "/home/duskybomb/pdf/invoice.pdf")
templates : list of instances of class `InvoiceTemplate`, optional
Templates are loaded using `read_template` function in `loader.py`
input_module : {'pdftotext', 'pdfminer', 'tesseract'}, optional
library to be used to extract text from given `invoicefile`,
Returns
-------
dict or False
extracted and matched fields or False if no template matches
Notes
-----
Import required `input_module` when using invoice2data as a library
See Also
--------
read_template : Function where templates are loaded
InvoiceTemplate : Class representing single template files that live as .yml files on the disk
Examples
--------
When using `invoice2data` as a library
>>> from invoice2data.input import pdftotext
>>> extract_data("invoice2data/test/pdfs/oyo.pdf", None, pdftotext)
{'issuer': 'OYO', 'amount': 1939.0, 'date': datetime.datetime(2017, 12, 31, 0, 0), 'invoice_number': 'IBZY2087',
'currency': 'INR', 'desc': 'Invoice IBZY2087 from OYO'}
"""
if templates is None:
templates = read_templates()
# print(templates[0])
extracted_str = input_module.to_text(invoicefile).decode('utf-8')
logger.debug('START pdftotext result ===========================')
logger.debug(extracted_str)
logger.debug('END pdftotext result =============================')
logger.debug('Testing {} template files'.format(len(templates)))
for t in templates:
optimized_str = t.prepare_input(extracted_str)
if t.matches_input(optimized_str):
return t.extract(optimized_str)
logger.error('No template for %s', invoicefile)
return False
def create_parser():
"""Returns argument parser """
parser = argparse.ArgumentParser(
description='Extract structured data from PDF files and save to CSV or JSON.'
)
parser.add_argument(
'--input-reader',
choices=input_mapping.keys(),
default='pdftotext',
help='Choose text extraction function. Default: pdftotext',
)
parser.add_argument(
'--output-format',
choices=output_mapping.keys(),
default='none',
help='Choose output format. Default: none',
)
parser.add_argument(
'--output-date-format',
dest='output_date_format',
default="%Y-%m-%d",
help='Choose output date format. Default: %Y-%m-%d (ISO 8601 Date)',
)
parser.add_argument(
'--output-name',
'-o',
dest='output_name',
default='invoices-output',
help='Custom name for output file. Extension is added based on chosen format.',
)
parser.add_argument(
'--debug', dest='debug', action='store_true', help='Enable debug information.'
)
parser.add_argument(
'--copy', '-c', dest='copy', help='Copy and rename processed PDFs to specified folder.'
)
parser.add_argument(
'--move', '-m', dest='move', help='Move and rename processed PDFs to specified folder.'
)
parser.add_argument(
'--filename-format',
dest='filename',
default="{date} {invoice_number} {desc}.pdf",
help='Filename format to use when moving or copying processed PDFs.'
'Default: "{date} {invoice_number} {desc}.pdf"',
)
parser.add_argument(
'--template-folder',
'-t',
dest='template_folder',
help='Folder containing invoice templates in yml file. Always adds built-in templates.',
)
parser.add_argument(
'--exclude-built-in-templates',
dest='exclude_built_in_templates',
default=False,
help='Ignore built-in templates.',
action="store_true",
)
parser.add_argument(
'input_files', type=argparse.FileType('r'), nargs='+', help='File or directory to analyze.'
)
return parser
def main(args=None):
"""Take folder or single file and analyze each."""
if args is None:
parser = create_parser()
args = parser.parse_args()
if args.debug:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
input_module = input_mapping[args.input_reader]
output_module = output_mapping[args.output_format]
templates = []
# Load templates from external folder if set.
if args.template_folder:
templates += read_templates(os.path.abspath(args.template_folder))
# Load internal templates, if not disabled.
if not args.exclude_built_in_templates:
templates += read_templates()
output = []
for f in args.input_files:
res = extract_data(f.name, templates=templates, input_module=input_module)
if res:
logger.info(res)
output.append(res)
if args.copy:
filename = args.filename.format(
date=res['date'].strftime('%Y-%m-%d'),
invoice_number=res['invoice_number'],
desc=res['desc'],
)
shutil.copyfile(f.name, join(args.copy, filename))
if args.move:
filename = args.filename.format(
date=res['date'].strftime('%Y-%m-%d'),
invoice_number=res['invoice_number'],
desc=res['desc'],
)
shutil.move(f.name, join(args.move, filename))
f.close()
if output_module is not None:
output_module.write_to_file(output, args.output_name, args.output_date_format)
if __name__ == '__main__':
main()
|
Python
| 0.000001
|
@@ -3618,23 +3618,26 @@
efault:
+%25
%25Y-%25
+%25
m-%25
+%25
d (ISO 8
|
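The fix above doubles the percent signs in the argparse help string. argparse applies %-style formatting to help text (that is how `%(default)s` works), so a bare `%Y` raises "unsupported format character" when `--help` renders — while the `default=` value itself is never formatted and needs no escaping. A minimal sketch:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument(
    '--output-date-format',
    default='%Y-%m-%d',                        # default values are not %-formatted
    help='Date format. Default: %%Y-%%m-%%d')  # help text is, so %% renders as a single %
parser.parse_args(['--help'])                  # prints the help with single % signs, then exits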
50e69a0d53dffbc961b865f583ca071dfb49648c
|
Reformat class
|
mediacloud/mediawords/util/sql.py
|
mediacloud/mediawords/util/sql.py
|
import time
import datetime
# noinspection PyPackageRequirements
import dateutil.parser
from mediawords.util.perl import decode_string_from_bytes_if_needed
def get_sql_date_from_epoch(epoch: int) -> str:
# Returns local date by default, no need to set timezone
try:
return datetime.datetime.fromtimestamp(int(epoch)).strftime('%Y-%m-%d %H:%M:%S')
except( ValueError ):
# mimic perl's behavior of sending the 0 epoch date on an error
return '1970-01-01 00:00:00'
def sql_now() -> str:
return get_sql_date_from_epoch(int(time.time()))
def get_epoch_from_sql_date(date: str) -> int:
"""Given a date in the sql format 'YYYY-MM-DD', return the epoch time."""
date = decode_string_from_bytes_if_needed(date)
parsed_date = dateutil.parser.parse(date)
return int(parsed_date.timestamp())
def increment_day(date: str, days: int = 1) -> str:
"""Given a date in the sql format 'YYYY-MM-DD', increment it by $days days."""
date = decode_string_from_bytes_if_needed(date)
if days == 0:
return date
epoch_date = get_epoch_from_sql_date(date) + (((days * 24) + 12) * 60 * 60)
return datetime.datetime.fromtimestamp(int(epoch_date)).strftime('%Y-%m-%d')
|
Python
| 0
|
@@ -370,17 +370,16 @@
except
-(
ValueEr
@@ -385,10 +385,8 @@
rror
- )
:%0A
@@ -393,17 +393,17 @@
#
-m
+M
imic per
|
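The reformat above drops the stray parentheses in `except( ValueError ):`. Both spellings parse — the parentheses merely group the expression — but by convention they are reserved for catching several exception types at once:

try:
    int('not a number')
except (ValueError, TypeError):  # a tuple of exception types
    print('could not convert')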
94e9eda139234e8b9183ce5462bd7c9a9d97e197
|
Use global for 2 args
|
coala_quickstart/coala_quickstart.py
|
coala_quickstart/coala_quickstart.py
|
import argparse
import logging
import os
import sys
from pyprint.ConsolePrinter import ConsolePrinter
from coala_utils.FilePathCompleter import FilePathCompleter
from coala_utils.Question import ask_question
from coala_quickstart import __version__
from coala_quickstart.interaction.Logo import print_welcome_message
from coala_quickstart.generation.InfoCollector import collect_info
from coala_quickstart.generation.Project import (
ask_to_select_languages,
get_used_languages,
print_used_languages,
valid_path,
)
from coala_quickstart.generation.FileGlobs import get_project_files
from coala_quickstart.Strings import PROJECT_DIR_HELP
from coala_quickstart.generation.Bears import (
filter_relevant_bears,
print_relevant_bears,
get_non_optional_settings_bears,
remove_unusable_bears,
)
from coala_quickstart.generation.Settings import (
generate_settings, write_coafile)
from coala_quickstart.generation.SettingsClass import (
collect_bear_settings)
from coala_quickstart.green_mode.green_mode_core import green_mode
MAX_ARGS_GREEN_MODE = 5
MAX_VALUES_GREEN_MODE = 5
def _get_arg_parser():
description = """
coala-quickstart automatically creates a .coafile for use by coala.
"""
arg_parser = argparse.ArgumentParser(
prog='coala-quickstart',
description=description,
add_help=True
)
arg_parser.add_argument(
'-v', '--version', action='version', version=__version__)
arg_parser.add_argument(
'-C', '--non-interactive', const=True, action='store_const',
help='run coala-quickstart in non interactive mode')
arg_parser.add_argument(
'--ci', action='store_const', dest='non_interactive', const=True,
help='continuous integration run, alias for `--non-interactive`')
arg_parser.add_argument(
'--allow-incomplete-sections', action='store_const',
dest='incomplete_sections', const=True,
help='generate coafile with only `bears` and `files` field in sections')
arg_parser.add_argument(
'--no-filter-by-capabilities', action='store_const',
dest='no_filter_by_capabilities', const=True,
help='disable filtering of bears by their capabilities.')
arg_parser.add_argument(
'-g', '--green-mode', const=True, action='store_const',
help='Produce "green" config files for your project. Green config files'
' don\'t generate any error in the project and match the coala'
' configuration as closely as possible to your project.')
arg_parser.add_argument(
'--max-args', nargs='?', type=int,
help='Maximum number of optional settings allowed to be checked'
' by green_mode for each bear.')
arg_parser.add_argument(
'--max-values', nargs='?', type=int,
help='Maximum number of values for optional settings allowed to be'
' checked by green_mode for each bear.')
return arg_parser
def main():
arg_parser = _get_arg_parser()
args = arg_parser.parse_args()
logging.basicConfig(stream=sys.stdout)
printer = ConsolePrinter()
logging.getLogger(__name__)
fpc = None
project_dir = os.getcwd()
if args.green_mode:
args.no_filter_by_capabilities = None
args.incomplete_sections = None
if args.max_args:
MAX_ARGS_GREEN_MODE = args.max_args
if args.max_values:
MAX_VALUES_GREEN_MODE = args.max_values
if not args.green_mode and (args.max_args or args.max_values):
logging.warning(' --max-args and --max-values can be used '
'only with --green-mode. The arguments will '
'be ignored.')
if not args.non_interactive and not args.green_mode:
fpc = FilePathCompleter()
fpc.activate()
print_welcome_message(printer)
printer.print(PROJECT_DIR_HELP)
project_dir = ask_question(
'What is your project directory?',
default=project_dir,
typecast=valid_path)
fpc.deactivate()
project_files, ignore_globs = get_project_files(
None,
printer,
project_dir,
fpc,
args.non_interactive)
used_languages = list(get_used_languages(project_files))
used_languages = ask_to_select_languages(used_languages, printer,
args.non_interactive)
extracted_information = collect_info(project_dir)
relevant_bears = filter_relevant_bears(
used_languages, printer, arg_parser, extracted_information)
if args.green_mode:
bear_settings_obj = collect_bear_settings(relevant_bears)
green_mode(
project_dir, ignore_globs, relevant_bears, bear_settings_obj,
MAX_ARGS_GREEN_MODE,
MAX_VALUES_GREEN_MODE,
project_files,
printer,
)
exit()
print_relevant_bears(printer, relevant_bears)
if args.non_interactive and not args.incomplete_sections:
unusable_bears = get_non_optional_settings_bears(relevant_bears)
remove_unusable_bears(relevant_bears, unusable_bears)
print_relevant_bears(printer, relevant_bears, 'usable')
settings = generate_settings(
project_dir,
project_files,
ignore_globs,
relevant_bears,
extracted_information,
args.incomplete_sections)
write_coafile(printer, project_dir, settings)
|
Python
| 0
|
@@ -2980,24 +2980,78 @@
def main():%0A
+ global MAX_ARGS_GREEN_MODE, MAX_VALUES_GREEN_MODE%0A
arg_pars
|
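The fix above declares the two module-level limits `global` inside `main()`. Without it, the assignments under `if args.max_args:` / `if args.max_values:` make those names local to the whole function, so reading them later (whenever the flags are absent) raises UnboundLocalError. A minimal reproduction of the rule:

MAX_ARGS_GREEN_MODE = 5

def main(override=None):
    global MAX_ARGS_GREEN_MODE  # the assignment below now rebinds the module-level name
    if override is not None:
        MAX_ARGS_GREEN_MODE = override
    return MAX_ARGS_GREEN_MODE

print(main())   # 5
print(main(9))  # 9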
b87711d62a1f2c4974f945625312d8a33ba91fb6
|
convert grp_members into a lambda and add usr_search lambda
|
code-samples/membersOfDomainGroup.py
|
code-samples/membersOfDomainGroup.py
|
#!/usr/bin/env python
# print a list of members of a domain group
param = {
'-f': 'mail', # field name
'-s': '\n', # separator
}
import getopt
import ldap
import re
import sys
try:
param.update(dict(getopt.getopt(sys.argv[1:], 'g:f:s:')[0]))
if '-g' not in param:
sys.stderr.write("-g parameter is required\n")
sys.exit(1)
except getopt.GetoptError:
sys.stderr.write("Usage: %s -g groupName [ -f LDAP field ] [ -s output separator ]\n" % sys.argv[0])
sys.exit(1)
ldapSrv = ldap.initialize('ldap://dc.example.com')
ldapSrv.bind_s('bind-user@example.com', 'bindPasSw0rd')
# get output field from ldap results
ldap_output = lambda r: r[1][param['-f']][0]
# make a flat list from a list of lists
flat = lambda lst: reduce(lambda l, e: l + flat(e) if isinstance(e, list) else l + [e], lst, [])
# search for a group by filter
grp_search = lambda fltr: ldapSrv.search_s('ou=Resources,dc=example,dc=com', ldap.SCOPE_SUBTREE, '(&(objectclass=group)(%s))' % fltr, ['dn'])
# search for members in LDAP groups and return a nested list of them
def grp_members(gdn):
return [grp_members(grp[0]) for grp in grp_search('memberOf=%s' % gdn)
] + ldapSrv.search_s('ou=Users,dc=example,dc=com', ldap.SCOPE_SUBTREE, '(&(objectclass=person)(memberOf=%s))' % gdn, [param['-f']])
grp = grp_search('name=%s' % param['-g'])
if not grp:
sys.stderr.write("Group '%s' isn't found in LDAP\n" % param['-g'])
sys.exit(2)
print param['-s'].join(sorted(set(ldap_output(res) for res in flat(grp_members(grp[0][0])) if res)))
|
Python
| 0.000032
|
@@ -991,163 +991,61 @@
for
-memb
+us
ers in
- LDAP groups and return a nested list of them%0Adef grp_members(gdn):%0A%09return %5Bgrp_members(grp%5B0%5D) for grp in grp_search('memberOf=%25s' %25 gdn)%0A%09%09%5D +
+side a given group%0Ausr_search = lambda grpDN:
lda
@@ -1150,18 +1150,20 @@
s))' %25 g
-dn
+rpDN
, %5Bparam
@@ -1172,16 +1172,194 @@
-f'%5D%5D)%0A%0A
+# get a nested list of the members of a group with a given DN%0Agrp_members = lambda grpDN: %5Bgrp_members(grp%5B0%5D) for grp in grp_search('memberOf=%25s' %25 grpDN)%5D + usr_search(grpDN)%0A%0A
grp = gr
|
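The commit above rewrites `grp_members` as a recursive lambda. This works because the name in the lambda body is resolved at call time, by which point the assignment has bound it. The same pattern on a toy tree:

# Recursive lambda: `walk` is looked up when called, not when defined.
walk = lambda t: [walk(c) for c in t.get('children', [])] + [t['name']]

tree = {'name': 'root', 'children': [{'name': 'leaf'}]}
print(walk(tree))  # [['leaf'], 'root'] -- nested, hence the flat() helper above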
f577de1cd1bea44e2db5e8583d1d8edeb1b488e3
|
Fix in service credential setup to work with front-end.
|
src/biokbase/narrative/monkeypatch.py
|
src/biokbase/narrative/monkeypatch.py
|
"""
Module to monkeypatch the parts of the IPython core that we don't/won't rewrite
Yes, its nasty.
No, we wouldn't use these if there were an alternative
We use some code that GvR posted here as helpers:
https://mail.python.org/pipermail/python-dev/2008-January/076194.html
sychan@lbl.gov
"""
import os
import urllib
import re
import pprint
import IPython.html.notebook.handlers
import IPython.html.services.notebooks.handlers
import IPython
import biokbase.auth
import tornado
def monkeypatch_method(cls):
"""
To use:
from <somewhere> import <someclass>
@monkeypatch_method(<someclass>)
def <newmethod>(self, args):
return <whatever>
This adds <newmethod> to <someclass>
"""
def decorator(func):
setattr(cls, func.__name__, func)
return func
return decorator
def monkeypatch_class(name, bases, namespace):
"""
To use:
from <somewhere> import <someclass>
class <newclass>(<someclass>):
__metaclass__ = monkeypatch_class
def <method1>(...): ...
def <method2>(...): ...
...
This adds <method1>, <method2>, etc. to <someclass>, and makes
<newclass> a local alias for <someclass>.
"""
assert len(bases) == 1, "Exactly one base class required"
base = bases[0]
for name, value in namespace.iteritems():
if name != "__metaclass__":
setattr(base, name, value)
return base
# This is all kind of gross, but we will end up re-doing it later. Tentatively planning on
# submitting class wrapper decorators for the IPython Tornado request handlers, will do
# it as a patch to the IPython core and then submit a PR
def do_patching( c ):
auth_cookie_name = "kbase_narr_session"
if c.NotebookApp.get('kbase_auth',False):
IPython.html.base.handlers.app_log.debug("Monkeypatching IPython.html.notebook.handlers.NamedNotebookHandler.get() in process {}".format(os.getpid()))
cookierx = re.compile('([^ =|]+)=([^\|]*)')
def parsecookie(cookie):
""" Parser for Jim Thomason's login widget cookies """
sess = { k : v.replace('EQUALSSIGN','=').replace('PIPESIGN','|')
for k,v in cookierx.findall(urllib.unquote(cookie)) }
return sess
def cookie_pusher(cookie, handler):
"""
unpack a kbase cookie into a dict, and push it into the target handler's instance
as a kbase_session attribute
"""
cookierx = re.compile('([^ =|]+)=([^\|]*)')
""" Parser for Jim Thomason's login widget cookies """
sess = { k : v.replace('EQUALSSIGN','=').replace('PIPESIGN','|')
for k,v in cookierx.findall(urllib.unquote(cookie)) }
IPython.html.base.handlers.app_log.debug("user_id = " + sess.get('token','None'))
IPython.html.base.handlers.app_log.debug("token = " + sess.get('token','None'))
setattr(handler, 'kbase_session', sess)
# also push the token into the environment hash so that KBase python clients pick it up
biokbase.auth.set_environ_token(sess.get('token','None'))
IPython.html.base.handlers.app_log.debug("Monkeypatching IPython.html.notebook.handlers.NamedNotebookHandler.get() in process {}".format(os.getpid()))
old_get = IPython.html.notebook.handlers.NamedNotebookHandler.get
@monkeypatch_method(IPython.html.notebook.handlers.NamedNotebookHandler)
def get(self,notebook_id):
IPython.html.base.handlers.app_log.debug("notebook_id = " + notebook_id)
if auth_cookie_name in self.cookies and hasattr(self,'notebook_manager'):
IPython.html.base.handlers.app_log.debug("kbase_session = " + self.cookies[auth_cookie_name].value)
cookie_pusher(self.cookies[auth_cookie_name].value, getattr(self,'notebook_manager'))
return old_get(self,notebook_id)
IPython.html.base.handlers.app_log.debug("Monkeypatching IPython.html.services.notebooks.handlers.NotebookRootHandler.get() in process {}".format(os.getpid()))
old_get1 = IPython.html.services.notebooks.handlers.NotebookRootHandler.get
@monkeypatch_method(IPython.html.services.notebooks.handlers.NotebookRootHandler)
def get(self):
if auth_cookie_name in self.cookies:
cookie_pusher( self.cookies[auth_cookie_name].value,getattr(self,'notebook_manager'))
return old_get1(self)
IPython.html.base.handlers.app_log.debug("Monkeypatching IPython.html.base.handlers.RequestHandler.write_error() in process {}".format(os.getpid()))
# Patch RequestHandler to deal with errors and render them in a half-decent
# error page, templated to look (more or less) like the rest of the site.
@monkeypatch_method(IPython.html.base.handlers.RequestHandler)
def write_error(self, status_code, **kwargs):
# some defaults
error = 'Unknown error'
traceback = 'Not available'
request_info = 'Not available'
import traceback
if self.settings.get('debug') and 'exc_info' in kwargs:
exc_info = kwargs['exc_info']
trace_info = ''.join(["%s<br>" % line for line in traceback.format_exception(*exc_info)])
request_info = ''.join(["<strong>%s</strong>: %s<br>" % (k, self.request.__dict__[k] ) for k in self.request.__dict__.keys()])
error_list = exc_info[1]
if error_list is not None:
error_list = error_list.__str__().split('\n')
error = '<h3>%s</h3>' % error_list[0]
error += '<br>'.join(error_list[1:])
self.set_header('Content-Type', 'text/html')
self.write(self.render_template('error.html', error_status=error, traceback=trace_info, request_info=request_info))
|
Python
| 0
|
@@ -1726,16 +1726,52 @@
session%22
+%0A backup_cookie = %22kbase_session%22
%0A%0A if
@@ -3925,32 +3925,334 @@
book_manager'))%0A
+ else if backup_cookie in self.cookies and hasattr(self, 'notebook_manager'):%0A IPython.html.base.handlers.app_log.debug(%22kbase_session = %22 + self.cookies%5Bbackup_cookie%5D.value)%0A cookie_pusher(self.cookies%5Bbackup_cookie%5D.value, getattr(self,'notebook_manager'))%0A%0A
retu
@@ -4789,32 +4789,182 @@
book_manager'))%0A
+ else if backup_cookie in self.cookies:%0A cookie_pusher( self.cookies%5Bbackup_cookie%5D.value,getattr(self,'notebook_manager'))%0A
retu
|
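Note that the hunks above literally insert `else if`, which is not valid Python — the keyword is `elif`. Assuming that is what the committed code actually said, the fallback-cookie logic plausibly reads:

if auth_cookie_name in self.cookies and hasattr(self, 'notebook_manager'):
    cookie_pusher(self.cookies[auth_cookie_name].value, getattr(self, 'notebook_manager'))
elif backup_cookie in self.cookies and hasattr(self, 'notebook_manager'):
    cookie_pusher(self.cookies[backup_cookie].value, getattr(self, 'notebook_manager'))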
1189a06433d1a38662124d5799eb2610c31d5100
|
remove commented out code
|
src/buzzfeed/clean_parsed_colombia.py
|
src/buzzfeed/clean_parsed_colombia.py
|
"""
Script to clean the Colombia data from BuzzFeed Zika data repository
Run this script from the root directory
e.g., `~/git/vbi/zika_data_to_cdc'
from there you can run `python src/buzfeed/clean_parsed_colombia.py`
"""
import os
import sys
import re
import pandas as pd
sys.path.append(os.getcwd())
import src.helper as helper
def clean_and_export_municipal(municipal_data_path, places_df, data_guide_df):
num_data = len(municipal_data_path)
for idx, data_path in enumerate(municipal_data_path):
print("cleaning municipal {} of {}".format(idx + 1, num_data))
df = pd.read_csv(data_path)
report_date = helper.get_report_date_from_filepath(data_path)
df['report_date'] = report_date
df['time_period'] = "NA"
df = pd.merge(df, places_df,
left_on=['department', 'municipality'],
right_on=['alt_name1', 'alt_name2'])
melt_columns = [x for x in df.columns if re.search('^zika_', x)]
id_vars = [x for x in df.columns if x not in melt_columns]
df = pd.melt(df,
id_vars=id_vars,
value_vars=melt_columns,
var_name='data_field_original',
value_name='value')
df = pd.merge(df, data_guide_df,
left_on=['data_field_original'],
right_on=['data_field'])
df = helper.subset_columns_for_cdc(df)
df.to_csv('output/colombia-municipal-{}.csv'.format(report_date),
index=False)
# break
def clean_and_export_regional(regional_data_path):
pass
def main():
# here = os.path.abspath(os.path.dirname(__file__))
# print(here)
places_path = '../zika/Colombia/CO_Places.csv'
places = pd.read_csv(places_path)
data_guide_path = '../zika/Colombia/CO_Data_Guide.csv'
data_guide = pd.read_csv(data_guide_path)
buzzfeed_colombia_datasets = helper.get_data_from_path(
os.path.join('..', 'zika-data', 'data',
'parsed', 'colombia', '*.csv'))
print("Datasets found: {}\n".format(buzzfeed_colombia_datasets))
colombia_municipal = [
x for x in buzzfeed_colombia_datasets if re.search('municipal', x)]
colombia_regional = [
x for x in buzzfeed_colombia_datasets if x not in colombia_municipal]
print("municipal datasets: {}\n".format(colombia_municipal))
print("regional datasets: {}\n".format(colombia_regional))
clean_and_export_municipal(colombia_municipal, places, data_guide)
clean_and_export_regional(colombia_regional)
if __name__ == '__main__':
main()
|
Python
| 0
|
@@ -1560,24 +1560,8 @@
se)%0A
- # break%0A
%0A%0Ade
@@ -1636,83 +1636,8 @@
():%0A
- # here = os.path.abspath(os.path.dirname(__file__))%0A # print(here)%0A%0A
|
f47ebbe4dcacdd0ef96799a5d11925e0a8b6d5d5
|
fix import path
|
test/test_resultset.py
|
test/test_resultset.py
|
from unittest import TestCase
from statscraper.base_scraper import ResultSet
from pandas.api import types as ptypes
class TestResultSet(TestCase):
def test_pandas_export(self):
result = ResultSet()
result.append({'city': "Voi", 'value': 45483})
df = result.pandas
self.assertTrue(ptypes.is_numeric_dtype(df.value))
|
Python
| 0.000007
|
@@ -44,21 +44,8 @@
aper
-.base_scraper
imp
|
cf5ad85a35824646a30d90de79d72f4068dade50
|
Fix failing QML test with Qt 5.9 due to assert
|
tests/QtQml/bug_557.py
|
tests/QtQml/bug_557.py
|
#############################################################################
##
## Copyright (C) 2016 The Qt Company Ltd.
## Contact: https://www.qt.io/licensing/
##
## This file is part of the test suite of PySide2.
##
## $QT_BEGIN_LICENSE:GPL-EXCEPT$
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and The Qt Company. For licensing terms
## and conditions see https://www.qt.io/terms-conditions. For further
## information use the contact form at https://www.qt.io/contact-us.
##
## GNU General Public License Usage
## Alternatively, this file may be used under the terms of the GNU
## General Public License version 3 as published by the Free Software
## Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
## included in the packaging of this file. Please review the following
## information to ensure the GNU General Public License requirements will
## be met: https://www.gnu.org/licenses/gpl-3.0.html.
##
## $QT_END_LICENSE$
##
#############################################################################
import sys
from PySide2.QtCore import QUrl
from PySide2.QtGui import QGuiApplication
from PySide2.QtQml import QQmlEngine, QQmlComponent
app = QGuiApplication(sys.argv)
engine = QQmlEngine()
component = QQmlComponent(engine)
# This should segfault if the QDeclarativeComponent has not QQmlEngine
component.loadUrl(QUrl.fromLocalFile('foo.qml'))
|
Python
| 0
|
@@ -1268,16 +1268,51 @@
port sys
+%0Afrom helper import adjust_filename
%0A%0Afrom P
@@ -1634,17 +1634,44 @@
ile(
-'foo.qml'
+adjust_filename('foo.qml', __file__)
))%0A%0A
|
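The fix above resolves `foo.qml` relative to the test module through a `helper.adjust_filename` utility from the PySide2 test suite. That helper is not shown in this record; a plausible equivalent would be:

import os

def adjust_filename(name, file_):
    # Resolve `name` next to the module that passed its __file__, so the
    # test finds foo.qml regardless of the current working directory.
    return os.path.join(os.path.dirname(os.path.abspath(file_)), name)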
0c0ca965004e4753a65a9e2984b723c6c59e74ef
|
Fix logging format string.
|
astral/node/base.py
|
astral/node/base.py
|
"""
astral.node.base
==========
Node Base Class.
"""
import threading
import json
import astral.api.app
from astral.models import Node, session, Event
from astral.conf import settings
from astral.exceptions import NetworkError
from astral.api.client import NodesAPI
import logging
log = logging.getLogger(__name__)
class LocalNode(object):
def run(self, uuid_override=None, **kwargs):
# Kind of a hack to make sure logging is set up before we do anything
settings.LOGGING_CONFIG
self.uuid = uuid_override
self.BootstrapThread(node=self.node).start()
self.DaemonThread().start()
try:
astral.api.app.run()
except: # tolerate the bare accept here to make sure we always shutdown
self.shutdown()
def node(self):
return Node.get_by(uuid=self.uuid) or Node.me(uuid_override=self.uuid)
def shutdown(self):
if self.node().supernode:
log.info("Unregistering ourself (%s) from the web server",
self.node())
NodesAPI(settings.ASTRAL_WEBSERVER).unregister(
self.node().absolute_url())
if self.node().primary_supernode:
log.info("Unregistering %s from our primary supernode (%s)",
self.node().primary_supernode)
NodesAPI(self.node().primary_supernode.uri()).unregister(
self.node().absolute_url())
class BootstrapThread(threading.Thread):
"""Runs once at node startup to build knowledge of the network."""
def __init__(self, node):
super(LocalNode.BootstrapThread, self).__init__()
self.node = node
def load_static_bootstrap_nodes(self):
log.info("Loading static bootstrap nodes %s",
settings.BOOTSTRAP_NODES)
nodes = [Node.from_dict(node) for node in settings.BOOTSTRAP_NODES]
session.commit()
log.debug("Loaded static bootstrap nodes %s", nodes)
def load_dynamic_bootstrap_nodes(self, base_url=None):
base_url = base_url or settings.ASTRAL_WEBSERVER
try:
nodes = NodesAPI(base_url).list()
except NetworkError, e:
log.warning("Can't connect to server: %s", e)
else:
log.debug("Nodes returned from the server: %s", nodes)
for node in nodes:
# TODO if we find ourselves with an old IP, need to update
if self.node().conflicts_with(node):
log.warn("Received %s which conflicts with us (%s) "
"-- telling web server to kill it")
NodesAPI(base_url).unregister(
Node.absolute_url(node['uuid']))
else:
node = Node.from_dict(node)
log.info("Stored %s from %s", node, base_url)
def register_with_supernode(self):
Node.update_supernode_rtt()
# TODO hacky hacky hacky. moving query inside of the node causes
# SQLAlchemy session errors.
session.commit()
session.close_all()
if not self.node().supernode:
self.node().primary_supernode = Node.closest_supernode()
if not self.node().primary_supernode:
self.node().supernode = True
log.info("Registering %s as a supernode, none others found",
self.node())
try:
NodesAPI(settings.ASTRAL_WEBSERVER).register(
self.node().to_dict())
except NetworkError, e:
log.warning("Can't connect to server to register as a "
"supernode: %s", e)
else:
try:
NodesAPI(self.node().primary_supernode.uri()).register(
self.node().to_dict())
except NetworkError, e:
# TODO try another?
log.warning("Can't connect to supernode %s to register"
": %s", self.node().primary_supernode, e)
else:
self.load_dynamic_bootstrap_nodes(
self.node().primary_supernode.uri())
else:
# TODO register with all (some?) other supernodes. supernode
# cool kids club.
pass
session.commit()
def run(self):
self.load_static_bootstrap_nodes()
self.load_dynamic_bootstrap_nodes()
self.register_with_supernode()
class DaemonThread(threading.Thread):
"""Background thread for garbage collection and other periodic tasks
outside the scope of the web API.
"""
def __init__(self):
super(LocalNode.DaemonThread, self).__init__()
self.daemon = True
def run(self):
import time
while self.is_alive():
log.debug("Daemon thread woke up")
Event(message=json.dumps({'type': "update"}))
time.sleep(10)
|
Python
| 0.000008
|
@@ -1273,32 +1273,45 @@
+ self.node(),
self.node().pri
|
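The one-line fix above supplies the missing argument: the second `log.info` call in `shutdown()` has two `%s` placeholders but passed only one value. Because logging formats lazily, the mismatch surfaces only when the record is emitted, as a "not enough arguments for format string" logging error. A minimal reproduction with hypothetical values:

import logging
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)

node, supernode = 'node-a', 'node-b'  # hypothetical stand-ins
log.info("Unregistering %s from our primary supernode (%s)", node)             # logging error at emit time
log.info("Unregistering %s from our primary supernode (%s)", node, supernode)  # OK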
a5b034704b75496cd1357b66f5fe0bbabb27a114
|
Implement ``plot_tree`` method
|
astrodendro/plot.py
|
astrodendro/plot.py
|
import numpy as np
class DendrogramPlotter(object):
"""
A class to plot a dendrogram object
"""
def __init__(self, dendrogram):
# should we copy to ensure immutability?
self.dendrogram = dendrogram
self._cached_positions = None
self.sort()
def sort(self, sort_key=lambda s: s.get_peak(subtree=True)[1], reverse=False):
"""
Sort the position of the leaves for plotting
Parameters
----------
sort_key : function, optional
This should be a function that takes a
`~astrodendro.structure.Structure` and returns a scalar that is
then used to sort the leaves.
reverse : bool, optional
Whether to reverse the sorting
"""
sorted_trunk_structures = sorted(self.dendrogram.trunk, key=sort_key, reverse=reverse)
positions = {}
x = 0 # the first index for each trunk structure
for structure in sorted_trunk_structures:
# Get sorted leaves
sorted_leaves = structure.get_sorted_leaves(subtree=True, reverse=reverse)
# Loop over leaves and assign positions
for leaf in sorted_leaves:
positions[leaf] = x
x += 1
# Sort structures from the top-down
sorted_structures = sorted(structure.descendants, key=lambda s: s.level, reverse=True) + [structure]
# Loop through structures and assign position of branches as the mean
# of the leaves
for structure in sorted_structures:
if not structure.is_leaf:
positions[structure] = np.mean([positions[child] for child in structure.children])
self._cached_positions = positions
def get_lines(self, structure=None):
"""
Get a collection of lines to draw the dendrogram
Parameters
----------
structure : `~astrodendro.structure.Structure`
The structure to plot. If not set, the whole tree will be plotted.
Returns
-------
lines : `astrodendro.plot.StructureCollection`
The lines (sub-class of LineCollection) which can be directly used in Matplotlib
"""
if self._cached_positions is None:
raise Exception("Leaves have not yet been sorted")
if structure is None:
structures = self.dendrogram.all_nodes
else:
structures = structure.descendants + [structure]
lines = []
mapping = []
for s in structures:
x = self._cached_positions[s]
bot = s.parent.height if s.parent is not None else s.vmin
top = s.height
lines.append(([x, bot], [x, top]))
mapping.append(s)
if s.is_branch:
pc = [self._cached_positions[c] for c in s.children]
lines.append(([min(pc), top], [max(pc), top]))
mapping.append(s)
from .structure_collection import StructureCollection
sc = StructureCollection(lines)
sc.structures = mapping
return sc
|
Python
| 0.000029
|
@@ -1782,16 +1782,1274 @@
itions%0A%0A
+ def plot_tree(self, ax, structure=None, subtree=True, autoscale=True, **kwargs):%0A %22%22%22%0A Plot the dendrogram tree or a substructure%0A%0A Parameters%0A ----------%0A ax : %60matplotlib.axes.Axes%60 instance%0A The Axes inside which to plot the dendrogram%0A structure : int or %60~astrodendro.structure.Structure%60, optional%0A If specified, only plot this structure%0A subtree : bool, optional%0A If a structure is specified, by default the whole subtree will be%0A plotted, but this can be disabled with this option.%0A autoscale : bool, optional%0A Whether to automatically adapt the window limits to the tree%0A%0A Notes%0A -----%0A Any additional keyword arguments are passed to%0A %60~matplotlib.collections.LineCollection%60 and can be used to control the%0A appearance of the plot.%0A %22%22%22%0A%0A # Get the lines for the dendrogram%0A lines = self.get_lines(structure=structure, **kwargs)%0A%0A # Add the lines to the axes%0A ax.add_collection(lines)%0A%0A # Auto-scale axes (doesn't happen by default with %60%60add_collection%60%60)%0A if autoscale:%0A ax.margins(0.05)%0A ax.autoscale_view(True, True, True)%0A%0A
def
@@ -3078,16 +3078,26 @@
ure=None
+, **kwargs
):%0A
@@ -3513,16 +3513,160 @@
tplotlib
+%0A%0A Notes%0A -----%0A Any additional keyword arguments are passed to the%0A %60~matplotlib.collections.LineCollection%60 class.
%0A
@@ -3873,16 +3873,121 @@
else:%0A
+ if type(structure) is int:%0A structure = self.dendrogram.nodes_dict%5Bstructure%5D%0A
@@ -4582,16 +4582,16 @@
lection%0A
-
@@ -4620,16 +4620,26 @@
on(lines
+, **kwargs
)%0A
|
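A hypothetical usage of the `plot_tree` method added above, assuming `d` is an already-computed astrodendro Dendrogram; extra keyword arguments flow through `get_lines` into the LineCollection subclass:

import matplotlib.pyplot as plt

p = DendrogramPlotter(d)                       # d: computed Dendrogram (assumed)
fig, ax = plt.subplots()
p.plot_tree(ax, color='black', linewidth=1.0)  # styling kwargs reach the lines
plt.show()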
4f03cd1ed7412a545da1274c58733a0d6333eca1
|
Add an option about ridge prior on dropout prob
|
autoencoder/loss.py
|
autoencoder/loss.py
|
import numpy as np
import tensorflow as tf
from keras import backend as K
def _nan2zero(x):
return tf.where(tf.is_nan(x), tf.zeros_like(x), x)
def _nelem(x):
nelem = tf.reduce_sum(tf.cast(~tf.is_nan(x), tf.float32))
return tf.cast(tf.where(tf.equal(nelem, 0.), 1., nelem), x.dtype)
def _reduce_mean(x):
nelem = _nelem(x)
x = _nan2zero(x)
return tf.divide(tf.reduce_sum(x), nelem)
def mse_loss(y_true, y_pred):
ret = tf.square(y_pred - y_true)
return _reduce_mean(ret)
# In the implementations, I try to keep the function signature
# similar to those of Keras objective functions so that
# later on we can use them in Keras smoothly:
# https://github.com/fchollet/keras/blob/master/keras/objectives.py#L7
def poisson_loss(y_true, y_pred):
y_pred = tf.cast(y_pred, tf.float32)
y_true = tf.cast(y_true, tf.float32)
# we can use the Poisson PMF from TensorFlow as well
# dist = tf.contrib.distributions
# return -tf.reduce_mean(dist.Poisson(y_pred).log_pmf(y_true))
nelem = _nelem(y_true)
y_true = _nan2zero(y_true)
# last term can be avoided since it doesn't depend on y_pred
# however keeping it gives a nice lower bound to zero
ret = y_pred - y_true*tf.log(y_pred+1e-10) + tf.lgamma(y_true+1.0)
return tf.divide(tf.reduce_sum(ret), nelem)
# We need a class (or closure) here,
# because it's not possible to
# pass extra arguments to Keras loss functions
# See https://github.com/fchollet/keras/issues/2121
# dispersion (theta) parameter is a scalar by default.
# scale_factor scales the nbinom mean before the
# calculation of the loss to balance the
# learning rates of theta and network weights
class NB(object):
def __init__(self, theta=None, masking=False, scope='nbinom_loss/',
scale_factor=1.0, debug=False):
# for numerical stability
self.eps = 1e-10
self.scale_factor = scale_factor
self.debug = debug
self.scope = scope
self.masking = masking
self.theta = theta
def loss(self, y_true, y_pred, reduce=True):
scale_factor = self.scale_factor
eps = self.eps
with tf.name_scope(self.scope):
y_true = tf.cast(y_true, tf.float32)
y_pred = tf.cast(y_pred, tf.float32) * scale_factor
if self.masking:
nelem = _nelem(y_true)
y_true = _nan2zero(y_true)
# Clip theta
theta = tf.minimum(self.theta, 1e6)
t1 = -tf.lgamma(y_true+theta+eps)
t2 = tf.lgamma(theta+eps)
t3 = tf.lgamma(y_true+1.0)
t4 = -(theta * (tf.log(theta+eps)))
t5 = -(y_true * (tf.log(y_pred+eps)))
t6 = (theta+y_true) * tf.log(theta+y_pred+eps)
assert_ops = [
tf.verify_tensor_all_finite(y_pred, 'y_pred has inf/nans'),
tf.verify_tensor_all_finite(t1, 't1 has inf/nans'),
tf.verify_tensor_all_finite(t2, 't2 has inf/nans'),
tf.verify_tensor_all_finite(t3, 't3 has inf/nans'),
tf.verify_tensor_all_finite(t4, 't4 has inf/nans'),
tf.verify_tensor_all_finite(t5, 't5 has inf/nans'),
tf.verify_tensor_all_finite(t6, 't6 has inf/nans')]
if self.debug:
tf.summary.histogram('t1', t1)
tf.summary.histogram('t2', t2)
tf.summary.histogram('t3', t3)
tf.summary.histogram('t4', t4)
tf.summary.histogram('t5', t5)
tf.summary.histogram('t6', t6)
with tf.control_dependencies(assert_ops):
final = t1 + t2 + t3 + t4 + t5 + t6
else:
final = t1 + t2 + t3 + t4 + t5 + t6
if reduce:
if self.masking:
final = tf.divide(tf.reduce_sum(final), nelem)
else:
final = tf.reduce_mean(final)
return final
class ZINB(NB):
def __init__(self, pi, scope='zinb_loss/', **kwargs):
super().__init__(scope=scope, **kwargs)
self.pi = pi
def loss(self, y_true, y_pred):
scale_factor = self.scale_factor
eps = self.eps
with tf.name_scope(self.scope):
# reuse existing NB neg.log.lik.
nb_case = super().loss(y_true, y_pred, reduce=False) - tf.log(1.0-self.pi+eps)
y_true = tf.cast(y_true, tf.float32)
y_pred = tf.cast(y_pred, tf.float32) * scale_factor
theta = tf.minimum(self.theta, 1e6)
zero_nb = tf.pow(theta/(theta+y_pred+eps), theta)
zero_case = -tf.log(self.pi + ((1.0-self.pi)*zero_nb)+eps)
result = tf.where(tf.less(y_true, 1e-8), zero_case, nb_case)
if self.masking:
result = _reduce_mean(result)
else:
result = tf.reduce_mean(result)
if self.debug:
tf.summary.histogram('nb_case', nb_case)
tf.summary.histogram('zero_nb', zero_nb)
tf.summary.histogram('zero_case', zero_case)
return result
|
Python
| 0.013247
|
@@ -4068,16 +4068,34 @@
elf, pi,
+ ridge_lambda=0.0,
scope='
@@ -4186,16 +4186,57 @@
.pi = pi
+%0A self.ridge_lambda = ridge_lambda
%0A%0A de
@@ -4870,24 +4870,109 @@
se, nb_case)
+%0A ridge = self.ridge_lambda*tf.square(self.pi)%0A result += ridge
%0A%0A
@@ -5304,16 +5304,69 @@
ro_case)
+%0A tf.summary.histogram('ridge', ridge)
%0A%0A
|
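The new option adds an L2 ("ridge") penalty on the zero-inflation probability `pi`, scaled by `ridge_lambda`, which discourages the model from attributing every zero to dropout. Stripped of the class machinery, the added term is just (illustrated with hypothetical constants):

import tensorflow as tf

zinb_nll = tf.constant([0.7, 1.2])       # hypothetical per-element negative log-likelihoods
pi = tf.constant([0.1, 0.8])             # hypothetical dropout probabilities
ridge_lambda = 0.1                       # hypothetical penalty strength

ridge = ridge_lambda * tf.square(pi)     # elementwise L2 penalty, as in the diff
loss = tf.reduce_mean(zinb_nll + ridge)  # penalty added before the final mean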
5392bf25d16166162d53ddc1f063907d72444a92
|
add in new tests for new functionality
|
tests/cloudlet_test.py
|
tests/cloudlet_test.py
|
from cement.core import handler, hook
from cement.utils import test
from nepho import cli
from nepho.cli.base import Nepho
import nose
class NephoTestApp(Nepho):
class Meta:
argv = []
config_files = []
# Test Cloudlet
class a_TestNephoCloudlet(test.CementTestCase):
app_class = NephoTestApp
def setUp(self):
super(a_TestNephoCloudlet, self).setUp()
self.reset_backend()
app = self.make_app(argv=['cloudlet', 'registry-update'])
app.setup()
app.run()
app.close()
app = self.make_app(argv=['cloudlet', '--quiet', 'uninstall', 'nepho-example'])
app.setup()
app.run()
app.close()
app = self.make_app(argv=['cloudlet', '--quiet', 'install', 'nepho-example'])
app.setup()
app.run()
app.close()
def test_nepho_cloudlet_registry_update(self):
app = self.make_app(argv=['cloudlet', 'registry-update'])
app.setup()
app.run()
app.close()
def test_nepho_cloudlet_update_registry(self):
app = self.make_app(argv=['cloudlet', 'update-registry'])
app.setup()
app.run()
app.close()
def test_nepho_cloudlet_list(self):
app = self.make_app(argv=['cloudlet', 'list'])
app.setup()
app.run()
app.close()
def test_nepho_cloudlet_search(self):
app = self.make_app(argv=['cloudlet', 'search', 'nepho-example'])
app.setup()
app.run()
app.close()
def test_nepho_cloudlet_install(self):
raise nose.SkipTest('skip this until #164 is implemented')
app = self.make_app(argv=['cloudlet', 'install', 'nepho-example'])
app.setup()
app.run()
app.close()
def test_nepho_cloudlet_describe(self):
app = self.make_app(argv=['cloudlet', 'describe', 'nepho-example'])
app.setup()
app.run()
app.close()
def test_nepho_cloudlet_update(self):
app = self.make_app(argv=['cloudlet', 'update', 'nepho-example'])
app.setup()
app.run()
app.close()
def test_nepho_cloudlet_upgrade(self):
app = self.make_app(argv=['cloudlet', 'upgrade', 'nepho-example'])
app.setup()
app.run()
app.close()
def test_nepho_cloudlet_uninstall(self):
raise nose.SkipTest('skip this until #164 is implemented')
app = self.make_app(argv=['cloudlet', 'uninstall', 'nepho-example'])
app.setup()
app.run()
app.close()
def test_nepho_cloudlet_remove(self):
raise nose.SkipTest('skip this until #164 is implemented')
app = self.make_app(argv=['cloudlet', 'remove', 'nepho-example'])
app.setup()
app.run()
app.close()
|
Python
| 0
|
@@ -1173,32 +1173,598 @@
app.close()%0A%0A
+ def test_nepho_cloudlet_list_directories(self):%0A app = self.make_app(argv=%5B'cloudlet', 'directory-list'%5D)%0A app.setup()%0A app.run()%0A app.close()%0A%0A def test_nepho_cloudlet_add_directories(self):%0A app = self.make_app(argv=%5B'cloudlet', 'directory-add', '--directory', '.'%5D)%0A app.setup()%0A app.run()%0A app.close()%0A%0A def test_nepho_cloudlet_rm_directories(self):%0A app = self.make_app(argv=%5B'cloudlet', 'directory-remove', '--directory', '.'%5D)%0A app.setup()%0A app.run()%0A app.close()%0A%0A
def test_nep
@@ -2068,32 +2068,207 @@
app.close()%0A%0A
+ def test_nepho_cloudlet_create(self):%0A app = self.make_app(argv=%5B'cloudlet', 'create', 'test-cloudlet'%5D)%0A app.setup()%0A app.run()%0A app.close()%0A%0A
def test_nep
|
2ddfb4f0f4f2de060399a6e5b519a7f4b788ace5
|
make it possible to show languages for selected values on a map
|
autotyp/adapters.py
|
autotyp/adapters.py
|
from sqlalchemy.orm import joinedload
from clld.interfaces import IParameter, ILanguage, IIndex
from clld.db.meta import DBSession
from clld.db.models.common import ValueSet
from clld.web.adapters.base import Index
from clld.web.adapters.geojson import GeoJsonParameter
from clld.web.maps import SelectedLanguagesMap
class GeoJsonFeature(GeoJsonParameter):
def feature_iterator(self, ctx, req):
return DBSession.query(ValueSet).filter(ValueSet.parameter_pk == ctx.pk)\
.options(joinedload(ValueSet.language))
def feature_properties(self, ctx, req, valueset):
return {}
class MapView(Index):
extension = str('map.html')
mimetype = str('text/vnd.clld.map+html')
send_mimetype = str('text/html')
template = 'language/map_html.mako'
def template_context(self, ctx, req):
languages = list(ctx.get_query(limit=8000))
return {
'map': SelectedLanguagesMap(ctx, req, languages),
'languages': languages}
def includeme(config):
config.register_adapter(GeoJsonFeature, IParameter)
config.register_adapter(MapView, ILanguage, IIndex)
|
Python
| 0
|
@@ -73,23 +73,20 @@
meter, I
-Languag
+Valu
e, IInde
@@ -845,16 +845,45 @@
= list(
+v.valueset.language for v in
ctx.get_
@@ -1135,23 +1135,20 @@
pView, I
-Languag
+Valu
e, IInde
|
4090dac50c958a6a0876033a7d662e30c81b57ed
|
Fix client having too short a timeout in remote TLS usage
|
confluent_client/confluent/client.py
|
confluent_client/confluent/client.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2014 IBM Corporation
# Copyright 2015 Lenovo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import anydbm as dbm
import errno
import hashlib
import os
import socket
import ssl
import sys
import confluent.tlvdata as tlvdata
SO_PASSCRED = 16
def _parseserver(string):
if ']:' in string:
server, port = string[1:].split(']:')
elif string[0] == '[':
server = string[1:-1]
port = '13001'
elif ':' in string:
server, port = string.split(':')
else:
server = string
port = '13001'
return server, port
class Command(object):
def __init__(self, server=None):
self.connection = None
if server is None:
if 'CONFLUENT_HOST' in os.environ:
self.serverloc = os.environ['CONFLUENT_HOST']
else:
self.serverloc = '/var/run/confluent/api.sock'
else:
self.serverloc = server
if os.path.isabs(self.serverloc) and os.path.exists(self.serverloc):
self._connect_unix()
else:
self._connect_tls()
tlvdata.recv(self.connection)
authdata = tlvdata.recv(self.connection)
if authdata['authpassed'] == 1:
self.authenticated = True
else:
self.authenticated = False
if not self.authenticated and 'CONFLUENT_USER' in os.environ:
username = os.environ['CONFLUENT_USER']
passphrase = os.environ['CONFLUENT_PASSPHRASE']
self.authenticate(username, passphrase)
def authenticate(self, username, password):
tlvdata.send(self.connection,
{'username': username, 'password': password})
authdata = tlvdata.recv(self.connection)
if authdata['authpassed'] == 1:
self.authenticated = True
def handle_results(self, ikey, rc, res):
if 'error' in res:
sys.stderr.write('Error: {0}\n'.format(res['error']))
if 'errorcode' in res:
return res['errorcode']
else:
return 1
if 'databynode' not in res:
return 0
res = res['databynode']
for node in res:
if 'error' in res[node]:
sys.stderr.write('{0}: Error: {1}\n'.format(
node, res[node]['error']))
if 'errorcode' in res[node]:
rc |= res[node]['errorcode']
else:
rc |= 1
elif ikey in res[node]:
print('{0}: {1}'.format(node, res[node][ikey]['value']))
return rc
def simple_noderange_command(self, noderange, resource, input=None,
key=None):
try:
rc = 0
if resource[0] == '/':
resource = resource[1:]
# The implicit key is the resource basename
if key is None:
ikey = resource.rpartition('/')[-1]
else:
ikey = key
if input is None:
for res in self.read('/noderange/{0}/{1}'.format(
noderange, resource)):
rc = self.handle_results(ikey, rc, res)
else:
for res in self.update('/noderange/{0}/{1}'.format(
noderange, resource), {ikey: input}):
rc = self.handle_results(ikey, rc, res)
return rc
except KeyboardInterrupt:
print('')
return 0
def read(self, path, parameters=None):
return send_request('retrieve', path, self.connection, parameters)
def update(self, path, parameters=None):
return send_request('update', path, self.connection, parameters)
def create(self, path, parameters=None):
return send_request('create', path, self.connection, parameters)
def delete(self, path, parameters=None):
return send_request('delete', path, self.connection, parameters)
def _connect_unix(self):
self.connection = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.connection.setsockopt(socket.SOL_SOCKET, SO_PASSCRED, 1)
self.connection.connect(self.serverloc)
def _connect_tls(self):
server, port = _parseserver(self.serverloc)
for res in socket.getaddrinfo(server, port, socket.AF_UNSPEC,
socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
try:
self.connection = socket.socket(af, socktype, proto)
self.connection.setsockopt(
socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
except:
self.connection = None
continue
try:
self.connection.settimeout(5)
self.connection.connect(sa)
except:
raise
self.connection.close()
self.connection = None
continue
break
if self.connection is None:
raise Exception("Failed to connect to %s" % self.serverloc)
#TODO(jbjohnso): server certificate validation
clientcfgdir = os.path.join(os.path.expanduser("~"), ".confluent")
try:
os.makedirs(clientcfgdir)
except OSError as exc:
if not (exc.errno == errno.EEXIST and os.path.isdir(clientcfgdir)):
raise
cacert = os.path.join(clientcfgdir, "ca.pem")
certreqs = ssl.CERT_REQUIRED
knownhosts = False
if not os.path.exists(cacert):
cacert = None
certreqs = ssl.CERT_NONE
knownhosts = True
self.connection = ssl.wrap_socket(self.connection, ca_certs=cacert,
cert_reqs=certreqs,
ssl_version=ssl.PROTOCOL_TLSv1)
if knownhosts:
certdata = self.connection.getpeercert(binary_form=True)
fingerprint = 'sha512$' + hashlib.sha512(certdata).hexdigest()
hostid = '@'.join((port,server))
khf = dbm.open(os.path.join(clientcfgdir, "knownhosts"), 'c', 384)
if hostid in khf:
if fingerprint == khf[hostid]:
return
else:
replace = raw_input(
"MISMATCHED CERTIFICATE DATA, ACCEPT NEW? (y/n):")
if replace not in ('y', 'Y'):
raise Exception("BAD CERTIFICATE")
print 'Adding new key for %s:%s' % (server, port)
khf[hostid] = fingerprint
def send_request(operation, path, server, parameters=None):
"""This function iterates over all the responses
received from the server.
:param operation: The operation to request, retrieve, update, delete,
create, start, stop
:param path: The URI path to the resource to operate on
:param server: The socket to send data over
:param parameters: Parameters if any to send along with the request
"""
payload = {'operation': operation, 'path': path}
if parameters is not None:
payload['parameters'] = parameters
tlvdata.send(server, payload)
result = tlvdata.recv(server)
while '_requestdone' not in result:
yield result
result = tlvdata.recv(server)
|
Python
| 0
|
@@ -5415,16 +5415,65 @@
ect(sa)%0A
+ self.connection.settimeout(None)%0A
|
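The fix above clears the socket timeout once the TLS connection is up: a 5-second limit is right for `connect()`, but if left in place it would also abort any `recv` that idles longer than 5 seconds — far too short for a long-lived API session. The general pattern:

import socket

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(5)                   # bound only the connection attempt
s.connect(('example.com', 443))   # hypothetical endpoint
s.settimeout(None)                # back to fully blocking mode for the session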
e3d082588db63690a846007beb8ddd42ebd4144e
|
Include pages urls into the main url patterns
|
config/urls.py
|
config/urls.py
|
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
urlpatterns = [
# Django Admin, use {% url 'admin:index' %}
url(settings.ADMIN_URL, admin.site.urls),
# To be removed
url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name='home'),
url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name='about'),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
# This allows the error pages to be debugged during development, just visit
# these url in browser to see how these error pages look like.
urlpatterns += [
url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception('Bad Request!')}),
url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception('Permission Denied')}),
url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception('Page not Found')}),
url(r'^500/$', default_views.server_error),
]
if 'debug_toolbar' in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns = [
url(r'^__debug__/', include(debug_toolbar.urls)),
] + urlpatterns
|
Python
| 0.000001
|
@@ -363,21 +363,20 @@
#
-To be removed
+Custom urls%0A
%0A
@@ -387,170 +387,51 @@
(r'%5E
-$
',
-TemplateView.as_view(template_name='pages/home.html'), name='home'),%0A url(r'%5Eabout/$', TemplateView.as_view(template_name='pages/about.html'), name='about'
+include('pages.urls', namespace='pages')
),%0A%0A
|
2f412d6d98e6b03c1e3997d6acb0d15cace12e28
|
remove trailing spaces
|
coopy/utils.py
|
coopy/utils.py
|
def method_or_none(instance, name):
method = getattr(instance, name)
if (name[0:2] == '__' and name[-2,:] == '__') or \
not callable(method) :
return None
return method
def action_check(obj):
return (hasattr(obj, '__readonly'),
hasattr(obj, '__unlocked'),
hasattr(obj, '__abort_exception'))
def inject(obj, name, dependency):
obj.__dict__[name] = dependency
|
Python
| 0.999463
|
@@ -218,23 +218,16 @@
method%0A
-
%0Adef act
@@ -281,17 +281,16 @@
donly'),
-
%0A
@@ -368,17 +368,16 @@
ption'))
-
%0A%0Adef in
|
919a4f183e9a09ded7cf6272f9be300f22408c08
|
fix method or none method name comparison
|
coopy/utils.py
|
coopy/utils.py
|
def method_or_none(instance, name):
method = getattr(instance, name)
if (name[0:2] == '__' and name[-2,:] == '__') or \
not callable(method) :
return None
return method
def action_check(obj):
return (hasattr(obj, '__readonly'),
hasattr(obj, '__unlocked'),
hasattr(obj, '__abort_exception'))
def inject(obj, name, dependency):
obj.__dict__[name] = dependency
|
Python
| 0.000002
|
@@ -103,17 +103,16 @@
name%5B-2
-,
:%5D == '_
|
b59f21ee28cc8eaf56cbc49fd7926e243e92276f
|
Fix bug for users with a space inside their usernames.
|
core/models.py
|
core/models.py
|
from django.core.exceptions import AppRegistryNotReady
from django.core.urlresolvers import reverse_lazy
from django.conf import settings
from django.db import models
from django.db.models.signals import post_save
from django.utils.translation import ugettext as _
class Profile(models.Model):
about_me = models.TextField(
_('About me'), max_length=500, null=True, blank=True)
github = models.CharField(
_('Github username'), max_length=50, null=True, blank=True)
facebook = models.CharField(
_('Facebook username'), max_length=50, null=True, blank=True)
site = models.URLField(
_('Site url'), max_length=200, null=True, blank=True)
# relations
user = models.OneToOneField(to=settings.AUTH_USER_MODEL)
class Meta:
verbose_name = _('Profile')
def __unicode__(self):
return self.user.get_full_name()
def get_absolute_url(self):
return reverse_lazy(
'user_profile', kwargs={'user__username': self.user.username})
def get_github_url(self):
if self.github:
return 'http://github.com/{}'.format(self.github)
def get_facebook_url(self):
if self.facebook:
return 'http://facebook.com/{}'.format(self.facebook)
def get_site_url(self):
return self.site
def create_user_profile(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user=instance)
try:
from django.contrib.auth import get_user_model
User = get_user_model()
except AppRegistryNotReady:
from django.contrib.auth.models import User
post_save.connect(create_user_profile, sender=User)
|
Python
| 0
|
@@ -206,16 +206,26 @@
ost_save
+, pre_save
%0Afrom dj
@@ -1447,16 +1447,133 @@
ance)%0A%0A%0A
+def slugify_user_username(sender, instance, **kwargs):%0A instance.username = instance.username.replace(' ', '_')%0A%0A%0A
try:%0A
@@ -1777,8 +1777,61 @@
r=User)%0A
+pre_save.connect(slugify_user_username, sender=User)%0A
|
b1eb69620bbe875d117498ed95e009a019e54fab
|
Fix vote app URL patterns
|
votes/urls.py
|
votes/urls.py
|
from django.conf.urls import include, url
from django.views.generic import TemplateView
from votes.views import VoteView, results, system_home
urlpatterns = [
url(r'^$', system_home, name="system"),
url(r'^(?P<vote_name>[\w-]+)$', VoteView.as_view(), name="vote"),
url(r'^(?P<vote_name>[\w-]+)/results$', results, name="results"),
]
|
Python
| 0.000002
|
@@ -231,16 +231,17 @@
%3E%5B%5Cw-%5D+)
+/
$', Vote
@@ -310,16 +310,17 @@
/results
+/
$', resu
|
1d0b114c7e918c87e14d9ea7a7c49cb9120db68b
|
Bump version (#128)
|
vt/version.py
|
vt/version.py
|
"""Defines VT release version."""
__version__ = '0.17.2'
|
Python
| 0
|
@@ -52,7 +52,7 @@
.17.
-2
+3
'%0A
|
1034699a21dc0cf4862624d076d487deae7df9e2
|
add NullHandler to avoid "no handlers could be found" error.
|
Lib/fontTools/__init__.py
|
Lib/fontTools/__init__.py
|
from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
version = "3.0"
|
Python
| 0
|
@@ -91,16 +91,247 @@
import *
+%0Aimport logging%0A%0A# add a do-nothing handler to the libary's top-level logger, to avoid%0A# %22no handlers could be found%22 error if client doesn't configure logging%0Alog = logging.getLogger(__name__)%0Alog.addHandler(logging.NullHandler())
%0A%0Aversio
|
44620b2fa69500e1cada5622fa96eedd9c931006
|
Add test for MessageBeep()
|
Lib/test/test_winsound.py
|
Lib/test/test_winsound.py
|
# Ridiculously simple test of the winsound module for Windows.
import winsound
for i in range(100, 2000, 100):
winsound.Beep(i, 75)
print "Hopefully you heard some sounds increasing in frequency!"
|
Python
| 0
|
@@ -72,16 +72,22 @@
winsound
+, time
%0Afor i i
@@ -201,8 +201,351 @@
uency!%22%0A
+winsound.MessageBeep()%0Atime.sleep(0.5)%0Awinsound.MessageBeep(winsound.MB_OK)%0Atime.sleep(0.5)%0Awinsound.MessageBeep(winsound.MB_ICONASTERISK)%0Atime.sleep(0.5)%0Awinsound.MessageBeep(winsound.MB_ICONEXCLAMATION)%0Atime.sleep(0.5)%0Awinsound.MessageBeep(winsound.MB_ICONHAND)%0Atime.sleep(0.5)%0Awinsound.MessageBeep(winsound.MB_ICONQUESTION)%0Atime.sleep(0.5)%0A
|
df77f4de261e3f21cc95f56fbb4dd738c02a2dd1
|
Put all test metrics on the same row of the dataframe.
|
src/graph_world/models/benchmarker.py
|
src/graph_world/models/benchmarker.py
|
import json
import os
from abc import ABC, abstractmethod
import apache_beam as beam
import gin
import pandas as pd
class Benchmarker(ABC):
def __init__(self):
self._model_name = ''
def GetModelName(self):
return self._model_name
# Train and test the model.
# Arguments:
# * element: output of the 'Convert to torchgeo' beam stage.
# * output_path: where to save logs and data.
# Returns:
# * named dict with keys/vals:
# 'losses': iterable of loss values over the epochs.
# 'test_metrics': dict of named test metrics for the benchmark run.
@abstractmethod
def Benchmark(self, element):
del element # unused
return {'losses': [], 'test_metrics': {}}
class BenchmarkerWrapper(ABC):
@abstractmethod
def GetBenchmarker(self):
return Benchmarker()
# These two functions would be unnecessary if we were using Python 3.7. See:
# - https://github.com/huggingface/transformers/issues/8453
# - https://github.com/huggingface/transformers/issues/8212
@abstractmethod
def GetBenchmarkerClass(self):
return Benchmarker
@abstractmethod
def GetModelHparams(self):
return {}
class BenchmarkGNNParDo(beam.DoFn):
# The commented lines here, and those in process, could be uncommented and
# replace the alternate code below it, if we were using Python 3.7. See:
# - https://github.com/huggingface/transformers/issues/8453
# - https://github.com/huggingface/transformers/issues/8212
def __init__(self, benchmarker_wrappers):
# self._benchmarkers = [benchmarker_wrapper().GetBenchmarker() for
# benchmarker_wrapper in benchmarker_wrappers]
self._benchmarker_classes = [benchmarker_wrapper().GetBenchmarkerClass() for
benchmarker_wrapper in benchmarker_wrappers]
self._model_hparams = [benchmarker_wrapper().GetModelHparams() for
benchmarker_wrapper in benchmarker_wrappers]
# /end alternate code.
self._output_path = None
def SetOutputPath(self, output_path):
self._output_path = output_path
def process(self, element):
# for benchmarer in self._benchmarkers:
for benchmarker_class, model_hparams in zip(self._benchmarker_classes, self._model_hparams):
sample_id = element['sample_id']
# benchmarker_out = self._benchmarker.Benchmark(element)
benchmarker = benchmarker_class(**model_hparams)
benchmarker_out = benchmarker.Benchmark(element)
# /end alternate code.
# Dump benchmark results to file.
benchmark_result = {
'sample_id': sample_id,
'losses': benchmarker_out['losses'],
'generator_config': element['generator_config']
}
benchmark_result.update(benchmarker_out['test_metrics'])
results_object_name = os.path.join(self._output_path, '{0:05}_results.txt'.format(sample_id))
with beam.io.filesystems.FileSystems.create(results_object_name, 'text/plain') as f:
buf = bytes(json.dumps(benchmark_result), 'utf-8')
f.write(buf)
f.close()
# Return benchmark data for next beam stage.
output_data = benchmarker_out['test_metrics']
output_data.update(element['generator_config'])
output_data.update(element['metrics'])
output_data['model_name'] = benchmarker.GetModelName()
yield pd.DataFrame(output_data, index=[sample_id])
|
Python
| 0
|
@@ -2118,24 +2118,45 @@
, element):%0A
+ output_data = %7B%7D%0A
# for be
@@ -3144,29 +3144,33 @@
.%0A
-output_data =
+for key, value in
benchma
@@ -3193,19 +3193,112 @@
etrics'%5D
-%0A
+.items():%0A
+ output_data%5B%0A '%25s__%25s' %25 (benchmarker.GetModelName(), key)%5D = value%0A%0A
outp
@@ -3337,26 +3337,24 @@
r_config'%5D)%0A
-
output_d
@@ -3388,71 +3388,8 @@
'%5D)%0A
- output_data%5B'model_name'%5D = benchmarker.GetModelName()%0A
|
886105ba5f4a8b53fbf5a39f7cb3dc48ce544a3a
|
add total days check
|
cgi-bin/oa-gdd.py
|
cgi-bin/oa-gdd.py
|
#!/usr/bin/python
"""
Produce a OA GDD Plot, dynamically!
$Id: $:
"""
import sys, os
sys.path.insert(0, '/mesonet/www/apps/iemwebsite/scripts/lib')
os.environ[ 'HOME' ] = '/tmp/'
os.environ[ 'USER' ] = 'nobody'
import iemplot
import cgi
import datetime
import network
import iemdb
COOP = iemdb.connect('coop', bypass=True)
ccursor = COOP.cursor()
form = cgi.FieldStorage()
if ("year1" in form and "year2" in form and
"month1" in form and "month2" in form and
"day1" in form and "day2" in form):
sts = datetime.datetime(int(form["year1"].value),
int(form["month1"].value), int(form["day1"].value))
ets = datetime.datetime(int(form["year2"].value),
int(form["month2"].value), int(form["day2"].value))
else:
sts = datetime.datetime(2011,5,1)
ets = datetime.datetime(2011,10,1)
baseV = 50
if "base" in form:
baseV = int(form["base"].value)
maxV = 86
if "max" in form:
maxV = int(form["max"].value)
# Make sure we aren't in the future
now = datetime.datetime.today()
if ets > now:
ets = now
st = network.Table("IACLIMATE")
# Now we load climatology
#sts = {}
#rs = mesosite.query("SELECT id, x(geom) as lon, y(geom) as lat from stations WHERE \
# network = 'IACLIMATE'").dictresult()
#for i in range(len(rs)):
# sts[ rs[i]["id"].lower() ] = rs[i]
# Compute normal from the climate database
sql = """SELECT station,
sum(gddXX(%s, %s, high, low)) as gdd
from alldata_ia WHERE year = %s and day >= '%s' and day < '%s'
GROUP by station""" % (baseV, maxV, sts.year, sts.strftime("%Y-%m-%d"),
ets.strftime("%Y-%m-%d"))
lats = []
lons = []
gdd50 = []
valmask = []
ccursor.execute(sql)
for row in ccursor:
id = row[0]
if not st.sts.has_key(id):
continue
lats.append( st.sts[id]['lat'] )
lons.append( st.sts[id]['lon'] )
gdd50.append( row[1] )
valmask.append( True )
cfg = {
'wkColorMap': 'BlAqGrYeOrRe',
'nglSpreadColorStart': 2,
'nglSpreadColorEnd' : -1,
'_showvalues' : True,
'_valueMask' : valmask,
'_format' : '%.0f',
'_title' : "Iowa %s thru %s GDD(base=%s,max=%s) Accumulation" % (
sts.strftime("%Y: %d %b"),
(ets - datetime.timedelta(days=1)).strftime("%d %b"),
baseV, maxV),
'lbTitleString' : "F",
}
# Generates tmp.ps
tmpfp = iemplot.simple_contour(lons, lats, gdd50, cfg)
iemplot.webprocess(tmpfp)
|
Python
| 0.000006
|
@@ -1407,16 +1407,26 @@
) as gdd
+, count(*)
%0A from
@@ -1679,16 +1679,46 @@
te(sql)%0A
+total_days = (ets - sts).days%0A
for row
@@ -1785,16 +1785,63 @@
ontinue%0A
+ if row%5B2%5D %3C (total_days * 0.9):%0A continue%0A
lats.a
|
53a86e2318256e6edcca3d1e4ce2981a29bd8208
|
Add flask-email configs
|
web/config.py
|
web/config.py
|
import os
basedir = os.path.abspath(os.path.dirname(__file__))
class BaseConfiguration(object):
DEBUG = False
TESTING = False
ADMINS = frozenset(['youremail@yourdomain.com'])
SECRET_KEY = 'SecretKeyForSessionSigning'
THREADS_PER_PAGE = 8
DATABASE = 'app.db'
DATABASE_PATH = os.path.join(basedir, DATABASE)
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + DATABASE_PATH
SECURITY_PASSWORD_HASH = 'sha512_crypt'
SECURITY_PASSWORD_SALT = 'SuPeRsEcReTsAlT'
SECURITY_POST_LOGIN_VIEW = '/ViewProfile'
SECURITY_CHANGEABLE = True
SECURITY_REGISTERABLE = True
SECURITY_TRACKABLE = True
SECURITY_SEND_REGISTER_EMAIL = False
SECURITY_SEND_PASSWORD_CHANGE_EMAIL = False
SECURITY_SEND_PASSWORD_RESET_NOTICE_EMAIL = False
MAIL_SUPPRESS_SEND = True
class DockerConfig(BaseConfiguration):
SECRET_KEY = os.environ.get('SECRET_KEY')
DB_NAME = os.environ.get('DB_NAME')
DB_USER = os.environ.get('DB_USER')
DB_PASS = os.environ.get('DB_PASS')
DB_SERVICE = os.environ.get('DB_SERVICE')
DB_PORT = os.environ.get('DB_PORT')
SQLALCHEMY_DATABASE_URI = 'postgresql://{0}:{1}@{2}:{3}/{4}'.format(
DB_USER, DB_PASS, DB_SERVICE, DB_PORT, DB_NAME
)
RQ_DEFAULT_HOST="redis_1"
RQ_DEFAULT_PORT=6379
class DebugConfiguration(DockerConfig):
DEBUG = True
class TestConfiguration(BaseConfiguration):
TESTING = True
DATABASE = 'tests.db'
DATABASE_PATH = os.path.join(basedir, DATABASE)
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + DATABASE_PATH
|
Python
| 0.000001
|
@@ -1283,16 +1283,217 @@
T=6379%0A%0A
+ MAIL_SERVER = %22smtp_server.usgo.org%22%0A MAIL_PORT = 587%0A MAIL_USE_TLS = True%0A MAIL_USERNAME = %22noreply@usgo.org%22%0A MAIL_PASSWORD = %22password%22%0A MAIL_DEFAULT_SENDER = %22noreply@usgo.org%22%0A%0A
class De
|
21c9764f7d747d16e72548f98c73df4c26aa1e95
|
Optimize iterating samples
|
src/zephyr/collector.py
|
src/zephyr/collector.py
|
import threading
import collections
import zephyr
class EventStream:
def __init__(self):
self.events = []
self.events_cleaned_up = 0
self.lock = threading.RLock()
def __iter__(self):
with self.lock:
return iter(self.events[:])
def __len__(self):
with self.lock:
corrected_length = len(self.events) + self.events_cleaned_up
return corrected_length
def __getitem__(self, index):
with self.lock:
assert 0 <= index < len(self)
assert index >= self.events_cleaned_up
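# Indices are absolute over the stream's lifetime; subtract the cleaned-up count to index the trimmed list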
corrected_index = index - self.events_cleaned_up
return self.events[corrected_index]
def append(self, value):
with self.lock:
self.events.append(value)
def clean_up_events_before(self, timestamp_lower_bound):
with self.lock:
cutoff_index = 0
for event_timestamp, event_value in self.events: #@UnusedVariable
if event_timestamp < timestamp_lower_bound:
cutoff_index += 1
else:
break
if cutoff_index:
self.events = self.events[cutoff_index:]
self.events_cleaned_up += cutoff_index
def iterate_samples(self, from_sample_index, to_end_timestamp):
sample_index = from_sample_index
while True:
if len(self) > sample_index:
event_timestamp, event_value = self[sample_index]
if event_timestamp <= to_end_timestamp:
yield event_value
sample_index += 1
continue
break
class SignalStream:
def __init__(self, signal_packet):
self.samplerate = signal_packet.samplerate
self.samples = []
self.lock = threading.RLock()
self.end_timestamp = None
self.append_signal_packet(signal_packet)
def append_signal_packet(self, signal_packet):
with self.lock:
assert signal_packet.samplerate == self.samplerate
self.samples.extend(signal_packet.samples)
self.end_timestamp = signal_packet.timestamp + len(signal_packet.samples) / float(signal_packet.samplerate)
def remove_samples_before(self, timestamp_lower_bound):
with self.lock:
samples_to_remove = max(0, int((timestamp_lower_bound - self.start_timestamp) * self.samplerate))
if samples_to_remove:
self.samples = self.samples[samples_to_remove:]
return samples_to_remove
@property
def start_timestamp(self):
return self.end_timestamp - len(self.samples) / float(self.samplerate)
def iterate_timed_samples(self):
with self.lock:
start_timestamp = self.start_timestamp
sample_period = 1.0 / self.samplerate
for sample_i, sample in enumerate(self.samples):
sample_timestamp = start_timestamp + sample_i * sample_period
yield sample_timestamp, sample
class SignalStreamHistory:
def __init__(self):
self._signal_streams = []
self.samples_cleaned_up = 0
def append_signal_packet(self, signal_packet, starts_new_stream):
if starts_new_stream or not len(self._signal_streams):
signal_stream = SignalStream(signal_packet)
self._signal_streams.append(signal_stream)
else:
signal_stream = self._signal_streams[-1]
signal_stream.append_signal_packet(signal_packet)
def get_signal_streams(self):
return self._signal_streams
def _cleanup_signal_stream(self, signal_stream, timestamp_bound):
if timestamp_bound >= signal_stream.end_timestamp:
self._signal_streams.remove(signal_stream)
samples_removed = len(signal_stream.samples)
else:
samples_removed = signal_stream.remove_samples_before(timestamp_bound)
self.samples_cleaned_up += samples_removed
def clean_up_samples_before(self, history_limit):
for signal_stream in self._signal_streams[:]:
first_timestamp = signal_stream.start_timestamp
if first_timestamp >= history_limit:
break
self._cleanup_signal_stream(signal_stream, history_limit)
def iterate_samples(self, from_sample_index, to_end_timestamp):
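# Map the caller's absolute sample index into the surviving streams, then walk stream-local samples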
from_sample_index = from_sample_index - self.samples_cleaned_up
signal_stream_start_index = 0
for signal_stream in self._signal_streams:
sample_count = len(signal_stream.samples)
next_signal_stream_start_index = signal_stream_start_index + sample_count
if from_sample_index < next_signal_stream_start_index:
for local_sample_index, (sample_timestamp, sample) in enumerate(signal_stream.iterate_timed_samples()):
global_sample_index = signal_stream_start_index + local_sample_index
if global_sample_index < from_sample_index:
continue
elif sample_timestamp > to_end_timestamp:
break
yield sample
signal_stream_start_index = next_signal_stream_start_index
class MeasurementCollector:
def __init__(self, history_length_seconds=20.0):
self._signal_stream_histories = collections.defaultdict(SignalStreamHistory)
self._event_streams = collections.defaultdict(EventStream)
self.history_length_seconds = history_length_seconds
self.last_cleanup_time = 0.0
def get_signal_stream_history(self, stream_type):
return self._signal_stream_histories[stream_type]
def get_event_stream(self, stream_type):
return self._event_streams[stream_type]
def iterate_signal_stream_histories(self):
return self._signal_stream_histories.items()
def iterate_event_streams(self):
return self._event_streams.items()
def handle_signal(self, signal_packet, starts_new_stream):
signal_stream_history = self._signal_stream_histories[signal_packet.type]
signal_stream_history.append_signal_packet(signal_packet, starts_new_stream)
self.cleanup_if_needed()
def handle_event(self, stream_name, value):
self._event_streams[stream_name].append(value)
self.cleanup_if_needed()
def cleanup_if_needed(self):
now = zephyr.time()
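# Throttle: purge old history at most once every five seconds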
if self.last_cleanup_time < now - 5.0:
history_limit = now - self.history_length_seconds
for signal_stream_history in self._signal_stream_histories.values():
signal_stream_history.clean_up_samples_before(history_limit)
for event_stream in self._event_streams.values():
event_stream.clean_up_events_before(history_limit)
self.last_cleanup_time = now
|
Python
| 0.000029
|
@@ -2992,24 +2992,40 @@
samples(self
+, skip_samples=0
):%0D%0A
@@ -3216,16 +3216,51 @@
.samples
+%5Bskip_samples:%5D, start=skip_samples
):%0D%0A
@@ -5239,139 +5239,37 @@
-for local_sample_index, (sample_timestamp, sample) in enumerate(signal_stream.iterate_timed_samples()):%0D%0A global
+samples_to_skip = max(0, from
_sam
@@ -5274,25 +5274,25 @@
ample_index
-=
+-
signal_stre
@@ -5309,35 +5309,11 @@
ndex
- + local_sample_index%0D%0A
+)%0D%0A
@@ -5346,113 +5346,115 @@
- if global_
+for sample_timestamp,
sample
-_index %3C from_sample_index:%0D%0A continue%0D%0A el
+ in signal_stream.iterate_timed_samples(samples_to_skip):%0D%0A
if s
|
2c4a35ca944a342792fb934b17b156a77a090d3c
|
Add concat to FormattedTextStringFormat
|
bot/action/util/textformat.py
|
bot/action/util/textformat.py
|
from bot.api.domain import Message
class FormattedText:
def __init__(self, mode="HTML"):
self.formatter = TextFormatterFactory.get_for_mode(mode)
self.mode = mode
self.text = ""
def normal(self, text: str):
self.text += self._escaped(text)
return self
def bold(self, text: str):
self.text += self.formatter.bold(self._escaped(text))
return self
def italic(self, text: str):
self.text += self.formatter.italic(self._escaped(text))
return self
def url(self, text: str, url: str):
self.text += self.formatter.url(self._escaped(text), self._escaped(url))
return self
def code_inline(self, text: str):
self.text += self.formatter.code_inline(self._escaped(text))
return self
def code_block(self, text: str):
self.text += self.formatter.code_block(self._escaped(text))
return self
def newline(self):
self.text += "\n"
return self
def concat(self, formatted_text):
""":type formatted_text: FormattedText"""
assert self._is_compatible(formatted_text), "Cannot concat text with different modes"
self.text += formatted_text.text
return self
def join(self, formatted_texts):
""":type formatted_texts: list[FormattedText]"""
formatted_texts = list(formatted_texts) # materialize first, so elements are not lost after the first iteration when a generator is passed
for formatted_text in formatted_texts:
assert self._is_compatible(formatted_text), "Cannot join text with different modes"
self.text = self.text.join((formatted_text.text for formatted_text in formatted_texts))
return self
def _is_compatible(self, formatted_text):
""":type formatted_text: FormattedText"""
return self.mode == formatted_text.mode
def build_message(self):
return Message.create(self.text, parse_mode=self.mode)
def _escaped(self, text):
if type(text) is not str:
text = str(text)
return self.formatter.escape(text)
def start_format(self):
return FormattedTextStringFormat(self)
class TextFormatter:
def escape(self, text):
return text
def bold(self, text):
return text
def italic(self, text):
return text
def url(self, text, url):
return text + " (" + url + ")"
def code_inline(self, text):
return text
def code_block(self, text):
return text
class HtmlTextFormatter(TextFormatter):
def escape(self, text):
return text.replace("&", "&")\
.replace("<", "<")\
.replace(">", ">")\
.replace("\"", """)
def bold(self, text):
return self._surround_with_tag(text, "b")
def italic(self, text):
return self._surround_with_tag(text, "i")
def url(self, text, url):
return self._surround_with_tag(text, "a", href=url)
def code_inline(self, text):
return self._surround_with_tag(text, "code")
def code_block(self, text):
return self._surround_with_tag(text, "pre")
@staticmethod
def _surround_with_tag(text, tag, **attributes):
attributes_string = ""
for name, value in attributes.items():
attributes_string += " " + name + "=\"" + value + "\""
open_tag = "<" + tag + attributes_string + ">"
close_tag = "</" + tag + ">"
return open_tag + text + close_tag
class MarkdownTextFormatter(TextFormatter):
def escape(self, text):
return text.replace("[", "\\[")\
.replace("_", "\\_")\
.replace("*", "\\*")\
.replace("`", "\\`")
def bold(self, text):
return self._wrap(text, "*")
def italic(self, text):
return self._wrap(text, "_")
def url(self, text, url):
return self._wrap(text, "[", "]") + self._wrap(url, "(", ")")
def code_inline(self, text):
return self._wrap(text, "`")
def code_block(self, text):
return self._wrap(text, "```")
@staticmethod
def _wrap(text, wrapping_chars, close_wrapping_chars=None):
if close_wrapping_chars is None:
close_wrapping_chars = wrapping_chars
return wrapping_chars + text + close_wrapping_chars
class TextFormatterFactory:
markdown = MarkdownTextFormatter()
html = HtmlTextFormatter()
@classmethod
def get_for_mode(cls, mode):
if mode == "Markdown":
return cls.get_markdown_formatter()
elif mode == "HTML":
return cls.get_html_formatter()
else:
raise Exception("Unknown TextFormatter requested (" + mode + ")")
@classmethod
def get_markdown_formatter(cls):
return cls.markdown
@classmethod
def get_html_formatter(cls):
return cls.html
class FormattedTextStringFormat:
def __init__(self, formatted_text):
self.formatted_text = formatted_text
self.formatter = formatted_text.formatter
self.format_args = []
self.format_kwargs = {}
def normal(self, *args, **kwargs):
self._add(lambda x: x, args, kwargs)
return self
def bold(self, *args, **kwargs):
self._add(self.formatter.bold, args, kwargs)
return self
def italic(self, *args, **kwargs):
self._add(self.formatter.italic, args, kwargs)
return self
def url(self, text: str, url: str, name=None):
text = self.formatter.url(self._escaped(text), self._escaped(url))
if name is None:
self.format_args.append(text)
else:
self.format_kwargs[name] = text
return self
def code_inline(self, *args, **kwargs):
self._add(self.formatter.code_inline, args, kwargs)
return self
def code_block(self, *args, **kwargs):
self._add(self.formatter.code_block, args, kwargs)
return self
def _add(self, func_to_apply, args, kwargs):
self.format_args.extend((func_to_apply(self._escaped(arg)) for arg in args))
for kwarg in kwargs:
self.format_kwargs[kwarg] = func_to_apply(self._escaped(kwargs[kwarg]))
def _escaped(self, text):
return self.formatted_text._escaped(text)
def end_format(self):
self.formatted_text.text = self.formatted_text.text.format(*self.format_args, **self.format_kwargs)
return self.formatted_text
|
Python
| 0.000001
|
@@ -6322,32 +6322,577 @@
_escaped(text)%0A%0A
+ def concat(self, *args, **kwargs):%0A %22%22%22%0A :type args: FormattedText%0A :type kwargs: FormattedText%0A %22%22%22%0A for arg in args:%0A assert self.formatted_text._is_compatible(arg), %22Cannot concat text with different modes%22%0A self.format_args.append(arg.text)%0A for kwarg in kwargs:%0A value = kwargs%5Bkwarg%5D%0A assert self.formatted_text._is_compatible(value), %22Cannot concat text with different modes%22%0A self.format_kwargs%5Bkwarg%5D = value.text%0A return self%0A%0A
def end_form
|
ff2958c25812fb9486e8611e44c93ba32b737866
|
migrate res.company object to new API
|
l10n_br_stock_account/res_company.py
|
l10n_br_stock_account/res_company.py
|
# -*- encoding: utf-8 -*-
###############################################################################
# #
# Copyright (C) 2011 Renato Lima - Akretion #
# #
#This program is free software: you can redistribute it and/or modify #
#it under the terms of the GNU Affero General Public License as published by #
#the Free Software Foundation, either version 3 of the License, or #
#(at your option) any later version. #
# #
#This program is distributed in the hope that it will be useful, #
#but WITHOUT ANY WARRANTY; without even the implied warranty of #
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
#GNU Affero General Public License for more details. #
# #
#You should have received a copy of the GNU Affero General Public License #
#along with this program. If not, see <http://www.gnu.org/licenses/>. #
###############################################################################
from openerp.osv import orm, fields
class res_company(orm.Model):
_inherit = 'res.company'
_columns = {
'stock_fiscal_category_id': fields.many2one(
'l10n_br_account.fiscal.category',
u'Categoria Fiscal Padrão Estoque'),
'stock_in_fiscal_category_id': fields.many2one(
'l10n_br_account.fiscal.category',
u'Categoria Fiscal Padrão de Entrada',
domain="[('journal_type', 'in', ('sale_refund', 'purchase')), "
"('fiscal_type', '=', 'product'), ('type', '=', 'input')]"),
'stock_out_fiscal_category_id': fields.many2one(
'l10n_br_account.fiscal.category',
u'Categoria Fiscal Padrão Saída',
domain="[('journal_type', 'in', ('purchase_refund', 'sale')), "
"('fiscal_type', '=', 'product'), ('type', '=', 'output')]"),
}
|
Python
| 0
|
@@ -1396,20 +1396,16 @@
nerp
-.osv
import
orm,
@@ -1400,19 +1400,22 @@
import
-orm
+models
, fields
@@ -1427,23 +1427,25 @@
ass
-res_c
+ResC
ompany(
-orm
+models
.Mod
@@ -1482,34 +1482,13 @@
ny'%0A
+%0A
-_columns = %7B%0A '
stoc
@@ -1503,34 +1503,34 @@
_category_id
-':
+ =
fields.
many2one(%0A
@@ -1509,33 +1509,33 @@
ory_id = fields.
-m
+M
any2one(%0A
@@ -1519,36 +1519,32 @@
ields.Many2one(%0A
-
'l10n_br
@@ -1570,36 +1570,32 @@
egory',%0A
-
u'Categoria Fisc
@@ -1613,27 +1613,21 @@
stoque')
-,%0A
+%0A
- '
stock_in
@@ -1641,34 +1641,34 @@
_category_id
-':
+ =
fields.
many2one(%0A
@@ -1647,33 +1647,33 @@
ory_id = fields.
-m
+M
any2one(%0A
@@ -1657,36 +1657,32 @@
ields.Many2one(%0A
-
'l10n_br
@@ -1708,36 +1708,32 @@
egory',%0A
-
u'Categoria Fisc
@@ -1751,28 +1751,24 @@
e Entrada',%0A
-
doma
@@ -1827,36 +1827,32 @@
e')), %22%0A
-
%22('fiscal_type',
@@ -1898,19 +1898,13 @@
)%5D%22)
-,
%0A
- '
stoc
@@ -1931,18 +1931,18 @@
y_id
-':
+ =
fields.
many
@@ -1937,17 +1937,17 @@
fields.
-m
+M
any2one(
@@ -1951,28 +1951,24 @@
ne(%0A
-
'l10n_br_acc
@@ -1986,28 +1986,24 @@
.category',%0A
-
u'Ca
@@ -2036,28 +2036,24 @@
a',%0A
-
domain=%22%5B('j
@@ -2100,28 +2100,24 @@
'sale')), %22%0A
-
%22('f
@@ -2176,12 +2176,5 @@
)%5D%22)
-,%0A %7D
%0A
|
5000eea27c511ad036f03b64e2be7dc69bac0845
|
Add `JSONField`
|
jacquard/odm/fields.py
|
jacquard/odm/fields.py
|
import abc
class BaseField(object, metaclass=abc.ABCMeta):
def __init__(self, null=False, default=None):
self.null = null
self.default = default
@abc.abstractmethod
def transform_to_storage(self, value):
raise NotImplementedError()
@abc.abstractmethod
def transform_from_storage(self, value):
raise NotImplementedError()
def _learn_from_owner(self, owner):
if owner is None:
return
if hasattr(self, 'owner'):
return
self.owner = owner
for field_name, value in vars(owner).items():
if value is self:
self.name = field_name
def validate(self, raw_value):
if not self.null and raw_value is None:
raise ValueError("%s is not nullable" % self.name)
def __get__(self, obj, owner):
if obj is None:
self._learn_from_owner(owner)
return self
try:
raw_value = obj._fields[self.name]
except KeyError:
return self.default
return self.transform_from_storage(raw_value)
def __set__(self, obj, value):
self._learn_from_owner(type(obj))
if value is None:
obj._fields[self.name] = None
else:
obj._fields[self.name] = self.transform_to_storage(value)
if obj.session:
obj.session.mark_instance_dirty(obj)
def __set_name__(self, owner, name):
self.owner = owner
self.name = name
class TextField(BaseField):
def transform_to_storage(self, value):
return value
def transform_from_storage(self, value):
return value
|
Python
| 0
|
@@ -4,16 +4,28 @@
ort abc%0A
+import copy%0A
%0A%0Aclass
@@ -1598,21 +1598,26 @@
return
+str(
value
+)
%0A%0A de
@@ -1676,8 +1676,199 @@
n value%0A
+%0A%0Aclass JSONField(BaseField):%0A def transform_to_storage(self, value):%0A return copy.deepcopy(value)%0A%0A def transform_from_storage(self, value):%0A return copy.deepcopy(value)%0A
|
aa3a8ee76f85ef1c3c4c0beb7b6c46a0c69961f1
|
allow absence of tornado
|
http2/__init__.py
|
http2/__init__.py
|
# -*- coding: utf-8 -*-
from tornado import version_info
if version_info[0] >= 4:
from http2.torando4 import *
else:
raise NotImplementedError()
|
Python
| 0.000096
|
@@ -18,17 +18,25 @@
-8 -*-%0A%0A
-%0A
+try:%0A
from tor
@@ -60,17 +60,55 @@
on_info%0A
-%0A
+except ImportError:%0A pass%0Aelse:%0A
if versi
@@ -124,16 +124,20 @@
%5D %3E= 4:%0A
+
from
@@ -165,42 +165,5 @@
t *%0A
-else:%0A raise NotImplementedError()
%0A
|
ea1fbd21761b5fbe60f179988114320dcb93cf92
|
remove unused attr
|
benchbuild/extensions/base.py
|
benchbuild/extensions/base.py
|
"""
Extension base-classes for compile-time and run-time experiments.
"""
import collections as c
import logging
import typing as tp
from abc import ABCMeta
import attr
from benchbuild.utils import run
LOG = logging.getLogger(__name__)
class Extension(metaclass=ABCMeta):
"""An experiment functor to implement composable experiments.
An experiment extension is always callable with an arbitrary amount of
arguments. The varargs component of an extension's `__call__` operator
is fed the binary command that we currently execute and all arguments
to the binary.
Any customization necessary for the extension (e.g., dynamic configuration
options) has to be passed by keyword argument.
Args:
*extensions: Variable length list of child extensions we manage.
config (:obj:`dict`, optional): Dictionary of name value pairs to be
stored for this extension.
Attributes:
next_extensions: Variable length list of child extensions we manage.
config (:obj:`dict`, optional): Dictionary of name value pairs to be
stored for this extension.
"""
def __init__(self,
*extensions: 'Extension',
config: tp.Any = None,
**kwargs: tp.Any):
"""Initialize an extension with an arbitrary number of children."""
del kwargs
self.next_extensions = extensions
self.config = config
def call_next(self, *args: tp.Any,
**kwargs: tp.Any) -> tp.List[run.RunInfo]:
"""Call all child extensions with the given arguments.
This calls all child extensions and collects the results for
our own parent. Use this to control the execution of your
nested extensions from your own extension.
Returns:
:obj:`list` of :obj:`RunInfo`: A list of collected
results of our child extensions.
"""
all_results = []
for ext in self.next_extensions:
LOG.debug(" %s ", ext)
results = ext(*args, **kwargs)
LOG.debug(" %s => %s", ext, results)
if results is None:
LOG.warning("No result from: %s", ext)
continue
result_list = []
if isinstance(results, c.Iterable):
result_list.extend(results)
else:
result_list.append(results)
all_results.extend(result_list)
return all_results
def __lshift__(self, rhs: 'Extension') -> 'Extension':
rhs.next_extensions = [self]
return rhs
def print(self, indent: int = 0) -> None:
"""Print a structural view of the registered extensions."""
LOG.info("%s:: %s", indent * " ", self.__class__)
for ext in self.next_extensions:
ext.print(indent=indent + 2)
def __call__(self, *args, **kwargs) -> tp.List[run.RunInfo]:
return self.call_next(*args, **kwargs)
def __str__(self) -> str:
return "Extension"
class ExtensionRequired(ValueError):
pass
class MissingExtension(Extension):
"""
Hard fail at runtime, when the user forgets to set an extension.
This raises an exception as soon as a user forgets to provide an extension
for a project from the experiment.
This should be the earliest possible moment to fail, without restricting
existing old experiments.
"""
def __call__(self, *args, **kwargs) -> tp.List[run.RunInfo]:
raise ExtensionRequired()
|
Python
| 0.000018
|
@@ -155,21 +155,8 @@
ta%0A%0A
-import attr%0A%0A
from
|
1484ae176d5bc8ba0afa3e948c8e507b2926a2c7
|
fix fqbn expansion
|
build_platform.py
|
build_platform.py
|
import sys
import glob
import time
import os
import subprocess
import collections
from clint.textui import colored
# add user bin to path!
BUILD_DIR = ''
# add user bin to path!
try:
BUILD_DIR = os.environ["TRAVIS_BUILD_DIR"]
except KeyError:
pass # ok maybe we're on actions?
try:
BUILD_DIR = os.environ["GITHUB_WORKSPACE"]
except KeyError:
pass # ok maybe we're on travis?
os.environ["PATH"] += os.pathsep + BUILD_DIR + "/bin"
print("build dir:", BUILD_DIR)
#os.system('pwd')
#os.system('ls -lA')
CROSS = u'\N{cross mark}'
CHECK = u'\N{check mark}'
ALL_PLATFORMS={
# classic Arduino AVR
"uno" : "arduino:avr:uno",
"leonardo" : "arduino:avr:leonardo",
"mega2560" : "arduino:avr:mega:cpu=atmega2560",
# Arduino SAMD
"zero" : "arduino:samd:arduino_zero_native",
"cpx" : "arduino:samd:adafruit_circuitplayground_m0",
# Espressif
"esp8266" : "esp8266:esp8266:huzzah:eesz=4M3M,xtal=80",
"esp32" : "esp32:esp32:featheresp32:FlashFreq=80",
# Adafruit AVR
"trinket" : "adafruit:avr:trinket5",
"gemma" : "arduino:avr:gemma",
"cpc" : "arduino:avr:circuitplay32u4cat",
# Adafruit SAMD
"m4" : "adafruit:samd:adafruit_metro_m4:speed=120",
"cpx_ada" : "adafruit:samd:adafruit_circuitplayground_m0",
# Adafruit nRF
"cpb" : "adafruit:nrf52:cplaynrf52840:softdevice=s140v6,debug=l0",
"main_platforms" : ("uno", "leonardo", "mega2560", "zero",
"esp8266", "esp32", "m4", "cpb"),
}
BSP_URLS = "https://adafruit.github.io/arduino-board-index/package_adafruit_index.json,http://arduino.esp8266.com/stable/package_esp8266com_index.json,https://dl.espressif.com/dl/package_esp32_index.json"
def install_platform(platform):
print("Installing", platform, end=" ")
if os.system("arduino-cli core install "+platform+" --additional-urls "+BSP_URLS+" > /dev/null") != 0:
print(colored.red("FAILED to install "+platform))
exit(-1)
print(colored.green(CHECK))
def run_or_die(cmd, error):
if os.system(cmd) != 0:
print(colored.red(error))
exit(-1)
################################ Install Arduino IDE
print()
print('#'*40)
print(colored.yellow("INSTALLING ARDUINO IDE"))
print('#'*40)
run_or_die('curl -fsSL https://raw.githubusercontent.com/arduino/arduino-cli/master/install.sh | sh', "FAILED to install arduino CLI")
# make all our directories we need for files and libraries
for directory in ("/.arduino15", "/.arduino15/packages",
"/Arduino", "/Arduino/libraries"):
os.mkdir(os.environ["HOME"]+directory)
run_or_die('arduino-cli config init > /dev/null',
"FAILED to configure arduino CLI")
run_or_die('arduino-cli core update-index > /dev/null',
"FAILED to update arduino core")
run_or_die("arduino-cli core update-index --additional-urls "+BSP_URLS+
" > /dev/null", "FAILED to update core indecies")
# link test library folder to the arduino libraries folder
os.symlink(BUILD_DIR, os.environ['HOME']+'/Arduino/libraries/Adafruit_Test_Library')
################################ Install dependancies
try:
libprop = open(BUILD_DIR+'/library.properties')
for line in libprop:
if line.startswith("depends="):
deps = line.replace("depends=", "").split(",")
for dep in deps:
dep = dep.strip()
print(colored.yellow("Installing "+dep))
run_or_die('arduino-cli lib install "'+dep+'" > /dev/null',
"FAILED to install dependancy "+dep)
except OSError:
pass # no library properties
################################ Test platforms
platforms = []
success = 0
# expand groups:
for arg in sys.argv[1:]:
platform = ALL_PLATFORMS[arg]
if isinstance(platform, collections.Iterable):
platforms.extend(platform)
if isinstance(platform, str):
platforms.append(platform)
for fqbn in platforms:
#print("building", platform, "full name", fqbn)
print('#'*80)
print(colored.yellow("SWITCHING TO "+fqbn), end=' ')
install_platform(":".join(fqbn.split(':', 2)[0:2])) # take only first two elements
print('#'*80)
exampledir = BUILD_DIR+"/examples"
for example in os.listdir(exampledir):
for filename in os.listdir(exampledir+"/"+example):
if filename.endswith(".ino"):
print('\t'+filename, end=' ')
cmd = ['arduino-cli', 'compile', '--fqbn', fqbn,
exampledir+"/"+example+"/"+filename]
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
r = proc.wait()
err = proc.stderr.read()
out = proc.stdout.read()
#print("OUTPUT: ", out)
#print("ERROUT: ", err)
if r == 0:
print(colored.green(CHECK))
else:
print(colored.red(CROSS))
print(colored.red(err.decode("utf-8")))
success = 1
exit(success)
|
Python
| 0
|
@@ -3811,33 +3811,72 @@
-platforms.extend(platform
+for p in platform:%0A platforms.append(ALL_PLATFORMS%5Bp%5D
)%0A
@@ -3970,60 +3970,8 @@
ms:%0A
- #print(%22building%22, platform, %22full name%22, fqbn)%0A
|
50b7345c1dcb3c2fcc05fa61108fa1649ae17a0c
|
Add admin filters
|
django_iceberg/admin.py
|
django_iceberg/admin.py
|
# -*- coding: utf-8 -*-
from django.contrib import admin
from django_iceberg.models import UserIcebergModel
class UserIcebergModelAdmin(admin.ModelAdmin):
list_display = ('user', 'environment', 'last_updated', 'application_namespace')
raw_id_fields = ("user",)
admin.site.register(UserIcebergModel, UserIcebergModelAdmin)
|
Python
| 0
|
@@ -104,16 +104,17 @@
gModel%0A%0A
+%0A
class Us
@@ -169,18 +169,16 @@
display
-
= ('user
@@ -236,16 +236,123 @@
space')%0A
+ list_filter = ('environment', 'last_updated')%0A search_fields = ('user_username', 'user_first_name')%0A
raw_
|
256017557537bbf40eb593637f1e373cfdbbb1ce
|
Add macOS SDK version checking. Credits to @se4u for the command
|
buildlib/macos.py
|
buildlib/macos.py
|
# ungoogled-chromium: A Google Chromium variant for removing Google integration and
# enhancing privacy, control, and transparency
# Copyright (C) 2016 Eloston
#
# This file is part of ungoogled-chromium.
#
# ungoogled-chromium is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ungoogled-chromium is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ungoogled-chromium. If not, see <http://www.gnu.org/licenses/>.
'''Code for macOS'''
import tempfile
import pathlib
import subprocess
import shutil
from ._util import BuilderException
from .common import QuiltPatchComponent, GNMetaBuildComponent
class MacOSBuilder(QuiltPatchComponent, GNMetaBuildComponent):
'''Builder for macOS'''
_resources = pathlib.Path("resources", "macos")
def check_build_environment(self):
super(MacOSBuilder, self).check_build_environment()
self.logger.info("Checking svn command...")
result = self._run_subprocess(["svn", "--version", "--quiet"], stdout=subprocess.PIPE,
universal_newlines=True)
if not result.returncode is 0:
raise BuilderException("svn command returned non-zero exit code {}".format(
result.returncode))
self.logger.debug("Using svn command version '{!s}'".format(result.stdout.strip("\n")))
self.logger.info("Checking libtool command...")
libtool_path = shutil.which("libtool")
if libtool_path is None:
raise BuilderException("Could not find command 'libtool' in PATH variable")
self.logger.debug("Found libtool at '{!s}'".format(libtool_path))
# TODO: Maybe add check for macOS SDK version
self.logger.info("Checking g++ compiler for building libc++...")
gxx_compiler = shutil.which("g++-4.9")
if gxx_compiler is None or not pathlib.Path(gxx_compiler).is_file():
raise BuilderException("GNU compiler '{}' does not exist or is not a file".format(
gxx_compiler))
def build(self):
if (self._sandbox_dir / pathlib.Path("third_party", "libc++-static", "libc++.a")).exists():
self.logger.info("libc++.a already exists. Skipping its building")
else:
self.logger.info("Building libc++.a ...")
result = self._run_subprocess("./build.sh",
cwd=str(self._sandbox_dir /
pathlib.Path("third_party", "libc++-static")),
shell=True)
if not result.returncode == 0:
raise BuilderException("libc++.a build script returned non-zero exit code")
super(MacOSBuilder, self).build()
def generate_package(self):
# Based off of chrome/tools/build/mac/build_app_dmg
self.logger.info("Generating .dmg file...")
with tempfile.TemporaryDirectory() as tmpdirname:
pkg_dmg_command = [
str((self._sandbox_dir / pathlib.Path(
"chrome", "installer", "mac", "pkg-dmg")).relative_to(self.build_dir)),
"--source", "/var/empty",
"--target", "ungoogled-chromium_{}-{}_macos.dmg".format(self.chromium_version,
self.release_revision),
"--format", "UDBZ",
"--verbosity", "2",
"--volname", "Chromium", # From chrome/app/theme/chromium/BRANDING
"--tempdir", tmpdirname,
"--copy", str(self._sandbox_dir.relative_to(self.build_dir) / self.build_output /
"Chromium.app") + "/:/Chromium.app/",
"--symlink", "/Applications:/Drag to here to install"
]
result = self._run_subprocess(pkg_dmg_command, cwd=str(self.build_dir))
if not result.returncode == 0:
raise BuilderException("pkg-dmg returned non-zero exit code")
|
Python
| 0
|
@@ -2065,53 +2065,684 @@
-# TODO: Maybe add check for macOS SDK version
+self.logger.info(%22Checking macOS SDK version...%22)%0A result = self._run_subprocess(%5B%22xcrun%22, %22--show-sdk-version%22%5D, stdout=subprocess.PIPE,%0A universal_newlines=True)%0A if not result.returncode is 0:%0A raise BuilderException(%22xcrun command returned non-zero exit code %7B%7D%22.format(%0A result.returncode))%0A if not result.stdout.strip() in %5B%2210.10%22, %2210.11%22%5D: # TODO: Verify this is correct%0A raise BuilderException(%22Unsupported macOS SDK version '%7B!s%7D'%22.format(%0A result.stdout.strip()))%0A self.logger.debug(%22Using macOS SDK version '%7B!s%7D'%22.format(result.stdout.strip()))%0A
%0A
|
019a1ab10b71d4bb768e96957e9d485efeb588fc
|
add admin class for Attachment model --- djangobb_forum/admin.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-)
|
djangobb_forum/admin.py
|
djangobb_forum/admin.py
|
# -*- coding: utf-8 -*-
from django.contrib import admin
from django.contrib.auth import admin as auth_admin
from django.contrib.auth.models import User
from djangobb_forum.models import Category, Forum, Topic, Post, Profile, Reputation,\
Report, Ban
class CategoryAdmin(admin.ModelAdmin):
list_display = ['name', 'position', 'forum_count']
class ForumAdmin(admin.ModelAdmin):
list_display = ['name', 'category', 'position', 'topic_count']
raw_id_fields = ['moderators', 'last_post']
class TopicAdmin(admin.ModelAdmin):
list_display = ['name', 'forum', 'created', 'head', 'post_count']
search_fields = ['name']
raw_id_fields = ['user', 'subscribers', 'last_post']
class PostAdmin(admin.ModelAdmin):
list_display = ['topic', 'user', 'created', 'updated', 'summary']
search_fields = ['body']
raw_id_fields = ['topic', 'user', 'updated_by']
class ProfileAdmin(admin.ModelAdmin):
list_display = ['user', 'status', 'time_zone', 'location', 'language']
raw_id_fields = ['user']
class ReputationAdmin(admin.ModelAdmin):
list_display = ['from_user', 'to_user', 'post', 'sign', 'time', 'reason']
raw_id_fields = ['from_user', 'to_user', 'post']
class ReportAdmin(admin.ModelAdmin):
list_display = ['reported_by', 'post', 'zapped', 'zapped_by', 'created', 'reason']
raw_id_fields = ['reported_by', 'post']
class BanAdmin(admin.ModelAdmin):
list_display = ['user', 'ban_start', 'ban_end', 'reason']
raw_id_fields = ['user']
class UserAdmin(auth_admin.UserAdmin):
list_display = ['username', 'email', 'first_name', 'last_name', 'is_staff', 'is_active']
def get_urls(self):
from django.conf.urls.defaults import patterns, url
return patterns('',
url(r'^(\d+)/password/$', self.admin_site.admin_view(self.user_change_password), name='user_change_password'),
) + super(auth_admin.UserAdmin, self).get_urls()
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
admin.site.register(Category, CategoryAdmin)
admin.site.register(Forum, ForumAdmin)
admin.site.register(Topic, TopicAdmin)
admin.site.register(Post, PostAdmin)
admin.site.register(Profile, ProfileAdmin)
admin.site.register(Reputation, ReputationAdmin)
admin.site.register(Report, ReportAdmin)
admin.site.register(Ban, BanAdmin)
admin.site.disable_action('delete_selected') #disabled, because delete_selected ignoring delete model method
|
Python
| 0
|
@@ -231,16 +231,17 @@
utation,
+
%5C%0A Re
@@ -249,16 +249,28 @@
ort, Ban
+, Attachment
%0A%0A%0Aclass
@@ -1960,16 +1960,218 @@
urls()%0A%0A
+class AttachmentAdmin(admin.ModelAdmin):%0A list_display = %5B'id', 'name', 'size', 'path', 'hash', %5D%0A search_fields = %5B'name'%5D%0A list_display_links = ('name',)%0A list_filter = (%22content_type%22,)%0A%0A
%0Aadmin.s
@@ -2556,16 +2556,65 @@
anAdmin)
+%0Aadmin.site.register(Attachment, AttachmentAdmin)
%0A%0Aadmin.
@@ -2716,8 +2716,9 @@
l method
+%0A
|
df4abb113c3d3faa271e5385fec61ef2542c389c
|
Remove obsolete docstring
|
djangosaml2/backends.py
|
djangosaml2/backends.py
|
# Copyright (C) 2010-2012 Yaco Sistemas (http://www.yaco.es)
# Copyright (C) 2009 Lorenzo Gil Sanchez <lorenzo.gil.sanchez@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from django.contrib.auth.backends import ModelBackend
from django.contrib.auth.models import User, SiteProfileNotAvailable
from django.core.exceptions import ObjectDoesNotExist
logger = logging.getLogger('djangosaml2')
class Saml2Backend(ModelBackend):
"""This backend is added automatically by the assertion_consumer_service
view.
Don't add it to settings.AUTHENTICATION_BACKENDS.
"""
def authenticate(self, session_info=None, attribute_mapping=None,
create_unknown_user=True):
if session_info is None or attribute_mapping is None:
logger.error('Session info or attribute mapping are None')
return None
if not 'ava' in session_info:
logger.error('"ava" key not found in session_info')
return None
attributes = session_info['ava']
if not attributes:
logger.error('The attributes dictionary is empty')
saml_user = None
for saml_attr, django_fields in attribute_mapping.items():
if 'username' in django_fields and saml_attr in attributes:
saml_user = attributes[saml_attr][0]
if saml_user is None:
logger.error('Could not find saml_user value')
return None
user = None
username = self.clean_username(saml_user)
# Note that this could be accomplished in one try-except clause, but
# instead we use get_or_create when creating unknown users since it has
# built-in safeguards for multiple threads.
if create_unknown_user:
logger.debug(
'Check if the user "%s" exists or create otherwise' % username)
user, created = User.objects.get_or_create(username=username)
if created:
logger.debug('New user created')
user = self.configure_user(user, attributes, attribute_mapping)
else:
logger.debug('User updated')
user = self.update_user(user, attributes, attribute_mapping)
else:
logger.debug('Retrieving existing user "%s"' % username)
try:
user = User.objects.get(username=username)
user = self.update_user(user, attributes, attribute_mapping)
except User.DoesNotExist:
logger.error('The user "%s" does not exist' % username)
pass
return user
def clean_username(self, username):
"""Performs any cleaning on the "username" prior to using it to get or
create the user object. Returns the cleaned username.
By default, returns the username unchanged.
"""
return username
def configure_user(self, user, attributes, attribute_mapping):
"""Configures a user after creation and returns the updated user.
By default, returns the user with his attributes updated.
"""
user.set_unusable_password()
return self.update_user(user, attributes, attribute_mapping,
force_save=True)
def update_user(self, user, attributes, attribute_mapping,
force_save=False):
"""Update a user with a set of attributes and returns the updated user.
By default it uses a mapping defined in the settings constant
SAML_ATTRIBUTE_MAPPING. For each attribute, if the user object has
that field defined it will be set, otherwise it will try to set
it in the profile object.
"""
if not attribute_mapping:
return user
try:
profile = user.get_profile()
except ObjectDoesNotExist:
profile = None
except SiteProfileNotAvailable:
profile = None
user_modified = False
profile_modified = False
for saml_attr, django_attrs in attribute_mapping.items():
try:
for attr in django_attrs:
if hasattr(user, attr):
setattr(user, attr, attributes[saml_attr][0])
user_modified = True
elif profile is not None and hasattr(profile, attr):
setattr(profile, attr, attributes[saml_attr][0])
profile_modified = True
except KeyError:
# the saml attribute is missing
pass
if user_modified or force_save:
user.save()
if profile is not None and (profile_modified or force_save):
profile.save()
return user
|
Python
| 0.008334
|
@@ -956,158 +956,8 @@
nd):
-%0A %22%22%22This backend is added automatically by the assertion_consumer_service%0A view.%0A%0A Don't add it to settings.AUTHENTICATION_BACKENDS.%0A %22%22%22
%0A%0A
|
8a6b88c38b2844fba03b6664fe828ebbd5a08a68
|
use pkdlog so it passes test for pkdp
|
tests/pkdebug2_test.py
|
tests/pkdebug2_test.py
|
# -*- coding: utf-8 -*-
u"""pytest for `pykern.pkdebug`
:copyright: Copyright (c) 2015 Bivio Software, Inc. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
def test_format(capsys):
from pykern import pkconfig
pkconfig.reset_state_for_testing({
'PYKERN_PKDEBUG_MAX_DEPTH': '2',
'PYKERN_PKDEBUG_MAX_ELEMENTS': '5',
'PYKERN_PKDEBUG_MAX_STRING': '5',
})
from pykern.pkdebug import pkdp
def _e(expected, value):
pkdp('{}', value)
out, err = capsys.readouterr()
err = ' '.join(err.split(' ')[1:])
assert expected + '\n' == err, 'expected={} actual={}'.format(expected, err)
_e(
"{'a': 'b', 'c': {'d': {<SNIP>}}, 'h': 'i'}",
{'a': 'b', 'c': {'d': {'e': {'f': 'g'}}}, 'h': 'i'},
)
_e(
'[1, 2, 3, 4, 5, 6, <SNIP>]',
[1, 2, 3, 4, 5, 6, 7, 8],
)
_e(
'(1, 2, 3, 4)',
(1, 2, 3, 4),
)
_e(
'(1, {2, 3}, {4: 5}, [6, 7])',
(1, {2, 3}, {4: 5}, [6, 7])
)
_e(
"{'Passw<SNIP>': '<REDA<SNIP>', 'c': {'botp': '<REDA<SNIP>'}, 'totp': '<REDA<SNIP>', 'q': ['pAssw<SNIP>', 1], 'x': 'y', 's': 'r', <SNIP>}",
{'Passwd': 'b', 'c': {'botp': 'a'}, 'totp': 'iiii', 'q': ['pAssword', 1], 'x': 'y', 's': 'r', 't': 'u'},
)
_e('a' * 5 + '<SNIP>', 'a' * 80)
_e('<SNIP>' + 'a' * 5, '\n File "' + 'a' * 80)
|
Python
| 0
|
@@ -518,17 +518,19 @@
port pkd
-p
+log
%0A%0A de
@@ -563,17 +563,19 @@
pkd
-p
+log
('%7B%7D', v
|
db4b63ee097116c5be711d1b6a69100065f1a885
|
update format unicode
|
weby/utils.py
|
weby/utils.py
|
# coding=utf8
from datetime import datetime, date
def format_dic(dic):
"""将 dic 格式化为 JSON,处理日期等特殊格式"""
for key, value in dic.iteritems():
dic[key] = format_value(value)
return dic
def format_value(value):
if isinstance(value, dict):
return format_dic(value)
elif isinstance(value, list):
return format_list(value)
elif isinstance(value, datetime):
return value.isoformat()
elif isinstance(value, date):
return value.isoformat()
#elif isinstance(value, API_V1_Mixture):
#return value.to_api_dic(is_compact=True)
else:
return value
def format_list(l):
return [format_value(x) for x in l]
|
Python
| 0.000009
|
@@ -44,16 +44,28 @@
e, date%0A
+import json%0A
%0A%0Adef fo
@@ -236,12 +236,47 @@
alue
+, include_fields=%5B%5D, is_compact=True
):%0A
-%0A
@@ -404,17 +404,16 @@
(value)%0A
-%0A
elif
@@ -475,24 +475,25 @@
ormat()%0A
+#
elif isinsta
@@ -503,20 +503,20 @@
(value,
-date
+bool
):%0A
@@ -522,75 +522,70 @@
+#
return
-value.isoformat()
+1 if value else 0
%0A
-#
elif
-isinstance(value, API_V1_Mixture
+hasattr(value, 'to_api_dic'
):%0A
@@ -587,25 +587,24 @@
'):%0A
-#
return value
@@ -620,28 +620,92 @@
ic(i
-s_compact=Tr
+nclude_fields, is_compact)%0A else:%0A try:%0A json.dumps(val
ue)%0A
else
@@ -700,30 +700,63 @@
ue)%0A
+
-else:%0A
+ return value%0A except:%0A
return v
@@ -747,28 +747,39 @@
+
return
+unicode(
value
+)
%0A%0A%0Adef f
|
643cd60fb15157d8568fb9ec211c36c3002f44b6
|
clarify that this script DOES NOT preserve legacy Sonobat metadata (but it SHOULD)
|
bin/sb2guano.py
|
bin/sb2guano.py
|
#!/usr/bin/env python2
"""
Convert files with Sonobat-format metadata to use GUANO metadata.
"""
import sys
import os
import os.path
import mmap
import re
import wave
from contextlib import closing
from datetime import datetime
from pprint import pprint
from guano import GuanoFile
# regex for parsing Sonobat metadata
SB_MD_REGEX = re.compile(r'MMMMMMMMM(?P<sb_md>[\w\W]+)MMMMMMMMM')
SB_FREQ_REGEX = re.compile(r'\(#([\d]+)#\)')
SB_TE_REGEX = re.compile(r'<&([\d]*)&>')
SB_DFREQ_REGEX = re.compile(r'\[!([\w]+)!\]')
# old SonoBat format e.g. TransectTestRun1-24Mar11-16,27,56-Myoluc.wav
SONOBAT_FILENAME1_REGEX = re.compile(r'(?P<date>[ 0123][0-9][A-Z][a-z][a-z][0-9][0-9]-[012][0-9],[0-6][0-9],[0-6][0-9])(-(?P<species>[A-Za-z]+))?')
SONOBAT_FILENAME1_TIMESTAMP_FMT = '%d%b%y-%H,%M,%S'
# new SonoBat format 4-digit year e.g. TransectTestRun1-20110324_162756-Myoluc.wav
SONOBAT_FILENAME2_REGEX = re.compile(r'(?P<date>\d{8}_\d{6})(-(?P<species>[A-Za-z]+))?')
SONOBAT_FILENAME2_TIMESTAMP_FMT = '%Y%m%d_%H%M%S'
# new new SonoBat format 2-digit year e.g. TransectTestRun1-110324_162756-Myoluc.wav
SONOBAT_FILENAME3_REGEX = re.compile(r'(?P<date>\d{6}_\d{6})(-(?P<species>[A-Za-z]+))?')
SONOBAT_FILENAME3_TIMESTAMP_FMT = '%y%m%d_%H%M%S'
# AR125 raw
AR125_FILENAME_REGEX = re.compile(r'_(?P<date>D\d{8}T\d{6})m\d{3}(-(?P<species>[A-Za-z]+))?')
AR125_FILENAME_TIMESTAMP_FMT = 'D%Y%m%dT%H%M%S'
SB_FILENAME_FORMATS = [
(SONOBAT_FILENAME1_REGEX, SONOBAT_FILENAME1_TIMESTAMP_FMT),
(SONOBAT_FILENAME2_REGEX, SONOBAT_FILENAME2_TIMESTAMP_FMT),
(SONOBAT_FILENAME3_REGEX, SONOBAT_FILENAME3_TIMESTAMP_FMT),
(AR125_FILENAME_REGEX, AR125_FILENAME_TIMESTAMP_FMT)
]
def extract_sonobat_metadata(fname):
"""Extract Sonobat-format metadata as a dict"""
sb_md = {}
# parse the Sonobat metadata itself
with open(fname, 'rb') as infile:
with closing(mmap.mmap(infile.fileno(), 0, access=mmap.ACCESS_READ)) as mmfile:
md_match = re.search(SB_MD_REGEX, mmfile)
if not md_match:
print >> sys.stderr, 'No Sonobat metadata found in file: ' + fname
return None
md = md_match.groups()[0]
sb_md['samplerate'] = int(re.search(SB_FREQ_REGEX, md).groups()[0])
sb_md['te'] = int(re.search(SB_TE_REGEX, md).groups()[0])
sb_md['dfreq'] = re.search(SB_DFREQ_REGEX, md).groups()[0]
sb_md['note'] = md.split('!]', 1)[1]
with closing(wave.open(fname)) as wavfile:
duration_s = wavfile.getnframes() / float(wavfile.getframerate())
sb_md['length'] = duration_s / sb_md['te']
# try to extract info from the filename
for regex, timestamp_fmt in SB_FILENAME_FORMATS:
match = regex.search(fname)
if match:
sb_md['timestamp'] = datetime.strptime(match.group('date'), timestamp_fmt)
sb_md['species'] = match.group('species')
return sb_md
def sonobat2guano(fname):
"""Convert a file with Sonobat metadata to GUANO metadata (but leave the old stuff in place)"""
print '\n', fname
sb_md = extract_sonobat_metadata(fname)
pprint(sb_md)
gfile = GuanoFile(fname)
gfile['GUANO|Version'] = 1.0
if 'timestamp' in sb_md:
gfile['Timestamp'] = sb_md['timestamp']
if sb_md.get('te', 1) != 1:
gfile['TE'] = sb_md['te']
gfile['Length'] = sb_md['length']
gfile['Note'] = sb_md['note'].strip().replace('\r', '\\n')
if sb_md.get('species', None):
gfile['Species Auto ID'] = sb_md['species']
print gfile._as_string()
gfile.write()
if __name__ == '__main__':
if len(sys.argv) < 2:
print >> sys.stderr, 'usage: %s FILE...' % os.path.basename(sys.argv[0])
sys.exit(2)
for fname in sys.argv[1:]:
sonobat2guano(fname)
|
Python
| 0
|
@@ -85,16 +85,47 @@
etadata.
+%0A%0Ausage: sb2guano.py WAVFILE...
%0A%22%22%22%0A%0Aim
@@ -3053,43 +3053,8 @@
data
- (but leave the old stuff in place)
%22%22%22%0A
|
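The script above recovers a timestamp by trying several filename regexes and strptime formats in order. A self-contained sketch of that lookup loop, using two of the formats shown above:

import re
from datetime import datetime

FORMATS = [
    (re.compile(r'(?P<date>\d{8}_\d{6})'), '%Y%m%d_%H%M%S'),
    (re.compile(r'(?P<date>\d{6}_\d{6})'), '%y%m%d_%H%M%S'),
]

def parse_timestamp(fname):
    # try each (regex, format) pair until one matches
    for regex, fmt in FORMATS:
        match = regex.search(fname)
        if match:
            return datetime.strptime(match.group('date'), fmt)
    return None

print(parse_timestamp('Run1-20110324_162756-Myoluc.wav'))  # 2011-03-24 16:27:56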
28a9b6d744f088fed074e2d7166ed14d78c942a7
|
Update logging
|
weibo/core.py
|
weibo/core.py
|
# coding=utf-8
import concurrent.futures
import itertools
import logging
import os
from colorama import Fore, Style
import settings
from weibo.api import WeiboApi
class Crawler(object):
def __init__(self, target_url):
"""
Initialize.
:param target_url: URL of the target Weibo profile page
"""
# Target data
self.target = WeiboApi.fetch_user_info(target_url)
self.uid, self.name = self.target['oid'], self.target['onick']
# Local preprocessing
self.root = self.__init_folder()
self.logger = logging.getLogger(self.__class__.__name__)
def start(self):
"""
Download each album in turn.
:return: None
"""
self.logger.info(Fore.BLUE + Style.BRIGHT + 'Start downloading the Weibo albums of "%s"' % self.name)
# Fetch the album list page by page
page_size, album_count = 20, 0
for page in itertools.count(1):
total, album_list = WeiboApi.fetch_album_list(self.uid, page, page_size)
if not album_list:
break
for album in album_list:
album_count += 1
msg = 'Start downloading Weibo album %d / %d: "%s"' % (album_count, total, album['caption'])
self.logger.info(Fore.BLUE + msg)
self.__download_album(album)
def __download_album(self, album):
"""
Download a single album.
:param album: album data
:return: None
"""
# IDs of all photos in the album
all_photo_ids = WeiboApi.fetch_photo_ids(self.uid, album['album_id'], album['type'])
self.logger.info(Fore.BLUE + 'Detected %d photos' % len(all_photo_ids))
# Data for all large images in the album
all_large_pics = self.__fetch_large_pics(album, all_photo_ids)
total = len(all_large_pics)
# Download all large images
with concurrent.futures.ThreadPoolExecutor() as executor:
album_path = self.__make_album_path(album)
future_to_large = {
executor.submit(self.__download_pic, large, album_path): large
for large in all_large_pics
}
for i, future in enumerate(concurrent.futures.as_completed(future_to_large)):
large = future_to_large[future]
count_msg = '%d/%d ' % (i + 1, total)
try:
result, path = future.result()
except Exception as exc:
err = '%s raised an exception: %s' % (WeiboApi.make_large_url(large), exc)
self.logger.error(''.join([Fore.RED, count_msg, err]))
else:
style = result and Style.NORMAL or Style.DIM
self.logger.info(''.join([Fore.GREEN, style, count_msg, path]))
else:
self.logger.info(Fore.BLUE + '"%s" is complete' % album['caption'])
def __fetch_large_pics(self, album, ids):
"""
Fetch the data for all large images in an album.
:param album: the album
:param ids: IDs of all photos
:return: list
"""
chunk_size, all_large_pics = 50, []
with concurrent.futures.ThreadPoolExecutor() as executor:
future_to_chunk = {
executor.submit(
WeiboApi.fetch_large_list,
self.uid, ids[i: i + chunk_size], album['type']
): i
for i in range(0, len(ids), chunk_size)
}
for future in concurrent.futures.as_completed(future_to_chunk):
chunk = future_to_chunk[future]
try:
large_list = future.result()
except Exception as exc:
err = 'Fetching large images for chunk %d of %d photos raised an exception: %s'
self.logger.error(Fore.RED + err % (chunk, chunk_size, exc))
else:
all_large_pics.extend(large_list)
self.logger.info(Fore.BLUE + 'Detected %d large images' % len(all_large_pics))
return all_large_pics
def __download_pic(self, pic, path):
"""
Download a single image.
:param pic: image data
:param path: target directory
:return: bool download result, str download path
"""
path = os.path.join(path, self.__make_photo_name(pic))
if not os.path.exists(path):
url = WeiboApi.make_large_url(pic)
response = WeiboApi.get(url, timeout=60)
with open(path, 'wb') as fp:
fp.write(response.content)
return True, path
return False, path
def __make_album_path(self, album):
"""
Build and create the album download path.
:param album: album data
:return: str download path
"""
album_path = os.path.join(self.root, album['caption'])
if not os.path.exists(album_path):
os.mkdir(album_path)
return album_path
def __make_photo_name(self, large):
"""
Build the image file name.
:param large: image data
:return: str file name
"""
f, p = large.get('feed_id'), large['pic_name']
return '_'.join(f and [f, p] or [p])
def __init_folder(self):
"""
Prepare the storage folders.
Needs to detect the case of the same uid with a different name.
:return: str storage folder name for this user
"""
# Root directory
root = settings.STORE_PATH
if not os.path.exists(root):
os.mkdir(root)
# User directory name
home = '-'.join([self.uid, self.name])
home_path = os.path.join(root, home)
# Handle the case where the user renamed their Weibo account
for dir in os.listdir(root):
if dir.startswith(self.uid):
if dir != home:
src = os.path.join(root, dir)
dst = os.path.join(root, home)
os.rename(src, dst)
break
else:  # no existing folder with this uid
os.mkdir(home_path)
return home_path
|
Python
| 0.000001
|
@@ -304,14 +304,129 @@
-# %E7%9B%AE%E6%A0%87%E6%95%B0%E6%8D%AE
+self.logger = logging.getLogger(self.__class__.__name__)%0A%0A # %E7%9B%AE%E6%A0%87%E6%95%B0%E6%8D%AE%0A self.logger.info(Fore.BLUE + target_url)
%0A
@@ -613,73 +613,8 @@
er()
-%0A self.logger = logging.getLogger(self.__class__.__name__)
%0A%0A
|
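The downloader above fans work out with concurrent.futures and maps each future back to its input via a dict so failures can be attributed. A self-contained sketch of that submit/as_completed pattern:

import concurrent.futures

def work(n):
    return n * n

with concurrent.futures.ThreadPoolExecutor() as executor:
    # map each future back to its input so errors can be attributed
    future_to_n = {executor.submit(work, n): n for n in range(5)}
    for future in concurrent.futures.as_completed(future_to_n):
        n = future_to_n[future]
        try:
            print(n, future.result())
        except Exception as exc:
            print(n, 'raised', exc)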
d5d5c342fde657de3e21fbd454bda007f79aa4db
|
Remove superfluous SSID confirmation messages
|
cicoclient/cli.py
|
cicoclient/cli.py
|
# Copyright Red Hat, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import logging
import sys
from cliff.lister import Lister
from cicoclient.wrapper import CicoWrapper
from cicoclient import utils
class Inventory(Lister):
"""Returns a node inventory from the ci.centos.org infrastructure."""
log = logging.getLogger(__name__)
def get_parser(self, prog_name):
parser = super(Inventory, self).get_parser(prog_name)
parser.add_argument(
'--all',
action='store_true',
default=False,
help='Display all nodes, regardless if an API key is used.'
)
parser.add_argument(
'--ssid',
metavar="<ssid>",
default=None,
help='Only return nodes matching the provided ssid.'
)
return parser
@utils.log_method(log)
def take_action(self, parsed_args):
api = CicoWrapper(
endpoint=self.app.options.endpoint,
api_key=self.app.options.api_key
)
inventory = api.inventory(all=parsed_args.all,
ssid=parsed_args.ssid)
columns = ('host_id', 'hostname', 'ip_address', 'chassis',
'used_count', 'current_state', 'comment', 'distro',
'rel', 'centos_version', 'architecture', 'node_pool')
return (columns,
(utils.get_dict_properties(inventory[host], columns)
for host in inventory))
class NodeGet(Lister):
"""Requests nodes from the ci.centos.org infrastructure"""
log = logging.getLogger(__name__)
def get_parser(self, prog_name):
parser = super(NodeGet, self).get_parser(prog_name)
parser.add_argument(
'--arch',
metavar='<arch>',
choices=['i386', 'x86_64'],
default='x86_64',
help='Requested server architecture. Defaults to x86_64.'
)
parser.add_argument(
'--release',
metavar='<release>',
choices=['5', '6', '7'],
default='7',
help='Requested CentOS release. Defaults to 7.'
)
parser.add_argument(
'--count',
metavar='<count>',
type=int,
default=1,
help='Requested amount of servers. Defaults to 1.'
)
parser.add_argument(
'--retry-count',
metavar='<count>',
type=int,
default=1,
help='Amount of retries to do in case of failure. Defaults to 1.'
)
parser.add_argument(
'--retry-interval',
metavar='<seconds>',
type=int,
default=10,
help='Wait between subsequent retries. Defaults to 10 (seconds).'
)
return parser
@utils.log_method(log)
def take_action(self, parsed_args):
api = CicoWrapper(
endpoint=self.app.options.endpoint,
api_key=self.app.options.api_key
)
hosts, ssid = api.node_get(arch=parsed_args.arch,
ver=parsed_args.release,
count=parsed_args.count,
retry_count=parsed_args.retry_count,
retry_interval=parsed_args.retry_interval)
message = "SSID for these servers: %s\n" % ssid
sys.stdout.write(message)
columns = ('host_id', 'hostname', 'ip_address', 'chassis',
'used_count', 'current_state', 'comment', 'distro',
'rel', 'centos_version', 'architecture', 'node_pool')
return (columns,
(utils.get_dict_properties(hosts[host], columns)
for host in hosts))
class NodeDone(Lister):
"""Releases nodes from the ci.centos.org infrastructure for a ssid"""
log = logging.getLogger(__name__)
def get_parser(self, prog_name):
parser = super(NodeDone, self).get_parser(prog_name)
parser.add_argument(
'ssid',
metavar='<ssid>',
help='SSID of the server pool to release'
)
return parser
@utils.log_method(log)
def take_action(self, parsed_args):
api = CicoWrapper(
endpoint=self.app.options.endpoint,
api_key=self.app.options.api_key
)
hosts = api.node_done(ssid=parsed_args.ssid)
message = "Released these servers with SSID: %s\n" % parsed_args.ssid
sys.stdout.write(message)
columns = ('host_id', 'hostname', 'ip_address', 'chassis',
'used_count', 'current_state', 'comment', 'distro',
'rel', 'centos_version', 'architecture', 'node_pool')
return (columns,
(utils.get_dict_properties(hosts[host], columns)
for host in hosts))
|
Python
| 0.000008
|
@@ -3913,98 +3913,8 @@
val)
-%0A message = %22SSID for these servers: %25s%5Cn%22 %25 ssid%0A sys.stdout.write(message)
%0A%0A
@@ -4908,120 +4908,8 @@
sid)
-%0A message = %22Released these servers with SSID: %25s%5Cn%22 %25 parsed_args.ssid%0A sys.stdout.write(message)
%0A%0A
|
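Each take_action above returns a (columns, rows) pair, with rows built by pulling named keys out of per-host dicts. A standalone sketch of that shaping step (get_dict_properties here is a hypothetical stand-in for cicoclient.utils.get_dict_properties, and the sample host data is invented):

def get_dict_properties(item, columns):
    # pull the named keys out of a dict, tolerating missing ones
    return tuple(item.get(c) for c in columns)

hosts = {'n1': {'hostname': 'n1', 'ip_address': '10.0.0.1'}}
columns = ('hostname', 'ip_address')
rows = [get_dict_properties(hosts[h], columns) for h in hosts]
print(columns, rows)  # ('hostname', 'ip_address') [('n1', '10.0.0.1')]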
72df56880ffaf0aba3b6f919d5a7f2add32623dc
|
Update binary_clock.py
|
binary_clock.py
|
binary_clock.py
|
__author__ = 'tim mcguire'
import datetime
import math
import Tkinter
import sys,os
def to_binary(dec, width):
x = width - 1
answer = ""
while x >= 0:
current_power = math.pow(2, x)
# how many powers of two fit into dec?
how_many = int(dec / current_power)
answer += str(how_many)
dec -= how_many * current_power
x -= 1
return answer
def draw_vertical_line(x):
main_canvas.create_line(x+17,start_y+20,x+17,start_y - 60)
def fill_dots(times_to_use, x,length):
tup = tens_and_ones(times_to_use)
for num in tup:
binary_string = to_binary(num, length)
length =4
x += right_step
y = start_y
for bit in reversed(binary_string):
coord = x, y, x + dot_size, y + dot_size
if bit == '1':
main_canvas.create_oval(coord, fill="red")
else:
main_canvas.create_oval(coord, fill="blue")
y -= 15
return x
def tens_and_ones(num):
tens = int(num / 10)
ones = num % 10
return tens, ones
def run(master):
t = datetime.datetime.now()
time_collection = t.hour, t.minute, t.second
x = 15
length =2
for val in time_collection:
# val is the numeric value, x is horizontal offset, length is how many dots tall the stack will be
x = fill_dots(val, x,length)
draw_vertical_line(x)
length =3
main_canvas.pack()
main_canvas.after(200, run, master)
time_format = sys.argv[1]
start_y = 150
right_step = 20
dot_size = 15
root = Tkinter.Tk()
root.geometry('300x200')
main_canvas = Tkinter.Canvas(root, bg="blue", height=300, width=200)
run(main_canvas)
root.mainloop()
|
Python
| 0.000002
|
@@ -1492,17 +1492,60 @@
aster)%0A%0A
-%0A
+time_format = 24%0Aif len(sys.argv) %3E= 2:%0A
time_for
@@ -1562,16 +1562,17 @@
argv%5B1%5D%0A
+%0A
start_y
@@ -1755,8 +1755,9 @@
inloop()
+%0A
|
36ca52e816a2938c6723e3ec2ed4a350958c78d8
|
remove comments
|
binary_clock.py
|
binary_clock.py
|
__author__ = 'tim mcguire'
import datetime
import math
import Tkinter
def to_binary(dec, width):
x = width - 1
answer = ""
while x >= 0:
current_power = math.pow(2, x)
# how many powers of two fit into dec?
how_many = int(dec / current_power)
answer += str(how_many)
dec -= how_many * current_power
x -= 1
return answer
def fill_dots(times_to_use, x,length):
tup = tens_and_ones(times_to_use)
for num in tup:
#2,4,3,4,3,4
binary_string = to_binary(num, length)
length =4
x += right_step
y = start_y
for bit in reversed(binary_string):
coord = x, y, x + dot_size, y + dot_size
if bit == '1':
main_canvas.create_oval(coord, fill="red")
else:
main_canvas.create_oval(coord, fill="blue")
y -= 15
return x
def tens_and_ones(num):
tens = int(num / 10)
ones = num % 10
return tens, ones
def run(master):
t = datetime.datetime.now()
time_collection = t.hour, t.minute, t.second
x = 15
length =2
for val in time_collection:
# val is the numeric value, x is horizontal offset, length is how many dots tall the stack will be
x = fill_dots(val, x,length)
length =3
main_canvas.pack()
main_canvas.after(200, run, master)
start_y = 150
right_step = 20
dot_size = 15
root = Tkinter.Tk()
root.geometry('300x200')
main_canvas = Tkinter.Canvas(root, bg="blue", height=300, width=200)
run(main_canvas)
root.mainloop()
|
Python
| 0
|
@@ -484,29 +484,8 @@
up:%0A
- #2,4,3,4,3,4%0A
|
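to_binary in the two records above builds a fixed-width binary string by repeatedly dividing out powers of two. For reference, Python's format mini-language produces the same zero-padded string in one call:

def to_binary(dec, width):
    # equivalent to the manual powers-of-two loop above
    return format(int(dec), '0%db' % width)

assert to_binary(5, 4) == '0101'
assert to_binary(0, 2) == '00'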
e59d6be5a31dbe775f6481d079f0f4e81a27a9ce
|
Add import of the re module to the utils module
|
classyfd/utils.py
|
classyfd/utils.py
|
"""
Contains utility functions used within this library that are also useful
outside of it.
"""
import os
import pwd
import string
import random
# Operating System Functions
def determine_if_os_is_posix_compliant():
"""
Determine if the operating system is POSIX compliant or not
Return Value:
(bool)
"""
return bool(os.name == "posix")
def determine_if_running_as_root_user():
"""
Determine if the user running Python is "root" or not
Supported Operating Systems:
Unix-like
Return Value:
(bool)
"""
# 0 is the UID used for most Unix-like systems for the root user. In the
# event that it's not, another check is done to see if the username is
# "root".
#
# For an explanation on why os.geteuid was used, instead of os.getuid,
# see: http://stackoverflow.com/a/14951764
is_running_as_root = bool(
os.geteuid() == 0 or
pwd.getpwuid(os.geteuid()).pw_name.lower() == "root"
)
return is_running_as_root
# File Functions
def get_random_file_name(directory):
"""
Generate a random, unique file name of 32 characters
The generated file name may include lowercase letters and numbers.
Parameters:
directory -- (str) the directory the file will be in. This will determine
the unique name given to it.
Return Value:
random_file_name -- (str) this is just a randomly generated file name, so
the full/absolute path is not included.
"""
CHARACTER_LENGTH = 32
NUMBERS = string.digits
LETTERS = string.ascii_lowercase
VALID_CHARACTERS = tuple(LETTERS + NUMBERS)
while True:
random_file_name = ""
for i in range(CHARACTER_LENGTH):
random_file_name += random.choice(VALID_CHARACTERS)
file_path_already_exists = os.path.exists(
os.path.join(directory, random_file_name)
)
if file_path_already_exists:
# Try again
continue
else:
# Sweet, use the generated file name
break
return random_file_name
|
Python
| 0
|
@@ -141,16 +141,26 @@
random%0A
+import re%0A
%0A%0A# Oper
@@ -2168,24 +2168,27 @@
return random_file_name
+%0A%0A%0A
|
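get_random_file_name above draws from lowercase letters and digits until the name is unused in the target directory. A minimal sketch of the generation step alone, without the collision check:

import random
import string

def random_name(length=32):
    # lowercase letters and digits, matching the character set above
    chars = string.ascii_lowercase + string.digits
    return ''.join(random.choice(chars) for _ in range(length))

print(random_name(8))  # e.g. 'k3v9q0ah'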
00e865178f8e1762e7cd1ec8d44713d73cc58c47
|
tidy up of DynTypedNode in python
|
clast/__init__.py
|
clast/__init__.py
|
import _clast
from _clast import *
## REPRESENTATIVE CLASSES ONLY
def cxxRecordDecl(*args):
return _clast._cxxRecordDecl(list(args))
def decl(*args):
return _clast._decl(list(args))
def stmt(*args):
return _clast._stmt(list(args))
def forStmt(*args):
return _clast._forStmt(list(args))
def hasLoopInit(arg):
return _clast._hasLoopInit(arg)
def ifStmt(*args):
return _clast._ifStmt(list(args))
def hasCondition(expr):
return _clast._hasCondition(expr)
|
Python
| 0.000001
|
@@ -33,455 +33,170 @@
*%0A%0A
-## REPRESENTATIVE CLASSES ONLY%0A%0Adef cxxRecordDecl(*args):%0A return _clast._cxxRecordDecl(list(args))%0A%0Adef decl(*args):%0A return _clast._decl(list(args))%0A%0Adef stmt(*args):%0A return _clast._stmt(list(args))%0A%0Adef forStmt(*args):%0A return _clast._forStmt(list(args))%0A%0Adef hasLoopInit(arg):%0A return _clast._hasLoopInit(arg)%0A%0Adef ifStmt(*args):%0A return _clast._ifStmt(list(args))%0A%0Adef hasCondition(expr):%0A return _clast._hasCondition(expr)
+def __get(self, kind):%0A return getattr(self, '_get_' + kind.__name__)()%0A%0A# Monkey patch an extra method on that we can't do in C++%0A_clast.DynTypedNode.get = __get
%0A
|
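The diff above monkey-patches a get method onto a C++-backed class, dispatching to a per-type getter by the type's __name__. The same dispatch trick demonstrated on a plain Python class (Node and _get_int are invented for illustration):

class Node(object):
    def _get_int(self):
        return 42

def __get(self, kind):
    # dispatch to a per-type getter named after the type
    return getattr(self, '_get_' + kind.__name__)()

Node.get = __get
print(Node().get(int))  # 42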
a3ad232c3f9734e94ed09088b260ff7f6bd722d7
|
Fix wordExists()
|
Library.py
|
Library.py
|
import dataset
import re
from Generator import generateWord
db = None
phonemes = {}
allophones = {}
declensions = {}
categories = {}
def transcribePhonemes(word):
'''Transcribe from orthographic representation to phonetic
representation.
'''
for current, new in phonemes.items():
word = re.sub(current, new, word)
word = "/" + word + "/"
return word
def transcribeAllophones(word):
'''Transcribe from phonetic representation to full IPA
representation.
'''
word = word[1:-1]
for current, new in allophones.items():
word = re.sub(current, new, word)
word = "[" + word + "]"
return word
def getStatistics():
'''Returns number of words in database.'''
return len(db['words'])
def exportWords(filename):
'''Takes filename and outputs csv.'''
allWords = db['words'].all()
dataset.freeze(allWords, format='csv', filename=filename)
print("Exported all words to " + filename)
def searchWords(term):
'''Takes a search term. Returns tuple of two lists, the first
populated with matching English words and the second with
matching conlang words.
'''
englishresult = db['words'].find(english=term)
conlangresult = db['words'].find(word=term)
return (list(englishresult), list(conlangresult))
def getAvailableDeclensions():
'''Returns declension list.'''
return list(declensions)
def declineWord(word, d):
'''Declines word with declension d. Returns declined word.'''
dec = declensions[d].split("->")
word['word'] = re.sub(dec[0], dec[1], word['word'])
return word
def findConWord(term):
'''Finds the first occurrence of term in conlang column of database and
returns as a word.
'''
word = db['words'].find_one(word=term)
if word is None:
raise LookupError
else:
return word
def findEnglishWord(term):
'''Finds the first occurrence of term in English column of database
and returns as a word.
'''
word = db['words'].find_one(english=term)
if word is None:
raise LookupError
else:
return word
def wordExists(term):
'''Accepts string and searches for it in conlang words list and English words
list. If word exists in database, returns True, otherwise returns False.
'''
try:
findConWord(term)
findEnglishWord(term)
except LookupError:
return False
else:
return True
def getFields():
'''Returns list of fields, not including id, english, or word.'''
fields = db['words'].columns
fields.remove("english")
fields.remove("word")
fields.remove("id")
return fields
def getFieldOptions(field):
'''Takes a field. Returns all possible options for field that
exist within database.
'''
l = list(db['words'][field])
options = []
for item in l:
options.append(item[field])
if None in options:
options.remove(None)
return options
def listWords(t, f=None, o=None):
'''Takes type of list (full or specific form) and form. Returns list of
matching words.
'''
outList = []
if t == "all":
for word in db['words']:
outList.append(word)
elif t == "field":
q = 'SELECT * FROM words WHERE ' + f + ' LIKE "' + o + '"'
for word in db.query(q):
outList.append(word)
return outList
def addWord(word):
'''Takes word object and adds word to database.'''
db['words'].insert(word)
def setPhonemes(l):
global phonemes
phonemes = l
def setAllophones(l):
global allophones
allophones = l
def setCategories(l):
global categories
categories = l
def getCategories():
return categories
def setDeclensions(l):
global declensions
declensions = l
def loadDatabase(filename="words.db"):
global db
location = "sqlite:///" + filename
db = dataset.connect(location)
|
Python
| 0.001251
|
@@ -1793,69 +1793,8 @@
m)%0A%0A
- if word is None:%0A raise LookupError%0A else:%0A
@@ -1992,69 +1992,8 @@
m)%0A%0A
- if word is None:%0A raise LookupError%0A else:%0A
@@ -2021,20 +2021,42 @@
dExists(
-term
+english=None, conlang=None
):%0A '
@@ -2225,11 +2225,30 @@
-try
+if conlang is not None
:%0A
@@ -2249,24 +2249,27 @@
ne:%0A
+if
findConWord(
@@ -2272,117 +2272,171 @@
ord(
-term)%0A findEnglishWord(term)%0A except LookupError:%0A return False%0A else:%0A
+conlang) is not None:%0A return True%0A if english is not None:%0A if findEnglishWord(english) is not None:%0A return True%0A%0A
-
return
-Tru
+Fals
e%0A%0A%0A
|
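The fixed wordExists takes optional english/conlang keywords and returns True on the first successful lookup. A self-contained sketch of that signature against an in-memory word store (the sample data is made up):

words = {'conlang': {'kato'}, 'english': {'cat'}}

def word_exists(english=None, conlang=None):
    # return True as soon as either lookup succeeds
    if conlang is not None and conlang in words['conlang']:
        return True
    if english is not None and english in words['english']:
        return True
    return False

assert word_exists(conlang='kato')
assert not word_exists(english='dog')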
da1fc79f8eb476f7ed22d7969a1558ab6a1e3f5d
|
Use a name for the fabricated type that makes clearer it is fabricated
|
src/zeit/cms/content/add.py
|
src/zeit/cms/content/add.py
|
# Copyright (c) 2009 gocept gmbh & co. kg
# See also LICENSE.txt
import datetime
import grokcore.component as grok
import urllib
import zeit.cms.content.interfaces
import zeit.cms.repository.interfaces
import zope.browser.interfaces
import zope.component
import zope.interface
class ContentAdder(object):
zope.interface.implements(zeit.cms.content.interfaces.IContentAdder)
def __init__(self, request,
type_=None, ressort=None,
sub_ressort=None, year=None, month=None):
self.request = request
self.type_ = type_
self.ressort = ressort
self.sub_ressort = sub_ressort
now = datetime.date.today()
if year is None:
year = now.year
if month is None:
month = now.month
self.year = year
self.month = month
def __call__(self):
# we want to register the IAddLocation adapter for the content-type,
# which is an *interface*. We need a representative object providing
# that interface to be able to ask for those adapters, since
# zope.component looks for provides when an interface is required, and
# interfaces don't provide themselves.
dummy = type(self.type_.__name__, (object,), {})()
zope.interface.alsoProvides(dummy, self.type_)
context = zope.component.getMultiAdapter(
(dummy, self), zeit.cms.content.interfaces.IAddLocation)
params = {}
for key in ['ressort', 'sub_ressort']:
token = self._get_token(key)
if token is not None:
params['form.' + key] = token
return '%s/@@%s?%s' % (
zope.traversing.browser.absoluteURL(context, self.request),
self.type_.getTaggedValue('zeit.cms.addform'),
urllib.urlencode(params))
def _get_token(self, field,
interface=zeit.cms.content.interfaces.IContentAdder):
field = interface[field]
source = callable(field.source) and field.source(self) or field.source
terms = zope.component.getMultiAdapter(
(source, self.request), zope.browser.interfaces.ITerms)
value = field.get(self)
if not value:
return None
return terms.getTerm(value).token
@grok.adapter(
zeit.cms.interfaces.ICMSContent,
zeit.cms.content.interfaces.IContentAdder)
@grok.implementer(zeit.cms.content.interfaces.IAddLocation)
def ressort_year_folder(type_, adder):
ressort = adder.ressort and adder.ressort.lower()
sub_ressort = adder.sub_ressort and adder.sub_ressort.lower()
return find_or_create_folder(
ressort, sub_ressort, '%s-%02d' % (adder.year, int(adder.month)))
def find_or_create_folder(*path_elements):
repos = zope.component.getUtility(
zeit.cms.repository.interfaces.IRepository)
folder = repos
for elem in path_elements:
if elem is None:
continue
if elem not in folder:
folder[elem] = zeit.cms.repository.folder.Folder()
folder = folder[elem]
return folder
|
Python
| 0.000018
|
@@ -1232,16 +1232,30 @@
= type(
+'Provides_' +
self.typ
|
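The adder above fabricates a throwaway class with type() so an *interface* can be represented by an object that provides it; the commit only renames the fabricated type to make its purpose obvious. The fabrication step on its own, without the zope.interface machinery:

def make_representative(name):
    # fabricate a one-off class and instantiate it immediately
    return type('Provides_' + name, (object,), {})()

obj = make_representative('IExample')
print(type(obj).__name__)  # Provides_IExample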
23cafee3069733ec53691409a83ab51024da2c5c
|
Remove incorrect comment
|
src/zeit/retresco/update.py
|
src/zeit/retresco/update.py
|
import argparse
import gocept.runner
import grokcore.component as grok
import logging
import time
import zeit.cms.celery
import zeit.cms.checkout.interfaces
import zeit.cms.content.interfaces
import zeit.cms.interfaces
import zeit.cms.repository.interfaces
import zeit.cms.workingcopy.interfaces
import zeit.retresco.interfaces
import zope.component
import zope.lifecycleevent
log = logging.getLogger(__name__)
@grok.subscribe(zope.lifecycleevent.IObjectAddedEvent)
def index_after_add(event):
# We don't use the "extended" (object, event) method, as we are not
# interested in the events which are dispatched to sublocations.
context = event.object
if not zeit.cms.interfaces.ICMSContent.providedBy(context):
return
if zeit.cms.repository.interfaces.IRepository.providedBy(context):
return
if zeit.cms.workingcopy.interfaces.IWorkingcopy.providedBy(
event.newParent):
return
log.info('AfterAdd: Creating async index job for %s' % context.uniqueId)
index_async.delay(context.uniqueId)
@grok.subscribe(
zeit.cms.interfaces.ICMSContent,
zeit.cms.checkout.interfaces.IAfterCheckinEvent)
def index_after_checkin(context, event):
if event.publishing:
return
index_async.apply_async((context.uniqueId,), countdown=5)
@grok.subscribe(
zeit.cms.interfaces.ICMSContent,
zope.lifecycleevent.IObjectRemovedEvent)
def unindex_on_remove(context, event):
if zeit.cms.workingcopy.interfaces.IWorkingcopy.providedBy(
event.oldParent):
return
unindex_async.delay(zeit.cms.content.interfaces.IUUID(context).id)
@zeit.cms.celery.task(bind=True, queuename='search')
def index_async(self, uniqueId):
context = zeit.cms.interfaces.ICMSContent(uniqueId, None)
if context is None:
log.warning('Could not index %s because it does not exist any longer.',
uniqueId)
return
meta = zeit.cms.content.interfaces.ICommonMetadata(context, None)
has_keywords = True
if meta is not None:
# XXX Transitional period only, so we can index to TMS in production
# in parallel while Intrafind is still active. Once TMS is in
# production, reduce to simply
# `has_keywords = meta is not None and meta.keywords`
try:
has_keywords = len(zeit.retresco.tagger.Tagger(meta))
except Exception:
pass
try:
index(
context, enrich=True, update_keywords=not has_keywords)
except zeit.retresco.interfaces.TechnicalError:
self.retry()
def index(content, enrich=False, update_keywords=False, publish=False):
if update_keywords and not enrich:
raise ValueError('enrich is required for update_keywords')
conn = zope.component.getUtility(zeit.retresco.interfaces.ITMS)
stack = [content]
errors = []
while stack:
content = stack.pop(0)
if zeit.cms.repository.interfaces.ICollection.providedBy(content):
stack.extend(content.values())
uuid = getattr(zeit.cms.content.interfaces.IUUID(content, None), 'id',
'<no-uuid>')
log.info('Updating: %s %s, enrich: %s, keywords: %s, publish: %s',
content.uniqueId, uuid, enrich, update_keywords, publish)
try:
if enrich:
log.debug('Enriching: %s', content.uniqueId)
response = conn.enrich(content)
body = response.get('body')
if update_keywords:
tagger = zeit.retresco.tagger.Tagger(content)
tagger.update(conn.generate_keyword_list(response))
else:
# For reindex-only, preserve the previously enriched body.
# Note: This only works when content is already published in
# TMS, but for a large-scale reindex where we don't want to
# have to enrich again that's probably fine.
body = conn.get_article_data(content).get('body')
conn.index(content, body)
if publish:
pub_info = zeit.cms.workflow.interfaces.IPublishInfo(content)
if pub_info.published:
if zeit.retresco.interfaces.ITMSRepresentation(
content)() is not None:
log.info('Publishing: %s', content.uniqueId)
conn.publish(content)
else:
log.info(
'Skip publish for %s, missing required fields',
content.uniqueId)
except zeit.retresco.interfaces.TechnicalError, e:
log.info('Retrying %s due to %r', content.uniqueId, e)
raise
except Exception, e:
errors.append(e)
log.warning('Error indexing %s, giving up',
content.uniqueId, exc_info=True)
continue
return errors
@zeit.cms.celery.task(bind=True, queuename='search')
def unindex_async(self, uuid):
conn = zope.component.getUtility(zeit.retresco.interfaces.ITMS)
try:
conn.delete_id(uuid)
except zeit.retresco.interfaces.TechnicalError:
self.retry()
SKIP_TYPES = ['image', 'imagegroup', 'quiz']
def should_skip(content):
content_type = zeit.cms.type.get_type(content)
return content_type in SKIP_TYPES
@zeit.cms.celery.task(bind=True, queuename='manual')
def index_parallel(self, unique_id, enrich=False, publish=False):
try:
content = zeit.cms.interfaces.ICMSContent(unique_id)
except TypeError:
log.warning('Could not resolve %s, giving up', unique_id)
return
except Exception:
self.retry()
if zeit.cms.repository.interfaces.ICollection.providedBy(content):
children = content.values()
for item in children:
if should_skip(item):
log.debug('Skipping %s due to its content type', item)
continue
index_parallel.delay(item.uniqueId, enrich=enrich, publish=publish)
else:
if should_skip(content):
log.debug('Skipping %s due to its content type', content)
return
start = time.time()
try:
errors = index(content, enrich=enrich, update_keywords=enrich,
publish=publish)
except zeit.retresco.interfaces.TechnicalError:
self.retry()
else:
stop = time.time()
if not errors:
log.info('Processed %s in %s', content.uniqueId, stop - start)
@gocept.runner.once(principal=gocept.runner.from_config(
'zeit.retresco', 'index-principal'))
def reindex():
parser = argparse.ArgumentParser(description='Reindex folder in TMS')
parser.add_argument(
'ids', type=unicode, nargs='+', help='uniqueIds to reindex')
parser.add_argument(
'--file', action='store_true',
help='Load uniqueIds from a file to reindex')
parser.add_argument(
'--parallel', action='store_true',
help='process via job queue instead of directly')
parser.add_argument(
'--enrich', action='store_true',
help='Perform TMS analyze/enrich prior to indexing')
parser.add_argument(
'--publish', action='store_true',
help='Perform TMS publish after indexing')
args = parser.parse_args()
ids = args.ids
if args.file:
if len(args.ids) > 1:
raise Exception("Only one file can be passed!")
with open(args.ids[0], 'r') as f:
ids = f.read().splitlines()
for id in ids:
if args.parallel:
index_parallel.delay(id, args.enrich, args.publish)
else:
index(
zeit.cms.interfaces.ICMSContent(id),
enrich=args.enrich, update_keywords=args.enrich,
publish=args.publish)
|
Python
| 0
|
@@ -3752,222 +3752,8 @@
dy.%0A
- # Note: This only works when content is already published in%0A # TMS, but for a large-scale reindex where we don't want to%0A # have to enrich again that's probably fine.%0A
|
59e64609b78b12447eadb793c24236d150ffc3d4
|
remove unneeded condition
|
src/h2/frame_buffer.py
|
src/h2/frame_buffer.py
|
# -*- coding: utf-8 -*-
"""
h2/frame_buffer
~~~~~~~~~~~~~~~
A data structure that provides a way to iterate over a byte buffer in terms of
frames.
"""
from hyperframe.exceptions import InvalidFrameError
from hyperframe.frame import (
Frame, HeadersFrame, ContinuationFrame, PushPromiseFrame
)
from .exceptions import (
ProtocolError, FrameTooLargeError, FrameDataMissingError
)
# To avoid a DOS attack based on sending loads of continuation frames, we limit
# the maximum number we're prepared to receive. In this case, we'll set the
# limit to 64, which means the largest encoded header block we can receive by
# default is 262144 bytes long, and the largest possible *at all* is 1073741760
# bytes long.
#
# This value seems reasonable for now, but in future we may want to evaluate
# making it configurable.
CONTINUATION_BACKLOG = 64
class FrameBuffer:
"""
This is a data structure that expects to act as a buffer for HTTP/2 data
that allows iteration in terms of H2 frames.
"""
def __init__(self, server=False):
self.data = b''
self.max_frame_size = 0
self._preamble = b'PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n' if server else b''
self._preamble_len = len(self._preamble)
self._headers_buffer = []
def add_data(self, data):
"""
Add more data to the frame buffer.
:param data: A bytestring containing the byte buffer.
"""
if self._preamble_len:
data_len = len(data)
of_which_preamble = min(self._preamble_len, data_len)
if self._preamble[:of_which_preamble] != data[:of_which_preamble]:
raise ProtocolError("Invalid HTTP/2 preamble.")
data = data[of_which_preamble:]
self._preamble_len -= of_which_preamble
self._preamble = self._preamble[of_which_preamble:]
self.data += data
def _parse_frame_header(self, data):
"""
Parses the frame header from the data. Either returns a tuple of
(frame, length), or throws an exception. The returned frame may be None
if the frame is of unknown type.
"""
try:
frame, length = Frame.parse_frame_header(data[:9])
except ValueError as e:
# The frame header is invalid. This is a ProtocolError
raise ProtocolError("Invalid frame header received: %s" % str(e))
return frame, length
def _validate_frame_length(self, length):
"""
Confirm that the frame is an appropriate length.
"""
if length > self.max_frame_size:
raise FrameTooLargeError(
"Received overlong frame: length %d, max %d" %
(length, self.max_frame_size)
)
def _update_header_buffer(self, f):
"""
Updates the internal header buffer. Returns a frame that should replace
the current one. May throw exceptions if this frame is invalid.
"""
# Check if we're in the middle of a headers block. If we are, this
# frame *must* be a CONTINUATION frame with the same stream ID as the
# leading HEADERS or PUSH_PROMISE frame. Anything else is a
# ProtocolError. If the frame *is* valid, append it to the header
# buffer.
if self._headers_buffer:
stream_id = self._headers_buffer[0].stream_id
valid_frame = (
f is not None and
isinstance(f, ContinuationFrame) and
f.stream_id == stream_id
)
if not valid_frame:
raise ProtocolError("Invalid frame during header block.")
# Append the frame to the buffer.
self._headers_buffer.append(f)
if len(self._headers_buffer) > CONTINUATION_BACKLOG:
raise ProtocolError("Too many continuation frames received.")
# If this is the end of the header block, then we want to build a
# mutant HEADERS frame that's massive. Use the original one we got,
# then set END_HEADERS and set its data appropriately. If it's not
# the end of the block, lose the current frame: we can't yield it.
if 'END_HEADERS' in f.flags:
f = self._headers_buffer[0]
f.flags.add('END_HEADERS')
f.data = b''.join(x.data for x in self._headers_buffer)
self._headers_buffer = []
else:
f = None
elif (isinstance(f, (HeadersFrame, PushPromiseFrame)) and
'END_HEADERS' not in f.flags):
# This is the start of a headers block! Save the frame off and then
# act like we didn't receive one.
self._headers_buffer.append(f)
f = None
return f
# The methods below support the iterator protocol.
def __iter__(self):
return self
def __next__(self):
# First, check that we have enough data to successfully parse the
# next frame header. If not, bail. Otherwise, parse it.
if len(self.data) < 9:
raise StopIteration()
try:
f, length = self._parse_frame_header(self.data)
except InvalidFrameError: # pragma: no cover
raise ProtocolError("Received frame with invalid frame header.")
# Next, check that we have enough length to parse the frame body. If
# not, bail, leaving the frame header data in the buffer for next time.
if len(self.data) < length + 9:
raise StopIteration()
# Confirm the frame has an appropriate length.
self._validate_frame_length(length)
# Don't try to parse the body if we didn't get a frame we know about:
# there's nothing we can do with it anyway.
if f is not None:
try:
f.parse_body(memoryview(self.data[9:9+length]))
except InvalidFrameError:
raise FrameDataMissingError("Frame data missing or invalid")
# At this point, as we know we'll use or discard the entire frame, we
# can update the data.
self.data = self.data[9+length:]
# Pass the frame through the header buffer.
f = self._update_header_buffer(f)
# If we got a frame we didn't understand or shouldn't yield, rather
# than return None it'd be better if we just tried to get the next
# frame in the sequence instead. Recurse back into ourselves to do
# that. This is safe because the amount of work we have to do here is
# strictly bounded by the length of the buffer.
return f if f is not None else self.__next__()
|
Python
| 0
|
@@ -5688,15 +5688,9 @@
#
-Don't t
+T
ry t
@@ -5705,135 +5705,19 @@
the
-body if we didn't get a frame we know about:%0A # there's nothing we can do with it anyway.%0A if f is not None:%0A
+frame body%0A
@@ -5717,36 +5717,32 @@
dy%0A try:%0A
-
f.pa
@@ -5789,28 +5789,24 @@
%5D))%0A
-
except Inval
@@ -5819,20 +5819,16 @@
eError:%0A
-
|
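FrameBuffer above implements the iterator protocol over an append-only byte buffer, raising StopIteration whenever too little data has arrived and keeping the remainder for next time. A stripped-down fixed-size variant of the same structure:

class ChunkBuffer:
    """Yield fixed-size chunks from an append-only byte buffer."""

    def __init__(self, size):
        self.size = size
        self.data = b''

    def add_data(self, data):
        self.data += data

    def __iter__(self):
        return self

    def __next__(self):
        # not enough buffered yet: stop iterating, keep the remainder
        if len(self.data) < self.size:
            raise StopIteration()
        chunk, self.data = self.data[:self.size], self.data[self.size:]
        return chunk

buf = ChunkBuffer(4)
buf.add_data(b'abcdefghij')
print(list(buf))  # [b'abcd', b'efgh']; b'ij' stays buffered for next time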
a4db65ff4c5b3edd4739b0864f4e1641b37b3b87
|
Remove wrong comment
|
setuptools/tests/test_logging.py
|
setuptools/tests/test_logging.py
|
import inspect
import logging
import os
import pytest
setup_py = """\
from setuptools import setup
setup(
name="test_logging",
version="0.0"
)
"""
@pytest.mark.parametrize(
"flag, expected_level", [("--dry-run", "INFO"), ("--verbose", "DEBUG")]
)
def test_verbosity_level(tmp_path, monkeypatch, flag, expected_level):
"""Make sure the correct verbosity level is set (issue #3038)"""
import setuptools # noqa: Import setuptools to monkeypatch distutils
import distutils # <- load distutils after all the patches take place
logger = logging.Logger(__name__)
monkeypatch.setattr(logging, "root", logger)
unset_log_level = logger.getEffectiveLevel()
assert logging.getLevelName(unset_log_level) == "NOTSET"
setup_script = tmp_path / "setup.py"
setup_script.write_text(setup_py)
dist = distutils.core.run_setup(setup_script, stop_after="init")
dist.script_args = [flag, "sdist"]
dist.parse_command_line() # <- where the log level is set
log_level = logger.getEffectiveLevel()
log_level_name = logging.getLevelName(log_level)
assert log_level_name == expected_level
def test_patching_does_not_cause_problems():
# Ensure `dist.log` is only patched if necessary
import setuptools.logging
from distutils import dist # <- load distutils after all the patches take place
setuptools.logging.configure()
if os.getenv("SETUPTOOLS_USE_DISTUTILS", "local").lower() == "local":
# Modern logging infra, no problematic patching.
assert isinstance(dist.log, logging.Logger)
else:
assert inspect.ismodule(dist.log)
|
Python
| 0
|
@@ -1301,62 +1301,8 @@
dist
- # %3C- load distutils after all the patches take place
%0A%0A
|
ef4e84d2defbf4899f0a1745fce5162e2510c1f7
|
test "merge-patches --help"
|
rhcephpkg/tests/test_merge_patches.py
|
rhcephpkg/tests/test_merge_patches.py
|
import pytest
import subprocess
from rhcephpkg import MergePatches
from rhcephpkg.tests.util import CallRecorder
def git(*args):
""" shortcut for shelling out to git """
cmd = ['git'] + list(args)
subprocess.check_call(cmd)
class TestMergePatches(object):
def test_on_debian_branch(self, testpkg, monkeypatch):
# set our current branch to be a debian branch:
git('checkout', 'ceph-2-ubuntu')
recorder = CallRecorder()
monkeypatch.setattr('subprocess.check_call', recorder)
localbuild = MergePatches([])
localbuild._run()
# Verify that we run the "git fetch" command here.
expected = ['git', 'fetch', '.',
'patches/ceph-2-rhel-patches:patch-queue/ceph-2-ubuntu']
assert recorder.args == expected
def test_on_patch_queue_branch(self, testpkg, monkeypatch):
# set our current branch to be a patch-queue branch:
git('checkout', 'patch-queue/ceph-2-ubuntu')
recorder = CallRecorder()
monkeypatch.setattr('subprocess.check_call', recorder)
localbuild = MergePatches([])
localbuild._run()
# Verify that we run the "git merge" command here.
expected = ['git', 'pull', '--ff-only', 'patches/ceph-2-rhel-patches']
assert recorder.args == expected
def test_force_on_debian_branch(self, testpkg, monkeypatch):
# set current_branch() to a debian branch:
git('checkout', 'ceph-2-ubuntu')
recorder = CallRecorder()
monkeypatch.setattr('subprocess.check_call', recorder)
localbuild = MergePatches([])
localbuild._run(force=True)
# Verify that we run the "git push" command here.
expected = ['git', 'push', '.',
'+patches/ceph-2-rhel-patches:patch-queue/ceph-2-ubuntu']
assert recorder.args == expected
def test_force_on_patch_queue_branch(self, testpkg, monkeypatch):
# set current_branch() to a patch-queue branch:
git('checkout', 'patch-queue/ceph-2-ubuntu')
recorder = CallRecorder()
monkeypatch.setattr('subprocess.check_call', recorder)
localbuild = MergePatches([])
localbuild._run(force=True)
# Verify that we run the "git reset" command here.
expected = ['git', 'reset', '--hard', 'patches/ceph-2-rhel-patches']
assert recorder.args == expected
class TestMergePatchesRhelPatchesBranch(object):
@pytest.mark.parametrize('debian_branch,expected', [
('ceph-1.3-ubuntu', 'ceph-1.3-rhel-patches'),
('ceph-2-ubuntu', 'ceph-2-rhel-patches'),
('ceph-2-trusty', 'ceph-2-rhel-patches'),
('ceph-2-xenial', 'ceph-2-rhel-patches'),
('someotherproduct-2-ubuntu', 'someotherproduct-2-rhel-patches'),
('ceph-2-ubuntu-hotfix-bz123', 'ceph-2-rhel-patches-hotfix-bz123'),
('ceph-2-ubuntu-test-bz456', 'ceph-2-rhel-patches-test-bz456'),
])
def test_get_rhel_patches_branch(self, debian_branch, expected):
m = MergePatches([])
assert m.get_rhel_patches_branch(debian_branch) == expected
|
Python
| 0.000001
|
@@ -262,24 +262,306 @@
s(object):%0A%0A
+ def test_help(self, capsys):%0A mergep = MergePatches(%5B'rhcephpkg', 'merge-patches', '--help'%5D)%0A with pytest.raises(SystemExit):%0A mergep.main()%0A out, _ = capsys.readouterr()%0A assert %22Fetch the latest patches branch that rdopkg uses%22 in out%0A%0A
def test
|
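The added test_help drives a CLI entry point under pytest.raises(SystemExit) and then inspects captured stdout. The same pattern against a toy main (the usage text and main are invented):

import pytest

def main(argv):
    # toy entry point: print usage and exit when asked for help
    if '--help' in argv:
        print('usage: demo [--help]')
        raise SystemExit(0)

def test_help(capsys):
    with pytest.raises(SystemExit):
        main(['--help'])
    out, _ = capsys.readouterr()
    assert 'usage:' in out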
7bccd20523f96728db7a6b5fd23cb339787ecd3a
|
Bump to 1.1.4
|
jnius/__init__.py
|
jnius/__init__.py
|
'''
Pyjnius
=======
Accessing Java classes from Python.
All the documentation is available at: http://pyjnius.readthedocs.org
'''
__version__ = '1.1.4.dev0'
from .jnius import * # noqa
from .reflect import * # noqa
from six import with_metaclass
# XXX monkey patch methods that cannot be in cython.
# Cython doesn't allow to set new attribute on methods it compiled
HASHCODE_MAX = 2 ** 31 - 1
class PythonJavaClass_(with_metaclass(MetaJavaBase, PythonJavaClass)):
@java_method('()I', name='hashCode')
def hashCode(self):
return id(self) % HASHCODE_MAX
@java_method('()Ljava/lang/String;', name='hashCode')
def hashCode_(self):
return '{}'.format(self.hashCode())
@java_method('()Ljava/lang/String;', name='toString')
def toString(self):
return repr(self)
@java_method('(Ljava/lang/Object;)Z', name='equals')
def equals(self, other):
return self.hashCode() == other.hashCode()
PythonJavaClass = PythonJavaClass_
# from https://gist.github.com/tito/09c42fb4767721dc323d
import os
if "ANDROID_ARGUMENT" in os.environ:
# on android, catch all exception to ensure about a jnius.detach
import threading
import jnius
orig_thread_run = threading.Thread.run
def jnius_thread_hook(*args, **kwargs):
try:
return orig_thread_run(*args, **kwargs)
finally:
jnius.detach()
threading.Thread.run = jnius_thread_hook
|
Python
| 0.000084
|
@@ -150,13 +150,8 @@
.1.4
-.dev0
'%0A%0Af
|
6a27bd99352e4dc7f38c6f819a8a45b37c1a094c
|
Remove TODO to add requirements.txt
|
start-active-players.py
|
start-active-players.py
|
"""
Start active players for the week
Ideas:
- Include the names of players who cannot be started
- And maybe the full roster on those dates
TODO:
- Add required packages in requirements.txt
"""
import requests
from bs4 import BeautifulSoup
# TODO: Configure this somewhere better (as a direct argument to the script, probably)
TEAM_URL = 'http://basketball.fantasysports.yahoo.com/nba/178276/6/'
headers = {
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.99 Safari/537.36'
}
response = requests.get(TEAM_URL, headers=headers)
soup = BeautifulSoup(response.text)
inputs = soup.find(id='hiddens').findAll('input')
fields = {input['name']: input['value'] for input in inputs}
print(fields)
|
Python
| 0
|
@@ -142,59 +142,8 @@
ates
-%0A%0ATODO:%0A- Add required packages in requirements.txt
%0A%22%22%22
|
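The script above scrapes hidden form inputs into a dict with BeautifulSoup. A self-contained version of that extraction against inline HTML (find_all is the modern spelling of findAll; the sample markup is invented):

from bs4 import BeautifulSoup

html = '<div id="hiddens"><input name="crumb" value="abc"/></div>'
soup = BeautifulSoup(html, 'html.parser')
inputs = soup.find(id='hiddens').find_all('input')
fields = {i['name']: i['value'] for i in inputs}
print(fields)  # {'crumb': 'abc'}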
26f9375c45e29dfdfd5b3611cdf86e87ea7f9801
|
Add reference, fix docstring
|
statsmodels/regression/dimred.py
|
statsmodels/regression/dimred.py
|
import numpy as np
from statsmodels.base import model
import statsmodels.base.wrapper as wrap
import pandas as pd
class DimReductionRegression(model.Model):
def __init__(self, endog, exog, **kwargs):
super(DimReductionRegression, self).__init__(endog, exog, **kwargs)
def _prep(self, n_slice):
# Sort the data by endog
ii = np.argsort(self.endog)
y = self.endog[ii]
x = self.exog[ii, :]
# Whiten the data
x -= x.mean(0)
covx = np.cov(x.T)
covxr = np.linalg.cholesky(covx)
x = np.linalg.solve(covxr, x.T).T
self.wexog = x
self._covxr = covxr
# Split the data into slices
self._split_wexog = np.array_split(x, n_slice)
class SIR(DimReductionRegression):
"""
Sliced Inverse Regression (SIR)
Parameters
----------
endog : array-like (1d)
The dependent variable
exog : array-like (2d)
The covariates
References
----------
KC Li (1991). Sliced inverse regression for dimension reduction.
JASA 86, 316-342.
"""
def __init__(self, endog, exog, **kwargs):
super(SIR, self).__init__(endog, exog, **kwargs)
def fit(self, slice_n=20):
"""
Estimate the EDR space.
Parameters
----------
slice_n : int
Number of observations per slice
"""
# Number of slices
n_slice = self.exog.shape[0] // slice_n
self._prep(n_slice)
mn = [z.mean(0) for z in self._split_wexog]
n = [z.shape[0] for z in self._split_wexog]
mn = np.asarray(mn)
n = np.asarray(n)
mnc = np.cov(mn.T, fweights=n)
a, b = np.linalg.eigh(mnc)
jj = np.argsort(-a)
a = a[jj]
b = b[:, jj]
params = np.linalg.solve(self._covxr.T, b)
results = DimReductionResults(self, params, eigs=a)
return DimReductionResultsWrapper(results)
class PHD(DimReductionRegression):
"""
Principal Hessian Directions
Parameters
----------
endog : array-like (1d)
The dependent variable
exog : array-like (2d)
The covariates
References
----------
"""
def __init__(self, endog, exog, **kwargs):
super(PHD, self).__init__(endog, exog, **kwargs)
def fit(self, resid=False):
"""
Estimate the EDR space using PHD.
Parameters
----------
resid : bool
"""
y = self.endog - self.endog.mean()
x = self.exog - self.exog.mean(0)
if resid:
from statsmodels.regression.linear_model import OLS
r = OLS(y, x).fit()
y = r.resid
cm = np.einsum('i,ij,ik->jk', y, x, x)
cm /= len(y)
cx = np.cov(x.T)
cb = np.linalg.solve(cx, cm)
a, b = np.linalg.eig(cb)
jj = np.argsort(-np.abs(a))
a = a[jj]
params = b[:, jj]
results = DimReductionResults(self, params, eigs=a)
return DimReductionResultsWrapper(results)
class SAVE(DimReductionRegression):
"""
Sliced Average Variance Estimation (SAVE)
Parameters
----------
endog : array-like (1d)
The dependent variable
exog : array-like (2d)
The covariates
Keyword parameters
------------------
bc : bool
If True, use the bias-corrected SAVE method of Li and Zhu.
References
----------
RD Cook. SAVE: A method for dimension reduction and graphics
in regression.
http://www.stat.umn.edu/RegGraph/RecentDev/save.pdf
Y Li, L-X Zhu (2007). Asymptotics for sliced average
variance estimation. The Annals of Statistics.
https://arxiv.org/pdf/0708.0462.pdf
"""
def __init__(self, endog, exog, **kwargs):
super(SAVE, self).__init__(endog, exog, **kwargs)
self.bc = False
if "bc" in kwargs.keys() and kwargs["bc"] == True:
self.bc = True
def fit(self, slice_n=50):
"""
Estimate the EDR space.
Parameters
----------
slice_n : int
Number of observations per slice
"""
# Number of slices
n_slice = self.exog.shape[0] // slice_n
self._prep(n_slice)
cv = [np.cov(z.T) for z in self._split_wexog]
n = [z.shape[0] for z in self._split_wexog]
p = self.wexog.shape[1]
if not self.bc:
# Cook's original approach
vm = 0
for i in range(len(cv)):
icv = np.eye(p) - cv[i]
vm += n[i] * np.dot(icv, icv)
vm /= len(cv)
else:
# The bias-corrected approach of Li and Zhu
# \Lambda_n in Li, Zhu
av = 0
for c in cv:
av += np.dot(c, c)
av /= len(cv)
# V_n in Li, Zhu
vn = 0
for j, x in enumerate(self._split_wexog):
r = x - x.mean(0)
for i in range(r.shape[0]):
u = r[i, :]
m = np.outer(u, u)
vn += np.dot(m, m)
vn /= self.exog.shape[0]
c = np.mean(n)
k1 = c * (c - 1) / ((c - 1)**2 + 1)
k2 = (c - 1) / ((c - 1)**2 + 1)
av2 = k1 * av - k2 * vn
vm = np.eye(p) - 2 * sum(cv) / len(cv) + av2
a, b = np.linalg.eigh(vm)
jj = np.argsort(-a)
a = a[jj]
b = b[:, jj]
params = np.linalg.solve(self._covxr.T, b)
results = DimReductionResults(self, params, eigs=a)
return DimReductionResultsWrapper(results)
class DimReductionResults(model.Results):
def __init__(self, model, params, eigs):
super(DimReductionResults, self).__init__(
model, params)
self.eigs = eigs
class DimReductionResultsWrapper(wrap.ResultsWrapper):
_attrs = {
'params': 'columns',
}
_wrap_attrs = _attrs
wrap.populate_wrapper(DimReductionResultsWrapper,
DimReductionResults)
|
Python
| 0.000001
|
@@ -2202,24 +2202,182 @@
----------%0A
+ KC Li (1992). On Principal Hessian Directions for Data%0A Visualization and Dimension Reduction: Another application%0A of Stein's lemma. JASA 87:420.%0A
%22%22%22%0A%0A
@@ -2625,16 +2625,187 @@
: bool%0A
+ If True, use least squares regression to remove the%0A linear relationship between each covariate and the%0A response, before conducting PHD.
%0A
|
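SIR.fit above sorts by the response, slices, and eigen-decomposes the covariance of slice means. A tiny numeric sketch of that core computation on synthetic data (whitening and the fweights step are omitted for brevity):

import numpy as np

rng = np.random.default_rng(0)
x = rng.standard_normal((100, 3))
y = x[:, 0] + 0.1 * rng.standard_normal(100)

# sort by the response, slice, then eigen-decompose the covariance of slice means
ii = np.argsort(y)
slices = np.array_split(x[ii], 5)
mn = np.array([s.mean(0) for s in slices])
eigvals, eigvecs = np.linalg.eigh(np.cov(mn.T))
print(eigvecs[:, np.argmax(eigvals)])  # dominant EDR direction, roughly +/-(1, 0, 0)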
a6390df0f4fb9c9402b1c795d4bf65765b793412
|
Bump dallinger version
|
dallinger/version.py
|
dallinger/version.py
|
"""Dallinger version number."""
__version__ = "5.0.5"
|
Python
| 0
|
@@ -45,11 +45,11 @@
= %225.0.
-5
+6
%22%0A
|
75d2f1aad9aa88926fce27d49c4e452eb571fc14
|
Update the lexer
|
cycli/lexer.py
|
cycli/lexer.py
|
import re
from pygments.lexer import RegexLexer
from pygments.token import Text, Comment, Operator, Keyword, Name, String, Number, Token
__all__ = ["CypherLexer"]
class CypherLexer(RegexLexer):
name = 'Cypher'
aliases = ['cypher']
filenames = ['*.cyp', '*.cypher']
flags = re.IGNORECASE
tokens = {
'root': [
(r'\s+', Text),
(r'//.*?\n', Comment.Single),
(r'\b(ABS|ACOS|ALLSHORTESTPATHS|ASIN|ATAN|ATAN2|AVG|CEIL|COALESCE|COLLECT'
r'|COS|COT|COUNT|DEGREES|E|ENDNODE|EXP|EXTRACT|FILTER|FLOOR'
r'|HAVERSIN|HEAD|ID|KEYS|LABELS|LAST|LEFT|LENGTH|LIKE|LOAD CSV|LOG|LOG10'
r'|LOWER|LTRIM|MAX|MIN|NODE|NODES|PERCENTILECONT|PERCENTILEDISC|PI|RADIANS'
r'|RAND|RANGE|REDUCE|REL|RELATIONSHIP|RELATIONSHIPS|REPLACE|RIGHT|ROUND|RTRIM'
r'|SHORTESTPATH|SIGN|SIN|SPLIT|SQRT|STARTNODE|STDEV|STDEVP|STR|SUBSTRING'
r'|SUM|TAIL|TAN|TIMESTAMP|TOFLOAT|TOINT|TRIM|TYPE|UPPER|ALL'
r'|AND|ANY|HAS|IN|NONE|NOT|OR|SINGLE|XOR|AS'
r'|ASC|ASCENDING|ASSERT|BY|CASE|COMMIT|CONSTRAINT|CREATE|CYPHER'
r'|DELETE|DESC|DESCENDING|DISTINCT|DROP|ELSE|END|EXPLAIN|FALSE|FIELDTERMINATOR'
r'|FOREACH|FROM|WITH HEADERS|IN|INDEX|IS|LIMIT|LOAD|MATCH|MERGE'
r'|NULL|ON|OPTIONAL|ORDER|PERIODIC|PROFILE|REMOVE|RETURN|SCAN|SET'
r'|SKIP|START|THEN|TRUE|UNION|UNIQUE|UNWIND|USING|WHEN|WHERE|WITH)\b', Keyword),
(r'[+*/<>=~!@#%^&|`?-]', Operator),
(r'[0-9]+', Name),
(r"'(''|[^'])*'", String.Single),
(r'"(""|[^"])*"', String.Symbol),
(r'[a-zA-Z_][a-zA-Z0-9_]*', Name),
(r'-->|<--|\]->|<-\[|\)-\[|\]-\(|\[|\]-|\[|\]', Token.Pattern),
(r'\.', Token.Pattern),
(r'\(|\)', Token.Pattern)
]
}
|
Python
| 0
|
@@ -1710,50 +1710,67 @@
(r'
---%3E%7C%3C--%7C%5C%5D-%3E%7C%3C-%5C%5B%7C%5C)-%5C%5B%7C%5C%5D-
+%5B-%5C)%5C%5D%5D-%5B%3E%5C(%5D%7C%5B%3C%5C)%5D-%5B-%5C(%5C%5B%5D%7C%5B%5C%5D%5C)%5D-%7C-%5B
%5C(
-%7C
%5C%5B
+%5D%7C--%3E%7C%3C--
%7C%5C%5D-%7C
+-
%5C%5B
-%7C%5C%5D
', T
@@ -1840,16 +1840,26 @@
(r'%5C(%7C%5C)
+%7C%5C%5D%7C%5C%5B%7C%7B%7C%7D
', Token
|
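The lexer above is a pygments RegexLexer whose token table maps regexes to token types, tried in order. A minimal working lexer built the same way, with just two rules:

import re
from pygments.lexer import RegexLexer
from pygments.token import Keyword, Name, Text

class TinyLexer(RegexLexer):
    name = 'Tiny'
    flags = re.IGNORECASE
    tokens = {
        'root': [
            (r'\s+', Text),
            (r'\b(MATCH|RETURN)\b', Keyword),
            (r'[a-zA-Z_][a-zA-Z0-9_]*', Name),
        ],
    }

print(list(TinyLexer().get_tokens('MATCH n RETURN n')))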
a98096f129165be003294eaa5ad3596931c58ae7
|
Use per-request database connections.
|
nmhive.py
|
nmhive.py
|
#!/usr/bin/env python
import json
import mailbox
import os
import tempfile
import urllib.request
import flask
import flask_cors
import notmuch
app = flask.Flask(__name__)
app.config['CORS_HEADERS'] = 'Content-Type'
flask_cors.CORS(app)
TAG_PREFIX = os.getenv('NMBPREFIX', 'notmuch::')
NOTMUCH = None
_TAGS = {}
@app.route('/tags', methods=['GET'])
def tags():
tags = set()
for t in NOTMUCH.get_all_tags():
if t.startswith(TAG_PREFIX):
tags.add(t[len(TAG_PREFIX):])
return flask.Response(
response=json.dumps(sorted(tags)),
mimetype='application/json')
@app.route('/mid/<message_id>', methods=['GET', 'POST'])
def message_id_tags(message_id):
if flask.request.method == 'POST':
tags = _TAGS.get(message_id, set())
new_tags = tags.copy()
for change in flask.request.get_json():
if change.startswith('+'):
new_tags.add(change[1:])
elif change.startswith('-'):
try:
new_tags.remove(change[1:])
except KeyError:
return flask.Response(status=400)
else:
return flask.Response(status=400)
_TAGS[message_id] = new_tags
return flask.Response(
response=json.dumps(sorted(new_tags)),
mimetype='application/json')
elif flask.request.method == 'GET':
try:
tags = _TAGS[message_id]
except KeyError:
return flask.Response(status=404)
return flask.Response(
response=json.dumps(sorted(tags)),
mimetype='application/json')
@app.route('/gmane/<group>/<int:article>', methods=['GET'])
def gmane_message_id(group, article):
url = 'http://download.gmane.org/{}/{}/{}'.format(
group, article, article + 1)
response = urllib.request.urlopen(url=url, timeout=3)
mbox_bytes = response.read()
with tempfile.NamedTemporaryFile(prefix='nmbug-', suffix='.mbox') as f:
f.write(mbox_bytes)
mbox = mailbox.mbox(path=f.name)
_, message = mbox.popitem()
message_id = message['message-id']
return flask.Response(
response=message_id.lstrip('<').rstrip('>'),
mimetype='text/plain')
if __name__ == '__main__':
NOTMUCH = notmuch.Database(
path=None,
mode=notmuch.Database.MODE.READ_WRITE)
app.run(host='0.0.0.0')
|
Python
| 0
|
@@ -294,16 +294,21 @@
MUCH
+_PATH
= None%0A
_TAG
@@ -307,19 +307,8 @@
one%0A
-_TAGS = %7B%7D%0A
%0A%0A@a
@@ -379,24 +379,89 @@
-for t in NOTMUCH
+database = notmuch.Database(path=NOTMUCH_PATH)%0A try:%0A for t in database
.get
@@ -481,16 +481,20 @@
+
+
if t.sta
@@ -522,24 +522,28 @@
+
tags.add(t%5Bl
@@ -556,24 +556,62 @@
_PREFIX):%5D)%0A
+ finally:%0A database.close()%0A
return f
@@ -2395,106 +2395,8 @@
_':%0A
- NOTMUCH = notmuch.Database(%0A path=None,%0A mode=notmuch.Database.MODE.READ_WRITE)%0A
|
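The change above moves from one module-global notmuch database to a fresh connection per request, closed in a finally block. The same open/try/finally shape with sqlite3 (the tags table is illustrative, not notmuch's schema):

import sqlite3

def all_tags(path):
    # open a fresh connection per request and always close it
    connection = sqlite3.connect(path)
    try:
        rows = connection.execute('SELECT name FROM tags ORDER BY name')
        return [name for (name,) in rows]
    finally:
        connection.close()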
0983331773982b2bae6b92a0350a91aefbe6481e
|
Use the `note` style box. Refs #11725.
|
contrib/help_guide_version_notice.py
|
contrib/help_guide_version_notice.py
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.com/license.html.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/.
from pkg_resources import resource_listdir
from trac.config import ListOption, Option
from trac.core import Component, implements
from trac.web.api import IRequestFilter
NOTICE_TEMPLATE = """\
{{{#!box
**Note:** this page documents the %(release)s (%(desc)s) release.
See [[%(alt_page)s]] if you need the %(alt_desc)s version.
}}}
"""
class HelpGuideVersionNotice(Component):
"""Adds a version notice to pages in the Help/Guide with a link to
the previous or current version of the page in the guide. The
WikiExtraPlugin needs to be installed for pretty rendering of the
notice using the `box` WikiProcessor.
"""
implements(IRequestFilter)
lts_release = Option('teo', 'lts_release', '0.12',
doc="Version of the LTS release of Trac.")
stable_release = Option('teo', 'stable_release', '1.0',
doc="Version of the stable release of Trac.")
dev_release = Option('teo', 'dev_release', '1.1',
doc="Version of the dev release of Trac.")
ignored_pages = ListOption('teo', 'ignored_pages',
'WikiStart, TitleIndex',
doc="List of pages to ignore.")
def __init__(self):
self.default_pages = resource_listdir('trac.wiki', 'default-pages')
for page in self.ignored_pages:
self.default_pages.remove(page)
def pre_process_request(self, req, handler):
return handler
def post_process_request(self, req, template, data, content_type):
if data and 'page' in data and 'text' in data:
name = data['page'].name
notice = ""
if name in self.default_pages:
alt_page = self.lts_release + '/' + name
notice = NOTICE_TEMPLATE % {'release': self.stable_release,
'desc': 'latest stable',
'alt_page': alt_page,
'alt_desc': 'previous'}
elif name.startswith(self.lts_release) and \
name[len(self.lts_release)+1:] in self.default_pages:
alt_page = '../../' + name[len(self.lts_release)+1:]
notice = NOTICE_TEMPLATE % {'release': self.lts_release,
'desc': 'maintenance',
'alt_page': alt_page,
'alt_desc': 'latest stable'}
elif name.startswith(self.dev_release) and \
name[len(self.dev_release)+1:] in self.default_pages:
alt_page = '../../' + name[len(self.dev_release)+1:]
notice = NOTICE_TEMPLATE % {'release': self.dev_release,
'desc': 'development',
'alt_page': alt_page,
'alt_desc': 'latest stable'}
data['text'] = notice + data['text']
return template, data, content_type
|
Python
| 0.000001
|
@@ -684,20 +684,15 @@
!box
-%0A**Note:** t
+ note%0AT
his
|
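The diff above switches the notice template to the `note` box style of the WikiExtraPlugin. A runnable sketch of the %-formatting with named placeholders that the plugin relies on, using illustrative values:

# Sketch of NOTICE_TEMPLATE after the diff above, rendered with
# %-formatting and a dict of named placeholders. Values are illustrative.

NOTICE_TEMPLATE = """\
{{{#!box note
This page documents the %(release)s (%(desc)s) release.
See [[%(alt_page)s]] if you need the %(alt_desc)s version.
}}}
"""

print(NOTICE_TEMPLATE % {'release': '1.0', 'desc': 'latest stable',
                         'alt_page': '0.12/WikiStart', 'alt_desc': 'previous'})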
ebaa0aec67a7eb64a0d8da52807c2773c89fb6a6
|
version bump to 0.1.24
|
contact_form/__init__.py
|
contact_form/__init__.py
|
# -*- coding: utf-8 -*-
VERSION = (0, 1, 23)
def get_version():
"""Returns the version as a human-format string."""
return '.'.join([str(i) for i in VERSION])
__author__ = 'dlancer'
__docformat__ = 'restructuredtext en'
__copyright__ = 'Copyright 2014-2015, dlancer'
__license__ = 'BSD'
__version__ = get_version()
__maintainer__ = 'dlancer'
__email__ = 'dmdpost@gmail.com'
__status__ = 'Development'
# default the contact_form app config (only for Django v1.7+)
default_app_config = 'contact_form.apps.AppConfig'
|
Python
| 0
|
@@ -40,9 +40,9 @@
1, 2
-3
+4
)%0A%0A%0A
|
106c7dedd1c4f2d3ba189b25e124c1879f9f80d8
|
Print newline when skipping hdf5 backtrace error message
|
into/backends/tests/test_pytables.py
|
into/backends/tests/test_pytables.py
|
from __future__ import absolute_import, division, print_function
import numpy as np
import datashape as ds
import pytest
tb = pytest.importorskip('tables')
from into import into
from into.utils import tmpfile
from into.backends.pytables import PyTables, discover
import os
try:
f = tb.open_file('import-tables-test.hdf5', mode='w')
f.close()
if os.path.exists('import-tables-test.hdf5'):
os.remove('import-tables-test.hdf5')
except tb.exceptions.HDF5ExtError as e:
pytest.skip('Cannot write file, error: %s' % e)
x = np.array([(1, 'Alice', 100),
(2, 'Bob', -200),
(3, 'Charlie', 300),
(4, 'Denis', 400),
(5, 'Edith', -500)],
dtype=[('id', '<i8'), ('name', 'S7'), ('amount', '<i8')])
@pytest.yield_fixture
def tbfile():
with tmpfile('.h5') as filename:
f = tb.open_file(filename, mode='w')
d = f.create_table('/', 'title', x)
d.close()
f.close()
yield filename
now = np.datetime64('now').astype('datetime64[us]')
raw_dt_data = [(1, 'Alice', 100, now),
(2, 'Bob', -200, now),
(3, 'Charlie', 300, now),
(4, 'Denis', 400, now),
(5, 'Edith', -500, now)]
dt_data = np.array(raw_dt_data, dtype=np.dtype([('id', 'i8'),
('name', 'S7'),
('amount', 'f8'),
('date', 'M8[ms]')]))
@pytest.yield_fixture
def dt_tb():
class Desc(tb.IsDescription):
id = tb.Int64Col(pos=0)
name = tb.StringCol(itemsize=7, pos=1)
amount = tb.Float64Col(pos=2)
date = tb.Time64Col(pos=3)
non_date_types = list(zip(['id', 'name', 'amount'], ['i8', 'S7', 'f8']))
# has to be in microseconds as per pytables spec
dtype = np.dtype(non_date_types + [('date', 'M8[us]')])
rec = dt_data.astype(dtype)
# also has to be a floating point number
dtype = np.dtype(non_date_types + [('date', 'f8')])
rec = rec.astype(dtype)
rec['date'] /= 1e6
with tmpfile('.h5') as filename:
f = tb.open_file(filename, mode='w')
d = f.create_table('/', 'dt', description=Desc)
d.append(rec)
d.close()
f.close()
yield filename
class TestPyTablesLight(object):
def test_read(self, tbfile):
t = PyTables(path=tbfile, datapath='/title')
shape = t.shape
t._v_file.close()
assert shape == (5,)
def test_write_no_dshape(self, tbfile):
with pytest.raises(ValueError):
PyTables(path=tbfile, datapath='/write_this')
def test_write_with_dshape(self, tbfile):
f = tb.open_file(tbfile, mode='a')
try:
assert '/write_this' not in f
finally:
f.close()
del f
# create our table
dshape = '{id: int, name: string[7, "ascii"], amount: float32}'
t = PyTables(path=tbfile, datapath='/write_this', dshape=dshape)
shape = t.shape
filename = t._v_file.filename
t._v_file.close()
assert filename == tbfile
assert shape == (0,)
@pytest.mark.xfail(reason="Poor datetime support")
def test_table_into_ndarray(self, dt_tb):
t = PyTables(dt_tb, '/dt')
res = into(np.ndarray, t)
try:
for k in res.dtype.fields:
lhs, rhs = res[k], dt_data[k]
if (issubclass(np.datetime64, lhs.dtype.type) and
issubclass(np.datetime64, rhs.dtype.type)):
lhs, rhs = lhs.astype('M8[us]'), rhs.astype('M8[us]')
assert np.array_equal(lhs, rhs)
finally:
t._v_file.close()
def test_ndarray_into_table(self, dt_tb):
dtype = ds.from_numpy(dt_data.shape, dt_data.dtype)
t = PyTables(dt_tb, '/out', dtype)
try:
res = into(np.ndarray, into(t, dt_data, filename=dt_tb, datapath='/out'))
for k in res.dtype.fields:
lhs, rhs = res[k], dt_data[k]
if (issubclass(np.datetime64, lhs.dtype.type) and
issubclass(np.datetime64, rhs.dtype.type)):
lhs, rhs = lhs.astype('M8[us]'), rhs.astype('M8[us]')
assert np.array_equal(lhs, rhs)
finally:
t._v_file.close()
@pytest.mark.xfail(reason="Poor datetime support")
def test_datetime_discovery(self, dt_tb):
t = PyTables(dt_tb, '/dt')
lhs, rhs = map(discover, (t, dt_data))
t._v_file.close()
assert lhs == rhs
def test_node_discover(self, dt_tb):
root = PyTables(dt_tb, '/')
result = discover(root)
expected = ds.dshape("""{dt: 5 * {id: int64,
name: string[7, "A"],
amount: float64,
date: float64}}""")
assert result == expected.measure
root._v_file.close()
|
Python
| 0.000001
|
@@ -524,17 +524,18 @@
, error:
-
+%5Cn
%25s' %25 e)
|
6320296974895b41ff87a007a73752c816db2a3d
|
improve one help string
|
bin/addons/base/ir/ir_cron.py
|
bin/addons/base/ir/ir_cron.py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import datetime
from dateutil.relativedelta import relativedelta
import netsvc
import tools
from tools.safe_eval import safe_eval as eval
import pooler
from osv import fields, osv
def str2tuple(s):
return eval('tuple(%s)' % (s or ''))
_intervalTypes = {
'work_days': lambda interval: relativedelta(days=interval),
'days': lambda interval: relativedelta(days=interval),
'hours': lambda interval: relativedelta(hours=interval),
'weeks': lambda interval: relativedelta(days=7*interval),
'months': lambda interval: relativedelta(months=interval),
'minutes': lambda interval: relativedelta(minutes=interval),
}
class ir_cron(osv.osv, netsvc.Agent):
_name = "ir.cron"
_columns = {
'name': fields.char('Name', size=60, required=True),
'user_id': fields.many2one('res.users', 'User', required=True),
'active': fields.boolean('Active'),
'interval_number': fields.integer('Interval Number',help="Repeat every x."),
'interval_type': fields.selection( [('minutes', 'Minutes'),
('hours', 'Hours'), ('work_days','Work Days'), ('days', 'Days'),('weeks', 'Weeks'), ('months', 'Months')], 'Interval Unit'),
'numbercall': fields.integer('Number of Calls', help='Number of time the function is called,\na negative number indicates no limit'),
'doall' : fields.boolean('Repeat Missed', help="Enable this if you want to execute missed occurences as soon as the server restarts."),
'nextcall' : fields.datetime('Next Execution Date', required=True, help="Next planned execution date for this scheduler"),
'model': fields.char('Object', size=64, help="Name of object whose function will be called when this scheduler will run. e.g. 'res.partener'"),
'function': fields.char('Function', size=64, help="Name of the method to be called on the object when this scheduler is executed."),
'args': fields.text('Arguments', help="Arguments to be passed to the method. e.g. (uid,)"),
'priority': fields.integer('Priority', help='0=Very Urgent\n10=Not urgent')
}
_defaults = {
'nextcall' : lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
'priority' : 5,
'user_id' : lambda obj,cr,uid,context: uid,
'interval_number' : 1,
'interval_type' : 'months',
'numbercall' : 1,
'active' : 1,
'doall' : 1
}
def _check_args(self, cr, uid, ids, context=None):
try:
for this in self.browse(cr, uid, ids, context):
str2tuple(this.args)
except:
return False
return True
_constraints = [
(_check_args, 'Invalid arguments', ['args']),
]
def _callback(self, cr, uid, model, func, args):
args = str2tuple(args)
m = self.pool.get(model)
if m and hasattr(m, func):
f = getattr(m, func)
try:
f(cr, uid, *args)
except Exception, e:
self._logger.notifyChannel('timers', netsvc.LOG_ERROR, "Job call of self.pool.get('%s').%s(cr, uid, *%r) failed" % (model, func, args))
self._logger.notifyChannel('timers', netsvc.LOG_ERROR, tools.exception_to_unicode(e))
def _poolJobs(self, db_name, check=False):
try:
db, pool = pooler.get_db_and_pool(db_name)
except:
return False
cr = db.cursor()
try:
if not pool._init:
now = datetime.now()
cr.execute('SELECT * FROM ir_cron '
'WHERE numbercall<>0 AND active AND nextcall<=now() '
'ORDER BY priority', debug=self._debug)
for job in cr.dictfetchall():
nextcall = datetime.strptime(job['nextcall'], '%Y-%m-%d %H:%M:%S')
numbercall = job['numbercall']
ok = False
while nextcall < now and numbercall:
if numbercall > 0:
numbercall -= 1
if not ok or job['doall']:
self._callback(cr, job['user_id'], job['model'], job['function'], job['args'])
if numbercall:
nextcall += _intervalTypes[job['interval_type']](job['interval_number'])
ok = True
addsql = ''
if not numbercall:
addsql = ', active=False'
cr.execute("UPDATE ir_cron "
"SET nextcall=%s, numbercall=%s"+addsql+ \
" WHERE id=%s",
(nextcall.strftime('%Y-%m-%d %H:%M:%S'), numbercall, job['id']),
debug=self._debug)
cr.commit()
cr.execute('SELECT min(nextcall) AS min_next_call FROM ir_cron '
'WHERE numbercall<>0 AND active AND nextcall>=now()', debug=self._debug)
next_call = cr.dictfetchone()['min_next_call']
if next_call:
next_call = time.mktime(time.strptime(next_call, '%Y-%m-%d %H:%M:%S'))
else:
next_call = int(time.time()) + 3600 # if do not find active cron job from database, it will run again after 1 day
if not check:
self.setAlarm(self._poolJobs, next_call, db_name, db_name)
except Exception, ex:
logger = netsvc.Logger()
logger.notifyChannel('cron', netsvc.LOG_WARNING,
'Exception in cron:'+str(ex))
finally:
cr.commit()
cr.close()
def restart(self, dbname):
self.cancel(dbname)
self._poolJobs(dbname)
def create(self, cr, uid, vals, context=None):
res = super(ir_cron, self).create(cr, uid, vals, context=context)
cr.commit()
self.restart(cr.dbname)
return res
def write(self, cr, user, ids, vals, context=None):
res = super(ir_cron, self).write(cr, user, ids, vals, context=context)
cr.commit()
self.restart(cr.dbname)
return res
def unlink(self, cr, uid, ids, context=None):
res = super(ir_cron, self).unlink(cr, uid, ids, context=context)
cr.commit()
self.restart(cr.dbname)
return res
ir_cron()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Python
| 0.999936
|
@@ -2748,17 +2748,16 @@
res.part
-e
ner'%22),%0A
|
43b0201573a7bef18347c47f0434444a30edc5b1
|
Use global parameters for port & fork_server
|
rosserial_python/nodes/serial_node.py
|
rosserial_python/nodes/serial_node.py
|
#!/usr/bin/env python
#####################################################################
# Software License Agreement (BSD License)
#
# Copyright (c) 2011, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
__author__ = "mferguson@willowgarage.com (Michael Ferguson)"
import roslib; roslib.load_manifest("rosserial_python")
import rospy
from rosserial_python import SerialClient, RosSerialServer
import multiprocessing
import sys
if __name__=="__main__":
port_name = rospy.get_param('~port','/dev/ttyUSB0')
baud = int(rospy.get_param('~baud','57600'))
tcp_portnum = int(rospy.get_param('~tcp_port', '11411'))
fork_server = rospy.get_param('~fork_server', True)
sys.argv = rospy.myargv(argv=sys.argv)
#import pdb; pdb.set_trace()
if len(sys.argv) == 2 :
port_name = sys.argv[1]
if len(sys.argv) == 3 :
tcp_portnum = int(sys.argv[2])
if port_name == "tcp" :
server = RosSerialServer(tcp_portnum, fork_server)
rospy.loginfo("Waiting for socket connections on port %d" % tcp_portnum)
try:
server.listen()
except KeyboardInterrupt:
rospy.loginfo("got keyboard interrupt")
finally:
rospy.loginfo("Shutting down")
for process in multiprocessing.active_children():
rospy.loginfo("Shutting down process %r", process)
process.terminate()
process.join()
rospy.loginfo("All done")
else : # Use serial port
rospy.init_node("serial_node")
rospy.loginfo("ROS Serial Python Node")
rospy.loginfo("Connected on %s at %d baud" % (port_name,baud) )
client = SerialClient(port_name, baud)
try:
client.run()
except KeyboardInterrupt:
pass
|
Python
| 0
|
@@ -2090,17 +2090,41 @@
_param('
-~
+/rosserial_embeddedlinux/
tcp_port
@@ -2171,17 +2171,41 @@
_param('
-~
+/rosserial_embeddedlinux/
fork_ser
|
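The diff above renames the parameters from private (`~tcp_port`, resolved inside the node's own namespace, e.g. /serial_node/tcp_port) to absolute global names under `/rosserial_embeddedlinux/`, so several nodes can share one setting. A sketch of the two lookups (requires a running ROS master, so not runnable standalone):

# '~name' resolves in the node's private namespace; '/a/b/name' is an
# absolute, globally shared parameter.
import rospy

rospy.init_node('serial_node')

# Before: private to this node, e.g. /serial_node/tcp_port
tcp_portnum = int(rospy.get_param('~tcp_port', '11411'))

# After: global, so several nodes can share one setting
tcp_portnum = int(rospy.get_param('/rosserial_embeddedlinux/tcp_port', '11411'))
fork_server = rospy.get_param('/rosserial_embeddedlinux/fork_server', True)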
33d1e3f1d94c7af29742619df42983507e067568
|
Add VotableModelMixin
|
board/models.py
|
board/models.py
|
import datetime
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.db import models
class UserProfile(models.Model):
user = models.OneToOneField(User, related_name='profile')
nick = models.CharField(max_length=16)
def __str__(self):
return str(self.user)
class Board(models.Model):
name = models.CharField(max_length=16)
slug = models.SlugField()
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('board_post_list', kwargs={'board': self.slug})
class Category(models.Model):
board = models.ForeignKey('Board', related_name='categories')
name = models.CharField(max_length=8)
slug = models.SlugField()
def __str__(self):
return self.name
class Tag(models.Model):
name = models.CharField(max_length=16)
def __str__(self):
return self.name
class Post(models.Model):
user = models.ForeignKey(User, blank=True, null=True, related_name='posts')
ipaddress = models.GenericIPAddressField(protocol='IPv4')
board = models.ForeignKey('Board', related_name='posts')
category = models.ForeignKey('Category', blank=True, null=True, related_name='posts')
title = models.CharField(max_length=32)
contents = models.TextField()
tags = models.ManyToManyField(Tag, blank=True, null=True)
viewcount = models.PositiveIntegerField(default=0)
created_time = models.DateTimeField(auto_now_add=True)
modified_time = models.DateTimeField()
@property
def votes(self):
vd = dict()
vd['upvote'] = self._votes.filter(vote=Vote.UPVOTE).count()
vd['downvote'] = self._votes.filter(vote=Vote.DOWNVOTE).count()
vd['total'] = vd['upvote'] - vd['downvote']
return vd
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse('post_detail', kwargs={'pk': self.id})
def save(self, *args, **kwargs):
if not kwargs.pop('auto_now', False):
self.modified_time = datetime.datetime.now()
super(Post, self).save(*args, **kwargs)
class Comment(models.Model):
post = models.ForeignKey('Post', related_name='comments')
comment = models.ForeignKey('self', related_name='subcomments', blank=True, null=True)
user = models.ForeignKey(User, blank=True, null=True, related_name='comments')
ipaddress = models.GenericIPAddressField(protocol='IPv4')
contents = models.TextField()
created_time = models.DateTimeField(auto_now_add=True)
class Vote(models.Model):
DOWNVOTE = 0
UPVOTE = 1
VOTE_CHOICES = (
(DOWNVOTE, 'Not recommend'),
(UPVOTE, 'Recommend'),
)
post = models.ForeignKey('Post', blank=True, null=True, related_name='_votes')
comment = models.ForeignKey('Comment', blank=True, null=True, related_name='_votes')
user = models.ForeignKey(User, blank=True, null=True, related_name='_votes')
ipaddress = models.GenericIPAddressField(protocol='IPv4')
vote = models.PositiveSmallIntegerField(choices=VOTE_CHOICES)
class Announcement(models.Model):
post = models.OneToOneField('Post', related_name='announcement')
boards = models.ManyToManyField('Board', related_name='announcements')
|
Python
| 0
|
@@ -927,13 +927,324 @@
ass
-Post(
+VotableModelMixin:%0A @property%0A def votes(self):%0A vd = dict()%0A vd%5B'upvote'%5D = self._votes.filter(vote=Vote.UPVOTE).count()%0A vd%5B'downvote'%5D = self._votes.filter(vote=Vote.DOWNVOTE).count()%0A vd%5B'total'%5D = vd%5B'upvote'%5D - vd%5B'downvote'%5D%0A return vd%0A%0A%0Aclass Post(VotableModelMixin,
mode
@@ -1849,274 +1849,8 @@
()%0A%0A
- @property%0A def votes(self):%0A vd = dict()%0A vd%5B'upvote'%5D = self._votes.filter(vote=Vote.UPVOTE).count()%0A vd%5B'downvote'%5D = self._votes.filter(vote=Vote.DOWNVOTE).count()%0A vd%5B'total'%5D = vd%5B'upvote'%5D - vd%5B'downvote'%5D%0A return vd%0A%0A
@@ -2176,24 +2176,24 @@
**kwargs)%0A%0A%0A
-
class Commen
@@ -2190,24 +2190,43 @@
ass Comment(
+VotableModelMixin,
models.Model
|
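The diff above extracts the duplicated `votes` property into `VotableModelMixin`, listed first in the bases so Python's MRO picks it up before the model class. A standalone sketch of the mixin pattern, with vote storage faked as a plain list of signed ints so it runs without Django:

# Shared behaviour lives once in a mixin; each model inherits it.

class VotableMixin:
    @property
    def votes(self):
        up = sum(1 for v in self._votes if v > 0)
        down = sum(1 for v in self._votes if v < 0)
        return {'upvote': up, 'downvote': down, 'total': up - down}

class Post(VotableMixin):
    def __init__(self, votes):
        self._votes = votes

class Comment(VotableMixin):
    def __init__(self, votes):
        self._votes = votes

print(Post([1, 1, -1]).votes)  # {'upvote': 2, 'downvote': 1, 'total': 1}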
56fee8518b9022b854f0bb300e8e44ec84539a29
|
Fix callback to send a bogus message which will close the connection
|
bokeh/client.py
|
bokeh/client.py
|
'''
'''
from __future__ import absolute_import, print_function
import logging
import random
log = logging.getLogger(__name__)
from tornado import gen
from tornado.httpclient import HTTPRequest
from tornado.ioloop import IOLoop, PeriodicCallback
from tornado.websocket import websocket_connect
from bokeh.server.exceptions import MessageError, ProtocolError, ValidationError
from bokeh.server.protocol.receiver import Receiver
from bokeh.server.protocol import Protocol
class ClientSession(object):
def __init__(self, url="ws://localhost:8888/ws", callbacks=None):
self._request = HTTPRequest(url, headers={"bokeh-protocol-version": "1.0"})
self._callbacks = callbacks
self._session_id = None
self._protocol = Protocol("1.0")
self._receiver = Receiver(self._protocol)
self._client = None
def connect(self):
loop = IOLoop.instance()
loop.add_callback(self._run)
try:
loop.start()
except KeyboardInterrupt:
if self._client is not None:
self._client.close(1000, "user interruption")
def send_message(self, message):
sent = message.send(self._client)
log.debug("Sent %r [%d bytes]", message, sent)
@gen.coroutine
def _run(self):
yield self._connect_async()
yield self._worker()
@gen.coroutine
def _connect_async(self):
self._client = yield websocket_connect(self._request)
@gen.coroutine
def _worker(self):
while True:
fragment = yield self._client.read_message()
if fragment is None:
# XXX Tornado doesn't give us the code and reason
log.info("Connection closed by server")
break
try:
message = yield self._receiver.consume(fragment)
except (MessageError, ProtocolError, ValidationError) as e:
log.error("%r", e)
raise e
if message:
log.debug("Received %r", message)
if message.msgtype is 'ACK':
self._session_id = message.header['sessid']
self._start_callbacks()
IOLoop.instance().stop()
def _callback_wrapper(self, func):
def wrapper():
func(self)
return wrapper
def _start_callbacks(self):
for cb, period in self._callbacks:
if period:
PeriodicCallback(self._callback_wrapper(cb),
period * 1000, # ms
).start()
else:
IOLoop.instance().add_callback(self._callback_wrapper(cb))
def foo(cli):
msg = Protocol("1.0").create('SERVER-INFO-REQ', cli._session_id)
cli.send_message(msg)
def bar(cli):
msg = Protocol("1.0").create('PULL-DOC-REQ', cli._session_id, "some_doc")
cli.send_message(msg)
def quux(cli):
log.warn("Deliberately wrong session ID")
msg = Protocol("1.0").create('SERVER-INFO-REQ', 'wrongsessid')
cli.send_message(msg)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
session = ClientSession(callbacks=[(foo, 0.8), (bar, 3.0), (quux, 30.0)])
session.connect()
|
Python
| 0
|
@@ -2950,138 +2950,101 @@
log.
-warn(%22Deliberately wrong session ID%22)%0A msg = Protocol(%221.0%22).create('SERVER-INFO-REQ', 'wrongsessid')%0A cli.send_message(msg)
+info(%22Sending deliberately bogus message%22)%0A cli._client.write_message(b%22xx%22, binary=True)%0A
%0A%0Aif
|
7ed8de3d15941c683ae70c15a6ce50bbe29a6580
|
remove unused field from books
|
books/models.py
|
books/models.py
|
from django.db import models
from wagtail.wagtailcore.models import Page, Orderable
from wagtail.wagtailcore.fields import RichTextField
from wagtail.wagtailadmin.edit_handlers import (FieldPanel,
InlinePanel)
from wagtail.wagtailimages.edit_handlers import ImageChooserPanel
# Create your models here.
class Book(Page):
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
revision = models.CharField(max_length=255, blank=True, null=True)
description = RichTextField(blank=True)
cover_image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
publish_date = models.DateField(blank=True, null=True)
isbn_10 = models.IntegerField(blank=True, null=True)
isbn_13 = models.CharField(max_length=255, blank=True, null=True)
content_panels = Page.content_panels + [
FieldPanel('name'),
FieldPanel('revision'),
FieldPanel('description', classname="full"),
ImageChooserPanel('cover_image'),
FieldPanel('publish_date'),
FieldPanel('isbn_10'),
FieldPanel('isbn_13'),
]
api_fields = ('created',
'updated',
'revision',
'description',
'cover_image',
'publish_date',
'isbn_10',
'isbn_13')
|
Python
| 0.000001
|
@@ -993,36 +993,8 @@
+ %5B%0A
- FieldPanel('name'),%0A
|
1a40dd2724a4a6364f0786fc5ac5f93d37daeaa0
|
add NoTestDataError
|
judgesite/task.py
|
judgesite/task.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import io
import json
import shutil
import subprocess
import os
from config import conf
from models import save_result
class JudgeTask(object):
def __init__(self, message):
task = json.loads(message)
self.submit_type = task["submit_type"]
self.status_id = str(task["status_id"])
self.code = task["code"]
self.language = task["language"]
self.testdata_id = str(task["testdata_id"])
self.time_limit = str(task["time_limit"])
self.memory_limit = str(task["memory_limit"])
def go(self):
self._clean_files()
self._prepare_temp_dir()
self._dump_code_to_file()
self._prepare_testdata_file()
self._run()
self._read_result()
self._save_result()
self._clean_files()
def _prepare_temp_dir(self):
logging.info("Prepare temp dir")
os.mkdir(conf.tmp_path)
def _dump_code_to_file(self):
logging.info("Dump code to file")
filename = "Main." + self.language
self.code_file = os.path.join(conf.tmp_path, filename)
code_file = io.open(self.code_file, 'w', encoding='utf8')
code_file.write(self.code)
code_file.close()
def _prepare_testdata_file(self):
logging.info("Prepare testdata")
input_file = os.path.join(conf.testdata_path, self.testdata_id, "in.in")
output_file = os.path.join(conf.testdata_path, self.testdata_id, "out.out")
shutil.copy(input_file, conf.tmp_path)
shutil.copy(output_file, conf.tmp_path)
def _run(self):
logging.info("GO!GO!GO!")
commands = ["sudo", "./Core", "-c", self.code_file, "-t",
self.time_limit, "-m", self.memory_limit, "-d",
conf.tmp_path]
subprocess.call(commands)
def _read_result(self):
logging.info("Read result")
result_file = open(os.path.join(conf.tmp_path, "result.txt"), 'r')
self.result = result_file.readline().strip()
self.run_time = result_file.readline().strip()
self.run_memory = result_file.readline().strip()
self.others = result_file.read()
def _save_result(self):
logging.info("Save result")
save_result(status_id=self.status_id,
type=self.submit_type,
run_time=self.run_time,
run_memory=self.run_memory,
compiler_output=self.others,
status=self.result)
def _clean_files(self):
logging.info("Clean files")
if os.path.exists(conf.tmp_path):
shutil.rmtree(conf.tmp_path)
|
Python
| 0.000001
|
@@ -177,16 +177,176 @@
esult%0A%0A%0A
+class NoTestDataException(Exception):%0A%0A def __init__(self, value=None):%0A self.value = value%0A%0A def __str__(self):%0A return repr(self.value)%0A%0A%0A
class Ju
@@ -757,16 +757,121 @@
mit%22%5D)%0A%0A
+ self.result = %22%22%0A self.run_time = 0%0A self.run_memory = 0%0A self.others = %22%22%0A%0A
def
@@ -905,24 +905,41 @@
an_files()%0A%0A
+ try:%0A
self
@@ -964,24 +964,28 @@
()%0A%0A
+
+
self._dump_c
@@ -991,32 +991,36 @@
code_to_file()%0A%0A
+
self._pr
@@ -1033,33 +1033,182 @@
testdata_file()%0A
-%0A
+ except NoTestDataException, e:%0A self.result = 'NoTestDataError'%0A except Exception, e:%0A raise e%0A else:%0A
self._ru
@@ -1208,24 +1208,28 @@
elf._run()%0A%0A
+
self
@@ -1825,32 +1825,45 @@
= os.path.join(
+%0A
conf.testdata_pa
@@ -1924,24 +1924,37 @@
s.path.join(
+%0A
conf.testdat
@@ -1986,24 +1986,237 @@
%22out.out%22)%0A
+ testdata_exists = (%0A os.path.exists(input_file), os.path.exists(output_file))%0A if not testdata_exists%5B0%5D or not testdata_exists%5B1%5D:%0A raise NoTestDataException(testdata_exists)%0A
shut
|
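The diff above introduces a custom exception plus a try/except/else flow in `go()`, so a missing test-data file records a result instead of crashing, while unexpected errors still propagate. A standalone sketch of that control flow:

# try/except/else: one known failure mode is handled by name, anything
# else is re-raised, and the happy path runs only when nothing was raised.

class NoTestDataException(Exception):
    def __init__(self, value=None):
        self.value = value
    def __str__(self):
        return repr(self.value)

def judge(has_testdata):
    result = ''
    try:
        if not has_testdata:
            raise NoTestDataException((False, False))
    except NoTestDataException:
        result = 'NoTestDataError'   # known, recoverable failure
    else:
        result = 'Accepted'          # only runs if no exception was raised
    return result

print(judge(False))  # NoTestDataError
print(judge(True))   # Accepted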
2d34cc4321db36d316007bd394d4e71d39f587e3
|
fix for mutable keywords list object
|
InvertedIndex.py
|
InvertedIndex.py
|
"""
@file InvertedIndex.py
@brief creating inverted indexes and manage them for search
@author Daniel Schauenberg <schauend@informatik.uni-freiburg.de>
@version 0.1
@date 2009-10-24
"""
import heapq
import FileParser
from operator import itemgetter
class IndexManager:
""" Class for managing the complete index
this class manages an inverted index in a simple
hash map of the form
index = {
key : documents
}
where key is the word and documents is an array with
the document IDs
"""
def __init__(self,folder):
""" Constructor which creates the index and the set to hold
the actual filenames
"""
self.index = {}
self.filenames = {}
self.parser = FileParser.DocumentParser(folder)
def build_index(self):
""" method to build the inverted index for the
documents in the given folder. the file parser
object is used to parse the single files.
"""
docs = self.parser.get_documents()
for d in docs:
docid,words = self.parser.parse_file(d)
for w in words:
self.add_key(w,docid,d)
def add_key(self, key, doc, filename):
""" method to add a document to a index object
or create a new object
Parameters:
key -- the keyword to add to the index
doc -- the document id to add
filename -- the actual name of the document
"""
self.filenames[doc] = filename
try:
heapq.heappush(self.index[key],doc)
except:
self.index[key] = [doc]
heapq.heapify(self.index[key])
def get_documents(self,key):
""" method to get documents which contain the given
key
Parameters:
key -- the keyword to get the documents for
Returns:
array of document IDs for the given key
"""
try:
documents = self.index[key.lower()]
return list(set(documents))
except Exception, e:
return -1
def get_intersected_list(self,keywords):
""" method to get the intersected documents list for
the keywords provided in the array
Parameters:
keywords -- array of keywords to search for
Returns:
intersected list of keywords
"""
# list to start intersection with
comparelist = self.get_documents(keywords.pop(0))
if (comparelist == -1):
return -1
# list to later hold the actual filenames
returnlist = []
for key in keywords:
docs = self.get_documents(key)
if (docs == -1): return -1
set(comparelist).intersection(set(docs))
for c in comparelist:
returnlist.append(self.filenames[c])
return returnlist
def get_index_size(self):
""" method to get length of the index
Returns:
length of index
"""
return len(self.index)
def get_index(self):
""" get the index object
"""
return self.index
def get_two_word_one_doc(self):
""" O(n^2) implementation to find exactly one document to contain
two words.
"""
result = []
for i in self.index.keys():
for u in self.index.keys():
intlist = self.get_intersected_list([i,u])
if (len(intlist) == 1):
result.append([intlist[0],i,u])
return result
def get_word_frequencies(self):
""" create object with frequencies of word occurrences
Returns:
dictionary with word occurrences, sorted
"""
occurrences = {}
for key in self.index.keys():
occurrences[key] = len(self.index[key])
sorted_occurrences = sorted(occurrences.items(), key=itemgetter(1))
return sorted_occurrences
def dump_objects(self,path):
""" method to dump methods to disk
Parameters:
path -- filepath where to dump objects
"""
self.parser.write_object_to_disk(path+"index.pickle",self.index)
self.parser.write_object_to_disk(path+"filenames.pickle",self.filenames)
def read_objects(self,path):
""" method to read objects from disk
Parameters:
path -- filepath from where to read objects
"""
self.index = self.parser.read_object_from_disk(path+"index.pickle")
self.filenames = self.parser.read_object_from_disk(path+"filenames.pickle")
|
Python
| 0.000001
|
@@ -2549,24 +2549,63 @@
ection with%0A
+ firstkeyword = keywords.pop(0)%0A
comp
@@ -2633,31 +2633,28 @@
cuments(
+first
keyword
-s.pop(0)
)%0A
@@ -3019,16 +3019,108 @@
mes%5Bc%5D)%0A
+ # fix to keep mutable keyword list consistent%0A keywords.append(firstkeyword)%0A
|
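The diff above fixes the mutation by re-appending the popped keyword; note the original also computes `set(comparelist).intersection(set(docs))` without assigning the result. An alternative (not the author's fix) avoids mutating the caller's list entirely and keeps the intersection:

# Read the first element instead of popping it, and accumulate the
# intersection with &= so the result is actually kept.

def intersect_documents(keywords, get_documents):
    first, rest = keywords[0], keywords[1:]   # read, don't pop
    result = set(get_documents(first))
    for key in rest:
        result &= set(get_documents(key))     # in-place set intersection
    return result

index = {'cat': [1, 2, 3], 'dog': [2, 3], 'fish': [3]}
print(intersect_documents(['cat', 'dog'], index.get))  # {2, 3}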
e0b3e23d4890a10f8bca4c699e5a9cd6294fee29
|
add xpub
|
keepkey-for-mn.py
|
keepkey-for-mn.py
|
#!/usr/bin/env python3
import sys, os
sys.path.append( os.path.join( os.path.dirname(__file__), '.' ) )
sys.path.append( os.path.join( os.path.dirname(__file__), '.', 'dashlib' ) )
from config import *
from keepkeylib.client import KeepKeyClient
from keepkeylib.transport_hid import HidTransport
import keepkeylib.ckd_public as bip32
def main():
# List all connected KeepKeys on USB
devices = HidTransport.enumerate()
# Check whether we found any
if len(devices) == 0:
print('No KeepKey found')
return
# Use first connected device
transport = HidTransport(devices[0])
# Creates object for manipulating KeepKey
client = KeepKeyClient(transport)
# Print out KeepKey's features and settings
# print(client.features)
keypath = mpath
bip32_path = client.expand_path(keypath)
# xpub to use
print('xpub --> ' + bip32.serialize(client.get_public_node(bip32_path).node, 0x043587CF))
for i in range(max_gab):
child_path = '%s%s' % (keypath + '/', str(i))
address = client.get_address(coin_name, client.expand_path(child_path))
print (coin_name +' address:', child_path, address)
client.close()
if __name__ == '__main__':
main()
# end
|
Python
| 0
|
@@ -857,24 +857,25 @@
to use %0A
+#
print('xpub
@@ -873,16 +873,21 @@
nt('xpub
+/tpub
--%3E ' +
@@ -956,16 +956,146 @@
3587CF))
+%0A print('xpub/tpub --%3E ' + bip32.serialize(client.get_public_node(bip32_path).node, ( 0x0488B21E if MAINNET else 0x043587CF )))
%0A%0A fo
|
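The diff above serializes the extended public key with a network-dependent BIP32 version prefix: 0x0488B21E Base58-encodes to a mainnet 'xpub…', 0x043587CF to a testnet 'tpub…'. A minimal sketch of the switch (MAINNET is assumed to be a config flag, as in the patched script):

# Standard BIP32 version bytes for extended *public* keys.

MAINNET = True

def xpub_version_bytes(mainnet):
    return 0x0488B21E if mainnet else 0x043587CF

print(hex(xpub_version_bytes(MAINNET)))   # 0x488b21e -> 'xpub...'
print(hex(xpub_version_bytes(False)))     # 0x43587cf -> 'tpub...'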
658de49626b6ef8b199bee6502bc62abebaa0803
|
Use an assert for an error that is concerning developers
|
sale_condition_template/sale_order.py
|
sale_condition_template/sale_order.py
|
# -*- coding: utf-8 -*-
#
#
# Author: Nicolas Bessi
# Copyright 2013-2014 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from openerp.osv import orm, fields
from openerp.tools.translate import _
class SaleOrder(orm.Model):
"""Add text condition"""
_inherit = "sale.order"
_columns = {
'condition_template1_id': fields.many2one(
'base.condition.template',
'Template Top conditions'),
'condition_template2_id': fields.many2one(
'base.condition.template',
'Template Bottom conditions'),
'note1': fields.html('Top conditions'),
'note2': fields.html('Bottom conditions'),
}
def set_condition(self, cr, uid, cond_id, field_name, partner_id):
if not cond_id:
return {'value': {field_name: ''}}
cond_obj = self.pool['base.condition.template']
text = cond_obj.get_value(cr, uid, cond_id, partner_id)
return {'value': {field_name: text}}
def set_note1(self, cr, uid, so_id, cond_id, partner_id):
return self.set_condition(cr, uid, cond_id, 'note1', partner_id)
def set_note2(self, cr, uid, so_id, cond_id, partner_id):
return self.set_condition(cr, uid, cond_id, 'note2', partner_id)
def action_invoice_create(self, cr, user, order_id,
grouped=False,
states=['confirmed', 'done', 'exception'],
date_inv=False, context=None):
# function is design to return only one id
invoice_obj = self.pool['account.invoice']
inv_id = super(SaleOrder, self).action_invoice_create(
cr, user, order_id, grouped, states, date_inv, context=context)
invoice = invoice_obj.browse(cr, user, inv_id, context=context)
if isinstance(order_id, list):
if len(order_id) > 1:
raise orm.except_osv(
_('action_invoice_create can only receive one id'),
_('action_invoice_create can only receive one id'))
order_id = order_id[0]
order = self.browse(cr, user, order_id, context=context)
inv_data = {'condition_template1_id': order.condition_template1_id.id,
'condition_template2_id': order.condition_template2_id.id,
'note1': order.note1,
'note2': order.note2}
invoice.write(inv_data, context=context)
return inv_id
|
Python
| 0.000002
|
@@ -818,46 +818,8 @@
lds%0A
-from openerp.tools.translate import _%0A
%0A%0Acl
@@ -2453,14 +2453,23 @@
_id,
+ (tuple,
list)
+)
:%0A
@@ -2478,18 +2478,22 @@
-if
+assert
len(ord
@@ -2503,195 +2503,53 @@
id)
-%3E 1:%0A raise orm.except_osv(%0A _('action_invoice_create can only receive one id'),%0A _('action_invoice_create can only receive one id'))%0A
+== 1, %221 ID expected, got: %25s%22 %25 (order_id, )
%0A
@@ -2580,16 +2580,17 @@
r_id%5B0%5D%0A
+%0A
|
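The diff above swaps a user-facing `orm.except_osv` for an `assert`: a caller passing several ids is a programming error, not something an end user can fix. A minimal sketch of the idea (names are illustrative; note that asserts are stripped under `python -O`, which is acceptable for developer-only invariants):

# Asserts guard invariants only a developer can violate; user-facing
# failures deserve a real exception instead.

def invoice_for_order(order_id):
    if isinstance(order_id, (tuple, list)):
        # Several ids here would be a bug in the calling code.
        assert len(order_id) == 1, "1 ID expected, got: %s" % (order_id,)
        order_id = order_id[0]
    return "invoice-for-%s" % order_id

print(invoice_for_order([42]))  # invoice-for-42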
8fc26c374cefdb4d5e3a7a5727231c752f384f61
|
Update event_handlers.py
|
boiler/user/event_handlers.py
|
boiler/user/event_handlers.py
|
from flask import current_app, url_for
from boiler.user import events
from boiler.di import get_service
from flask import has_request_context
"""
Event handlers
A collection of default handlers for events emitted in user service.
"""
# -----------------------------------------------------------------------------
# User events
# -----------------------------------------------------------------------------
def user_save_event(user):
""" Handle persist event for user entities """
msg = 'User ({}){} updated/saved'.format(user.id, user.username)
current_app.logger.info(msg)
# doggy.increment('user.updated')
def user_delete_event(user):
""" Handle delete event for user entities """
msg = 'User ({}){} deleted'.format(user.id, user.username)
current_app.logger.info(msg)
# doggy.increment('user.deleted')
def login_event(user):
""" Handle login event """
msg = 'User ({}){} logged in'.format(user.id, user.username)
current_app.logger.info(msg)
# doggy.increment('user.login')
def login_nonexistent_event(user):
""" Handle login nonexistent user event """
msg = 'Login failed for nonexistent user'
current_app.logger.info(msg)
# doggy.increment('user.login.failed.nonexistent')
# doggy.increment('user.login.failed')
def login_failed_event(user):
""" Handle login nonexistent user event """
msg = 'Login failed for user ({}){}'.format(user.id, user.username)
current_app.logger.info(msg)
# doggy.increment('user.login.failed')
def logout_event(user):
""" Handle logout event """
msg = 'User ({}){} logged out'.format(user.id, user.username)
current_app.logger.info(msg)
# doggy.increment('user.logout')
def register_event(user):
""" Handle registration event """
base_url = url_for('user.confirm.email.request', _external=True)
user_service = get_service('user.user_service')
user_service.send_welcome_message(user, base_url=base_url)
msg = 'User ({}){} registered'.format(user.id, user.username)
current_app.logger.info(msg)
# doggy.increment('user.registered')
def email_update_requested_event(user):
""" Handle email updated request event """
msg = 'User ({}){} requested email update'.format(user.id, user.username)
current_app.logger.info(msg)
# doggy.increment('user.email_update_requested')
if has_request_context():
base_url = url_for('user.confirm.email.request', _external=True)
user_service = get_service('user.user_service')
user_service.send_email_changed_message(user, base_url=base_url)
else:
msg = 'Update message is not sent, because executed '
msg += 'outside of request context'
current_app.logger.info(msg)
def email_confirmed_event(user):
""" Handle email confirmed event """
msg = 'User ({}){} confirmed email'.format(user.id, user.username)
current_app.logger.info(msg)
# doggy.increment('user.email_confirmed')
def password_change_requested_event(user):
""" Request password change event"""
msg = 'User ({}){} requested password change'.format(user.id, user.username)
current_app.logger.info(msg)
# doggy.increment('user.password_change_requested')
def password_changed_event(user):
""" Handle password changed event """
msg = 'User ({}){} changed password'.format(user.id, user.username)
current_app.logger.info(msg)
# doggy.increment('user.password_changed')
events.user_save_event.connect(user_save_event)
events.user_delete_event.connect(user_delete_event)
events.login_event.connect(login_event)
events.login_failed_nonexistent_event.connect(login_nonexistent_event)
events.login_failed_event.connect(login_failed_event)
events.logout_event.connect(logout_event)
events.register_event.connect(register_event)
events.email_update_requested_event.connect(email_update_requested_event)
events.password_change_requested_event.connect(password_change_requested_event)
events.password_changed_event.connect(password_changed_event)
# -----------------------------------------------------------------------------
# Role events
# -----------------------------------------------------------------------------
def user_got_role_event(user, role):
""" User got new role """
msg = 'User ({}){} got new role [{}]'
current_app.logger.info(msg.format(user.id, user.username, role.handle))
def user_lost_role_event(user, role):
""" User lost a role """
msg = 'User ({}){} lost a role [{}]'
current_app.logger.info(msg.format(user.id, user.username, role.handle))
events.user_got_role_event.connect(user_got_role_event)
events.user_lost_role_event.connect(user_lost_role_event)
|
Python
| 0.000002
|
@@ -134,16 +134,29 @@
_context
+, current_app
%0A%0A%22%22%22%0AEv
@@ -1787,24 +1787,105 @@
n event %22%22%22%0A
+ confirm = current_app.di.get_parameter('USER_ACCOUNTS_REQUIRE_CONFIRMATION')%0A
base_url
@@ -1936,24 +1936,44 @@
ternal=True)
+ if confirm else ''%0A
%0A user_se
|
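The diff above makes the confirmation URL conditional on a configuration flag fetched through the app's DI container. A minimal, framework-free sketch of that flag-guarded computation (names are illustrative):

# Compute the URL only when confirmation is enabled; otherwise pass ''.

def welcome_base_url(confirm_required, build_url):
    return build_url() if confirm_required else ''

print(welcome_base_url(True, lambda: 'https://example.test/confirm'))
print(welcome_base_url(False, lambda: 'https://example.test/confirm'))  # ''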
6fc68abdb48134f4e647f0a1d69becd374d1147f
|
add missing Python file encoding
|
brasilcomvc/accounts/admin.py
|
brasilcomvc/accounts/admin.py
|
from django.contrib import admin
from .models import User, UserAddress
class UserAdmin(admin.ModelAdmin):
class UserAddressInline(admin.StackedInline):
model = UserAddress
list_display = ('email', 'full_name', 'username',)
fieldsets = (
('Informações Pessoais', {
'fields': ('full_name', 'username', 'email',),
}),
('Informações Profissionais', {
'fields': ('job_title', 'bio',),
}),
('Notificações', {
'fields': ('email_newsletter',),
}),
)
inlines = (UserAddressInline,)
admin.site.register(User, UserAdmin)
|
Python
| 0.000014
|
@@ -1,12 +1,68 @@
+# coding: utf8%0Afrom __future__ import unicode_literals%0A%0A
from django.
|
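The diff above prepends a PEP 263 coding declaration and a `unicode_literals` import, which is what lets the non-ASCII fieldset labels ('Informações …') parse under Python 2 and behave as unicode there; both lines are harmless no-ops on Python 3. A minimal sketch:

# coding: utf8
# Under Python 2, the coding line lets the parser read non-ASCII bytes in
# the source, and the __future__ import makes bare literals unicode.
from __future__ import unicode_literals

label = 'Informações Pessoais'
print(label)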
ddb3f3cb33bab10113dbf290c65b9919339fdd72
|
Update artman version
|
synthtool/gcp/gapic_generator.py
|
synthtool/gcp/gapic_generator.py
|
from pathlib import Path
import tempfile
import platform
from synthtool import _tracked_paths
from synthtool import log
from synthtool import shell
from synthtool.sources import git
GOOGLEAPIS_URL: str = 'git@github.com:googleapis/googleapis.git'
GOOGLEAPIS_PRIVATE_URL: str = (
'git@github.com:googleapis/googleapis-private.git')
class GAPICGenerator:
def __init__(self, private: bool = False):
# Docker on mac by default cannot use the default temp file location
# instead use the more standard *nix /tmp location\
if platform.system() == 'Darwin':
tempfile.tempdir = '/tmp'
self._ensure_dependencies_installed()
# clone google apis to temp
# git clone git@github.com:googleapis/googleapis.git
if not private:
googleapis_url = GOOGLEAPIS_URL
else:
googleapis_url = GOOGLEAPIS_PRIVATE_URL
self.googleapis = git.clone(googleapis_url)
def py_library(self, service: str, version: str, **kwargs) -> Path:
'''
Generates the Python Library files using artman/GAPIC
returns a `Path` object
library: path to library. 'google/cloud/speech'
version: version of lib. 'v1'
'''
return self._generate_code(service, version, 'python', **kwargs)
def _generate_code(self, service, version, language,
config_path=None, artman_output_name=None):
# map the language to the artman argument and subdir of genfiles
GENERATE_FLAG_LANGUAGE = {
'python': ('python_gapic', 'python'),
'nodejs': ('nodejs_gapic', 'js'),
'ruby': ('ruby_gapic', 'ruby'),
}
if language not in GENERATE_FLAG_LANGUAGE:
raise ValueError("provided language unsupported")
gapic_arg, gen_language = GENERATE_FLAG_LANGUAGE[language]
# Ensure docker image
log.debug("Pulling artman docker image.")
shell.run(['docker', 'pull', 'googleapis/artman:0.10.1'])
# Run the code generator.
# $ artman --config path/to/artman_api.yaml generate python_gapic
if config_path is None:
config_path = (
Path('google/cloud') / service
/ f"artman_{service}_{version}.yaml")
elif Path(config_path).is_absolute():
config_path = Path(config_path).relative_to('/')
else:
config_path = Path('google/cloud') / service / Path(config_path)
if not (self.googleapis/config_path).exists():
raise FileNotFoundError(
f"Unable to find configuration yaml file: {config_path}.")
subprocess_args = ['artman', '--config', config_path, 'generate',
gapic_arg]
log.info(f"Running generator.")
result = shell.run(subprocess_args, cwd=self.googleapis)
if result.returncode:
raise Exception(f"Failed to generate from {config_path}")
# Expect the output to be in the artman-genfiles directory.
# example: /artman-genfiles/python/speech-v1
if artman_output_name is None:
artman_output_name = f"{service}-{version}"
genfiles_dir = self.googleapis/'artman-genfiles'/gen_language
genfiles = genfiles_dir/artman_output_name
if not genfiles.exists():
raise FileNotFoundError(
f"Unable to find generated output of artman: {genfiles}.")
_tracked_paths.add(genfiles)
return genfiles
def _ensure_dependencies_installed(self):
log.debug("Ensuring dependencies")
dependencies = ['docker', 'git', 'artman']
failed_dependencies = []
for dependency in dependencies:
return_code = shell.run(
['which', dependency], check=False).returncode
if return_code:
failed_dependencies.append(dependency)
if failed_dependencies:
raise EnvironmentError(
f"Dependencies missing: {', '.join(failed_dependencies)}")
shell.run(['docker', 'pull', 'googleapis/artman:0.11.0'])
# TODO: Install artman in a virtualenv.
|
Python
| 0.000002
|
@@ -177,16 +177,41 @@
rt git%0A%0A
+ARTMAN_VERSION = '0.12.0'
%0AGOOGLEA
@@ -2008,32 +2008,33 @@
ocker', 'pull',
+f
'googleapis/artm
@@ -2040,14 +2040,22 @@
man:
-0.10.1
+ARTMAN_VERSION
'%5D)%0A
@@ -4136,16 +4136,17 @@
'pull',
+f
'googlea
@@ -4160,14 +4160,22 @@
man:
-0.11.0
+ARTMAN_VERSION
'%5D)%0A
|
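The diff above hoists the pinned artman tag into a module constant interpolated at both `docker pull` sites, so the two pinned versions cannot drift apart. A minimal sketch of the single-source-of-truth pattern (the exact interpolation in the real patch may differ; this assumes a standard f-string):

# Pin once, interpolate everywhere.

ARTMAN_VERSION = '0.12.0'

def pull_command():
    return ['docker', 'pull', f'googleapis/artman:{ARTMAN_VERSION}']

print(pull_command())  # ['docker', 'pull', 'googleapis/artman:0.12.0']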
56cbbef7b8bbfa31445dad1561c4014804250fd5
|
fix test
|
kyototycoon/test/test_kyototycoon.py
|
kyototycoon/test/test_kyototycoon.py
|
# (C) Datadog, Inc. 2010-2017
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# stdlib
from nose.plugins.attrib import attr
# 3p
import requests
# project
from checks import AgentCheck
from tests.checks.common import AgentCheckTest
config = {
'instances': [{
'report_url': 'http://localhost:1978/rpc/report',
'tags': ['optional:tag1']
}]
}
METRICS = [
'kyototycoon.threads',
'kyototycoon.connections_per_s',
'kyototycoon.ops.get.hits_per_s',
'kyototycoon.ops.get.misses_per_s',
'kyototycoon.ops.set.hits_per_s',
'kyototycoon.ops.set.misses_per_s',
'kyototycoon.ops.del.hits_per_s',
'kyototycoon.ops.del.misses_per_s',
'kyototycoon.records',
'kyototycoon.size',
'kyototycoon.ops.get.total_per_s',
'kyototycoon.ops.get.total_per_s',
'kyototycoon.ops.set.total_per_s',
'kyototycoon.ops.set.total_per_s',
'kyototycoon.ops.del.total_per_s',
'kyototycoon.ops.del.total_per_s',
# 'kyototycoon.replication.delay', # Since I am not spinning up multiple servers, this should be 0
]
@attr(requires='kyototycoon')
class TestKyototycoon(AgentCheckTest):
"""Basic Test for kyototycoon integration."""
CHECK_NAME = 'kyototycoon'
def setUp(self):
dat = {
'dddd': 'dddd'
}
headers = {
'X-Kt-Mode': 'set'
}
for x in range(0, 100):
requests.put('http://localhost:1978', data=dat, headers=headers)
requests.get('http://localhost:1978')
def test_check(self):
"""
Testing Kyototycoon check.
"""
self.run_check_twice(config)
for mname in METRICS:
self.assertMetric(mname, count=1, at_least=0, tags=['optional:tag1'])
self.assertServiceCheck('kyototycoon.can_connect', status=AgentCheck.OK, tags=['optional:tag1'], at_least=1)
self.coverage_report()
|
Python
| 0.000002
|
@@ -1691,16 +1691,201 @@
ETRICS:%0A
+ if mname == 'kyototycoon.records' or 'kyototycoon.size':%0A self.assertMetric(mname, count=1, at_least=0, tags=%5B'optional:tag1', 'db:0'%5D)%0A else:%0A
|
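A caveat on the diff above: in Python, `mname == 'kyototycoon.records' or 'kyototycoon.size'` is always truthy, because `or` returns the non-empty string on the right whenever the equality is False. A membership test expresses the intended check; a minimal demonstration:

# `x == 'a' or 'b'` evaluates to 'b' (truthy) whenever x != 'a'.

mname = 'kyototycoon.threads'

print(bool(mname == 'kyototycoon.records' or 'kyototycoon.size'))  # True (!)
print(mname in ('kyototycoon.records', 'kyototycoon.size'))        # False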
fab3936bacbb961f2214ea5d1dce913c1635ab2c
|
Initial commit of a branch for version 7.0. All modules were marked as not installable until it is verified that they work or have been migrated; to use or test them, the installable attribute in each module's __openerp__.py file should be set to True
|
l10n_es_account_asset/__openerp__.py
|
l10n_es_account_asset/__openerp__.py
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name" : "Assets Management",
"version" : "1.0",
"depends" : ["account"],
"author" : "OpenERP S.A.",
"description": """Financial and accounting asset management.
This Module manages the assets owned by a company or an individual. It will keep track of depreciation's occurred on
those assets. And it allows to create Move's of the depreciation lines.
""",
"website" : "http://www.openerp.com",
"category" : "Accounting & Finance",
"init_xml" : [
],
"demo_xml" : [
],
'test': ['test/account_asset.yml',
],
"update_xml" : [
"security/account_asset_security.xml",
"security/ir.model.access.csv",
"account_asset_wizard.xml",
"wizard/account_asset_change_duration_view.xml",
"wizard/wizard_asset_compute_view.xml",
"account_asset_view.xml",
"account_asset_invoice_view.xml",
"report/account_asset_report_view.xml",
],
"active": False,
"installable": True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Python
| 0.999886
|
@@ -1980,11 +1980,12 @@
e%22:
-Tru
+Fals
e,%0A%7D
|