| commit (string, length 40) | subject (string, length 1–3.25k) | old_file (string, length 4–311) | new_file (string, length 4–311) | old_contents (string, length 0–26.3k) | lang (string, 3 classes) | proba (float64, 0–1) | diff (string, length 0–7.82k) |
|---|---|---|---|---|---|---|---|
1c652e3ae8d5d091a9c87533329a5b727707cb1c
|
enable passwordless sudo as any user
|
fabtools/tests/functional_tests/conftest.py
|
fabtools/tests/functional_tests/conftest.py
|
from pipes import quote
import logging
import os
import sys

from mock import patch
import pytest

from fabric.api import env, hide, lcd, local, settings
from fabric.state import connections

from fabtools.vagrant import version as _vagrant_version

HERE = os.path.dirname(__file__)

VAGRANT_VERSION = _vagrant_version()
MIN_VAGRANT_VERSION = (1, 3)


@pytest.yield_fixture(scope='session', autouse=True)
def setup_package():
    _check_vagrant_version()

    vagrant_box = os.environ.get('FABTOOLS_TEST_BOX')
    if not vagrant_box:
        pytest.skip("Set FABTOOLS_TEST_BOX to choose a Vagrant base box for functional tests")

    vagrant_provider = os.environ.get('FABTOOLS_TEST_PROVIDER')
    reuse_vm = os.environ.get('FABTOOLS_TEST_REUSE_VM')

    _configure_logging()
    _allow_fabric_to_access_the_real_stdin()
    if not reuse_vm:
        _stop_vagrant_machine()
        _init_vagrant_machine(vagrant_box)
    _start_vagrant_machine(vagrant_provider)
    _target_vagrant_machine()
    _set_optional_http_proxy()
    _update_package_index()

    yield

    if not reuse_vm:
        _stop_vagrant_machine()


def _check_vagrant_version():
    if VAGRANT_VERSION is None:
        pytest.skip("Vagrant is required for functional tests")
    elif VAGRANT_VERSION < MIN_VAGRANT_VERSION:
        pytest.skip("Vagrant >= %s is required for functional tests" % ".".join(map(str, MIN_VAGRANT_VERSION)))


def _configure_logging():
    logger = logging.getLogger('paramiko')
    logger.setLevel(logging.WARN)


def _allow_fabric_to_access_the_real_stdin():
    patcher = patch('fabric.io.sys')
    mock_sys = patcher.start()
    mock_sys.stdin = sys.__stdin__


_VAGRANTFILE_TEMPLATE = """\
Vagrant.configure(2) do |config|
  config.vm.box = "%s"
  # Speed up downloads using a shared cache across boxes
  if Vagrant.has_plugin?("vagrant-cachier")
    config.cache.scope = :box
  end
end
"""


def _init_vagrant_machine(base_box):
    path = os.path.join(HERE, 'Vagrantfile')
    contents = _VAGRANTFILE_TEMPLATE % base_box
    with open(path, 'w') as vagrantfile:
        vagrantfile.write(contents)


def _start_vagrant_machine(provider):
    if provider:
        options = ' --provider %s' % quote(provider)
    else:
        options = ''
    with lcd(HERE):
        with settings(hide('stdout')):
            local('vagrant up' + options)


def _stop_vagrant_machine():
    with lcd(HERE):
        with settings(hide('stdout', 'stderr', 'warnings'), warn_only=True):
            local('vagrant halt')
            local('vagrant destroy -f')


def _target_vagrant_machine():
    config = _vagrant_ssh_config()
    _set_fabric_env(
        host=config['HostName'],
        port=config['Port'],
        user=config['User'],
        key_filename=config['IdentityFile'].strip('"'),
    )
    _clear_fabric_connection_cache()


def _vagrant_ssh_config():
    with lcd(HERE):
        with settings(hide('running')):
            output = local('vagrant ssh-config', capture=True)
    config = {}
    for line in output.splitlines()[1:]:
        key, value = line.strip().split(' ', 2)
        config[key] = value
    return config


def _set_fabric_env(host, port, user, key_filename):
    env.host_string = env.host = "%s:%s" % (host, port)
    env.user = user
    env.key_filename = key_filename
    env.disable_known_hosts = True
    env.abort_on_prompts = True


def _set_optional_http_proxy():
    http_proxy = os.environ.get('FABTOOLS_HTTP_PROXY')
    if http_proxy is not None:
        env.shell_env['http_proxy'] = http_proxy


def _clear_fabric_connection_cache():
    if env.host_string in connections:
        del connections[env.host_string]


def _update_package_index():
    from fabtools.system import distrib_family
    family = distrib_family()
    if family == 'debian':
        from fabtools.require.deb import uptodate_index
        uptodate_index()
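The `_vagrant_ssh_config` helper above relies on `vagrant ssh-config` emitting one `Key value` pair per line after a `Host` header. A self-contained check of that parsing logic against canned output (the sample values are illustrative, not taken from the record):

# Canned `vagrant ssh-config` output; values are illustrative only.
sample = """\
Host default
  HostName 127.0.0.1
  User vagrant
  Port 2222
  IdentityFile "/home/me/.vagrant.d/insecure_private_key"
"""

config = {}
for line in sample.splitlines()[1:]:  # [1:] skips the "Host default" header
    key, value = line.strip().split(' ', 2)
    config[key] = value

# .strip('"') mirrors _target_vagrant_machine's handling of quoted paths
print(config['HostName'], config['Port'], config['IdentityFile'].strip('"'))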
|
Python
| 0
|
@@ -3847,16 +3847,506 @@
 ptodate_index()
+
+
+@pytest.fixture(scope='session', autouse=True)
+def allow_sudo_user(setup_package):
+    """
+    Fix sudo config if needed
+
+    Some Vagrant boxes come with a too restrictive sudoers config
+    and only allow the vagrant user to run commands as root.
+    """
+    from fabtools.require import file as require_file
+    require_file(
+        '/etc/sudoers.d/fabtools',
+        contents="vagrant ALL=(ALL) NOPASSWD:ALL\n",
+        owner='root',
+        mode='440',
+        use_sudo=True,
+    )
|
1374807c05d9ebacb7a8cc6a75811697198bae32
|
add template fixture to document tests
|
fiduswriter/document/tests/editor_helper.py
|
fiduswriter/document/tests/editor_helper.py
|
import time
from random import randrange

from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait

from testing.selenium_helper import SeleniumHelper
from document.models import Document


class EditorHelper(SeleniumHelper):
    """
    Common functions used in threaded tests
    """

    def create_new_document(self):
        doc = Document.objects.create(
            owner=self.user,
        )
        doc.save()
        return doc

    def load_document_editor(self, driver, doc):
        driver.get("%s%s" % (
            self.live_server_url,
            doc.get_absolute_url()
        ))
        WebDriverWait(driver, self.wait_time).until(
            EC.presence_of_element_located((By.CLASS_NAME, 'editor-toolbar'))
        )
        self.inject_helpers(driver)

    def inject_helpers(self, driver):
        test_caret_script = open(
            'static-transpile/js/test_caret.js',
            'r'
        ).read()
        driver.execute_script(
            test_caret_script
        )

    def input_text(self, document_input, text):
        for char in text:
            document_input.send_keys(char)
            time.sleep(randrange(10, 40) / 200.0)

    def add_title(self, driver):
        title = "My title"
        driver.execute_script(
            'window.testCaret.setSelection(2,2)')
        document_input = self.driver.find_element_by_class_name(
            'ProseMirror'
        )
        self.input_text(document_input, title)

    def wait_for_doc_size(self, driver, size, seconds=False):
        if seconds is False:
            seconds = self.wait_time
        doc_size = driver.execute_script(
            'return window.theApp.page.view.state.doc.content.size')
        if doc_size < size and seconds > 0:
            time.sleep(0.1)
            self.wait_for_doc_size(driver, size, seconds - 0.1)

    def wait_for_doc_sync(self, driver, driver2, seconds=False):
        if seconds is False:
            seconds = self.wait_time
        doc_str = driver.execute_script(
            'return window.theApp.page.view.state.doc.toString()')
        doc2_str = driver2.execute_script(
            'return window.theApp.page.view.state.doc.toString()')
        if (doc_str != doc2_str):
            # The strings don't match.
            time.sleep(0.1)
            self.wait_for_doc_sync(driver, driver2, seconds - 0.1)
|
Python
| 0
|
@@ -489,16 +489,58 @@
 f.user,
+            template_id=1  # from fixture
|
4a0ee61e459803ce15765b8d6980fb32b9eca1b9
|
handle errors better in cached test
|
ftl/cached/cached.py
|
ftl/cached/cached.py
|
# Copyright 2017 Google Inc. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#     http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import subprocess
import os
import logging
import httplib2
import json

from ftl.common import ftl_util
from ftl.common import constants

from containerregistry.client import docker_creds
from containerregistry.client import docker_name
from containerregistry.client.v2_2 import docker_image
from containerregistry.client.v2_2 import docker_session
from containerregistry.transport import transport_pool

_THREADS = 32


class Cached():
    def __init__(self, args, runtime):
        self._base = args.base
        self._name = args.name
        self._directory = args.directory
        self._labels = [args.label_1, args.label_2]
        self._dirs = [args.dir_1, args.dir_2]
        self._offset = args.layer_offset
        self._runtime = runtime
        logging.getLogger().setLevel("NOTSET")
        logging.basicConfig(
            format='%(asctime)s.%(msecs)03d %(levelname)-8s %(message)s',
            datefmt='%Y-%m-%d,%H:%M:%S')

    def run_cached_tests(self):
        logging.info('Beginning building {0} images'.format(self._runtime))
        # For the binary
        builder_path = 'ftl/{0}_builder.par'.format(self._runtime)
        # For container builder
        if not os.path.isfile(builder_path):
            builder_path = 'bazel-bin/ftl/{0}_builder.par'.format(
                self._runtime)
        lyr_shas = []
        for label, dir in zip(self._labels, self._dirs):
            logging.info("label: %s" % label)
            logging.info("dir: %s" % dir)
            img_name = ''.join([self._name.split(":")[0], ":", label])
            ftl_args = [
                builder_path, '--base', self._base, '--name', img_name,
                '--directory', dir
            ]
            if label == "original":
                ftl_args.extend(['--no-cache'])
            cmd = subprocess.Popen(
                ftl_args,
                stderr=subprocess.PIPE)
            _, output = cmd.communicate()
            logging.info('output of build {0}: {1}'.format(label, output))
            lyr_shas.append(self._fetch_lyr_shas(img_name))
            self._cleanup(constants.VENV_DIR)
            self._del_img_from_gcr(img_name)
        try:
            self._compare_layers(lyr_shas[0], lyr_shas[1], self._offset)
        except ftl_util.FTLException as e:
            logging.error(e)
            exit(1)

    def _fetch_lyr_shas(self, img_name):
        name = docker_name.Tag(img_name)
        creds = docker_creds.DefaultKeychain.Resolve(name)
        transport = transport_pool.Http(httplib2.Http, size=_THREADS)
        with docker_image.FromRegistry(name, creds, transport) as img:
            lyrs = json.loads(img.manifest())['layers']
            lyr_shas = []
            for lyr in lyrs:
                lyr_shas.append(lyr['digest'])
            return set(lyr_shas)

    def _compare_layers(self, lyr_shas_1, lyr_shas_2, offset):
        lyr_diff = 0
        if len(lyr_shas_1) <= len(lyr_shas_2):
            lyr_diff = lyr_shas_1 - lyr_shas_2
        else:
            lyr_diff = lyr_shas_2 - lyr_shas_1
        logging.info(
            "Encountered %s differences between layers" % len(lyr_diff))
        logging.info("Different layer shas: %s" % lyr_diff)
        if len(lyr_diff) != offset:
            raise ftl_util.FTLException(
                "expected {0} different layers, got {1}".format(
                    self._offset, len(lyr_diff)))

    def _cleanup(self, path):
        try:
            subprocess.check_call(['rm', '-rf', path])
        except subprocess.CalledProcessError as e:
            logging.info(e)

    def _del_img_from_gcr(self, img_name):
        img_tag = docker_name.Tag(img_name)
        creds = docker_creds.DefaultKeychain.Resolve(img_tag)
        transport = transport_pool.Http(httplib2.Http, size=_THREADS)
        with docker_image.FromRegistry(img_tag, creds,
                                       transport) as base_image:
            img_digest = docker_name.Digest(''.join(
                [self._name.split(":")[0], "@",
                 str(base_image.digest())]))
            logging.info('Deleting tag {0}'.format(img_tag))
            docker_session.Delete(img_tag, creds, transport)
            logging.info('Deleting image {0}'.format(img_digest))
            docker_session.Delete(img_digest, creds, transport)
            return
|
Python
| 0.000001
|
@@ -2385,275 +2385,328 @@
-            cmd = subprocess.Popen(
-                ftl_args,
-                stderr=subprocess.PIPE)
-            _, output = cmd.communicate()
-            logging.info('output of build {0}: {1}'.format(label, output))
-            lyr_shas.append(self._fetch_lyr_shas(img_name))
+            try:
+                ftl_util.run_command(
+                    "cached-ftl-build-%s" % img_name,
+                    ftl_args)
+                lyr_shas.append(self._fetch_lyr_shas(img_name))
+            except ftl_util.FTLException as e:
+                logging.error(e)
+                exit(1)
+            finally:
@@ -2755,24 +2755,30 @@
+
+    #
 self._del_im
@@ -2810,176 +2810,189 @@
-        try:
-            self._compare_layers(lyr_shas[0], lyr_shas[1], self._offset)
-        except ftl_util.FTLException as e:
-            logging.error(e)
-            exit(1
+        if len(lyr_shas) is not 2:
+            logging.error("Incorrect number of layers to compare")
+            exit(1)
+        self._compare_layers(lyr_shas[0], lyr_shas[1], self._offset
 )
@@ -3527,16 +3527,93 @@
 ffset):
+        logging.info("Comparing layers \n%s\n%s" % (lyr_shas_1, lyr_shas_2))
|
03cf1abcb9262b4b0b9dd3b57ac07f7d507ddd8f
|
Drop fts.backends.base.BaseManager.__call__ convenience method. It breaks using manager (and, more importantly, RelatedManager which inherits that method) in views. See http://stackoverflow.com/questions/1142411/reverse-foreign-key-in-django-template for details.
|
fts/backends/base.py
|
fts/backends/base.py
|
"Base Fts class."
from django.db import transaction
from django.db import models
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
class InvalidFtsBackendError(ImproperlyConfigured):
pass
class BaseClass(object):
class Meta:
abstract = True
class BaseManager(models.Manager):
class Meta:
abstract = True
def __init__(self, **kwargs):
super(BaseManager, self).__init__()
self.fields = kwargs.get('fields')
self.default_weight = kwargs.get('default_weight')
if self.default_weight not in ['A', 'B', 'C', 'D']:
self.default_weight = 'A'
self.language_code = kwargs.get('language_code')
if not self.language_code:
from django.utils import translation
self.language_code = translation.get_language().split('-',1)[0].lower()
def __call__(self, query, **kwargs):
return self.search(query, **kwargs)
def contribute_to_class(self, cls, name):
# Instances need to get to us to update their indexes.
setattr(cls, '_search_manager', self)
super(BaseManager, self).contribute_to_class(cls, name)
if not self.fields:
self.fields = self._find_text_fields()
if isinstance(self.fields, (list, tuple)):
self._fields = {}
for field in self.fields:
self._fields[field] = self.default_weight
else:
self._fields = fields
def update_index(self, pk=None):
"""
Updates the full-text index for one, many, or all instances of this manager's model.
"""
raise NotImplementedError
def search(self, query, **kwargs):
raise NotImplementedError
def _find_text_fields(self):
"""
Return the names of all CharField and TextField fields defined for this manager's model.
"""
fields = [f for f in self.model._meta.fields if isinstance(f, (models.CharField, models.TextField))]
return [f.name for f in fields]
class BaseModel(models.Model):
"""
A convience Model wrapper that provides an update_index method for object instances,
as well as automatic index updating. The index is stored as a tsvector column on the
model's table. A model may specify a boolean class variable, _auto_reindex, to control
whether the index is automatically updated when save is called.
"""
class Meta:
abstract = True
def update_index(self):
"""
Update the index.
"""
if hasattr(self, '_search_manager'):
self._search_manager.update_index(pk=self.pk)
@transaction.commit_on_success
def save(self, *args, **kwargs):
super(BaseModel, self).save(*args, **kwargs)
if hasattr(self, '_auto_reindex'):
if self._auto_reindex:
self.update_index()
else:
self.update_index()
|
Python
| 0
|
@@ -905,105 +905,8 @@
 r()
-
-    def __call__(self, query, **kwargs):
-        return self.search(query, **kwargs)
 
|
16c218023f9119f8b7a1603dbf53fb890da2bac8
|
Change scrape_page to avoid meta data.
|
news.py
|
news.py
|
import feedparser
import nltk
import requests
from bs4 import BeautifulSoup as bs
from sklearn.feature_extraction.text import TfidfVectorizer
import networkx as nx
import numpy as np
from nltk.corpus import stopwords
from nltk.stem import RSLPStemmer
from nltk import word_tokenize
from string import punctuation

from detection import detect_language


def rss_to_links(rssurl):
    """Input: url of the rss.
    Output: list of links to news."""
    feed = feedparser.parse(rssurl)
    return [link['link'] for link in feed['entries']]


def scrape_page(url):
    """Input: url of news.
    Output: Text from the page."""
    page = requests.get(url).text
    tree = bs(page, "html.parser")
    title = tree.title.string
    for script in tree(["script", "style"]):
        script.extract()
    paragraphs = tree.find_all('p')
    #print paragraphs
    filtered_paragraphs = [paragraph.get_text() for paragraph in paragraphs]
    #print "$$$$$$$$$$$$$$$$$$$$$$$$$$$$$"
    #print filtered_paragraphs
    size_paragraphs = [len(para) for para in filtered_paragraphs]
    size_cut = int(sum(size_paragraphs) / len(size_paragraphs))
    texts = [text for text in filtered_paragraphs if len(text) > int(size_cut)]
    #print "##################"
    #print texts
    final_text = []
    for text in texts:  #[:-2]:
        if False:  #text[0] == "\n":
            break
        # if text[:5] != "Foto:":
        final_text.append(text)
    #print final_text
    return title, " ".join(final_text)


def textrank(document, language):
    #print document
    nltk.data.path.append('./nltk_data/')
    sentence_tokenizer = nltk.data.load("tokenizers/punkt/" + language + ".pickle")
    sentences = sentence_tokenizer.tokenize(document)
    stopword = stopwords.words(language)
    punct = list(punctuation)
    non_words = stopword + punct
    non_words = set(non_words)
    # Stemmer
    #stemmer = RSLPStemmer()
    filtered_sentences = []
    for sentence in sentences:
        words = word_tokenize(sentence)
        #words = [stemmer.stem(word.lower()) for word in words if word not in non_words]
        words = [(word.lower()) for word in words if word not in non_words]
        sentence = " ".join(words)
        filtered_sentences.append(sentence)
    normalized = TfidfVectorizer(ngram_range=(1, 1)).fit_transform(filtered_sentences)
    similarity_graph = normalized * normalized.T
    nx_graph = nx.from_scipy_sparse_matrix(similarity_graph)
    scores = nx.pagerank(nx_graph)
    return sorted(((scores[i], i, s) for i, s in enumerate(sentences)),
                  reverse=True)


def summarization(ranked):
    cut = 3
    size = len(ranked)
    if size >= cut:
        shrink = int(size ** 0.55)  #size/cut
        most_relevant = ranked[:shrink]
    else:
        most_relevant = ranked
    most_relevant.sort(key=lambda x: x[1])
    sentences = []
    for ranks in most_relevant:
        sentences.append(ranks[2])
    return " ".join(sentences)


def get_summary(url):
    """Input: url
    Output: summary"""
    title, document = scrape_page(url)
    language = detect_language(document)
    ranked = textrank(document, language)
    summary = summarization(ranked)
    return title, summary.strip(), language


def wordrank(document, language):
    #print document
    nltk.data.path.append('./nltk_data/')
    sentence_tokenizer = nltk.data.load("tokenizers/punkt/" + language + ".pickle")
    sentences = sentence_tokenizer.tokenize(document)
    stopword = stopwords.words(language)
    punct = list(punctuation)
    non_words = stopword + punct
    non_words = set(non_words)
    # Stemmer
    #stemmer = RSLPStemmer()
    filtered_sentences = []
    for sentence in sentences:
        words = word_tokenize(sentence)
        #words = [stemmer.stem(word.lower()) for word in words if word not in non_words]
        words = [(word.lower()) for word in words if word not in non_words]
        sentence = " ".join(words)
        filtered_sentences.append(sentence)
    vectorizer = TfidfVectorizer(ngram_range=(1, 1))
    normalized = vectorizer.fit_transform(filtered_sentences)
    similarity_graph = normalized.T * normalized
    nx_graph = nx.from_scipy_sparse_matrix(similarity_graph)
    scores = nx.pagerank(nx_graph)
    wordrank = []
    for word, feature in vectorizer.vocabulary_.iteritems():
        wordrank.append((scores[feature], word))
    wordrank.sort(key=lambda x: x[0], reverse=True)
    return wordrank


def keyword_extraction(ranked):
    keywords = [word[1] for word in ranked]
    return keywords[:5]


def get_keywords(url):
    """Input: url
    Output: summary"""
    _, document = scrape_page(url)
    language = detect_language(document)
    ranked = wordrank(document, language)
    keywords = keyword_extraction(ranked)
    return keywords

# TODO: deal with pages with small content like: http://gshow.globo.com/novelas/rock-story/Vem-por-ai/noticia/2017/01/banda-44-ganha-nova-integrante.html
# TODO: french codification
|
Python
| 0
|
@@ -794,26 +794,21 @@
 t()
-paragraphs
+all_p
  = tree.
@@ -829,25 +829,291 @@
-#print paragraphs
+t0 = None
+    for tag in all_p:
+        if t0 == None:
+            t0 = tag
+        else:
+            t1 = tag.parent
+            if t1 == t0.parent:
+                content = t1
+                break
+            else:
+                t0 = tag
+
+    paragraphs = content.find_all('p')
 
@@ -1190,82 +1190,8 @@
 hs]
-    #print "$$$$$$$$$$$$$$$$$$$$$$$$$$$$$"
-    #print filtered_paragraphs
 
@@ -1398,57 +1398,8 @@
 t)]
-    #print "##################"
-    #print texts
 
@@ -1440,15 +1440,8 @@
 xts:
-#[:-2]:
 
@@ -1458,26 +1458,8 @@
 lse:
- #text[0] == "\n":
 
@@ -1477,41 +1477,8 @@
 eak
-    # if text[:5] != "Foto:":
 
@@ -1509,30 +1509,8 @@
 xt)
-    #print final_text
 
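Decoded, the main hunk replaces the naive `tree.find_all('p')` scrape with a heuristic: walk the `<p>` tags until two consecutive ones share a parent, then treat that parent as the article body. A minimal, runnable sketch of that heuristic (the HTML sample is invented for illustration):

from bs4 import BeautifulSoup as bs

# Invented sample: a nav paragraph followed by the real article body.
html = """
<div class="nav"><p>menu</p></div>
<article><p>First real paragraph.</p><p>Second real paragraph.</p></article>
"""
tree = bs(html, "html.parser")

# Heuristic from the decoded diff: the first parent holding two consecutive
# <p> tags is taken as the content container; only it gets scraped.
all_p = tree.find_all('p')
t0 = None
content = None
for tag in all_p:
    if t0 is None:
        t0 = tag
    else:
        t1 = tag.parent
        if t1 == t0.parent:
            content = t1
            break
        else:
            t0 = tag

paragraphs = content.find_all('p')
print([p.get_text() for p in paragraphs])
# ['First real paragraph.', 'Second real paragraph.']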
|
3828c02e73fa2a190f47ee7b3ad4b3670944367c
|
Swap fields. Closes https://github.com/p2pu/p2pu-website/issues/480
|
custom_registration/forms.py
|
custom_registration/forms.py
|
# coding=utf-8
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import PasswordResetForm
from django.utils.translation import ugettext as _
from django.forms import ValidationError
from django.contrib.auth import password_validation
from django.contrib.auth.forms import UserCreationForm
from django.utils.safestring import mark_safe

from studygroups.models import Profile

newsletter_label = _('P2PU can contact me')
newsletter_help_text = _('Join our mailing list to learn about upcoming events, new courses, and news from the community. (Approximately six emails/year)')


class SignupForm(UserCreationForm):
    communication_opt_in = forms.BooleanField(required=False, initial=False, label=newsletter_label, help_text=newsletter_help_text)
    consent_opt_in = forms.BooleanField(required=True, initial=False, label=mark_safe(_('I consent to P2PU storing my data and accept the <a href="https://www.p2pu.org/en/terms/">terms of service</a>')), help_text=_('P2PU values your privacy and will never sell your data.'))

    def __init__(self, *args, **kwargs):
        super(SignupForm, self).__init__(*args, **kwargs)
        self.fields['email'].required = True
        self.fields['first_name'].required = True
        self.fields['last_name'].required = True

    def clean(self):
        cleaned_data = super(SignupForm, self).clean()
        username = cleaned_data.get('email')
        if User.objects.filter(username__iexact=username).exists():
            self.add_error('email', _('A user with that email address already exists.'))
        return cleaned_data

    class Meta:
        model = User
        fields = ['first_name', 'last_name', 'email', 'password1', 'password2', 'communication_opt_in', 'consent_opt_in']


class CustomPasswordResetForm(PasswordResetForm):
    """ Use case insensitive email address when searching for users """

    def clean_email(self):
        email = self.cleaned_data['email']
        if not User.objects.filter(email__iexact=email, is_active=True).exists():
            raise ValidationError(_("There is no user registered with the specified email address!"))
        return email


class UserForm(forms.ModelForm):
    email = forms.EmailField(disabled=True, help_text=mark_safe(_('If you’d like to change the address affiliated with your account, please contact <a href="mailto:thepeople@p2pu.org">thepeople@p2pu.org</a>')))

    class Meta:
        model = User
        fields = ['email', 'first_name', 'last_name']


class ProfileForm(forms.ModelForm):
    class Meta:
        model = Profile
        fields = ['avatar', 'bio', 'contact_url', 'city', 'country', 'place_id', 'region', 'latitude', 'longitude', 'communication_opt_in']
        labels = {
            'avatar': _('Profile photo'),
            'bio': _('Short bio (max 500 characters)'),
            'contact_url': _('Contact URL'),
            'city': _('City'),
            'communication_opt_in': newsletter_label,
        }
        placeholders = {
            'contact_url': _("Twitter, LinkedIn, website, etc.")
        }
        help_texts = {
            'contact_url': _('Where can potential team members find your contact information? i.e. Staff page, Twitter, personal website, etc.'),
            'communication_opt_in': newsletter_help_text,
        }
        widgets = {
            'bio': forms.Textarea(attrs={'rows': 5, 'cols': 10}),
            'latitude': forms.HiddenInput,
            'longitude': forms.HiddenInput,
            'place_id': forms.HiddenInput,
            'country': forms.HiddenInput,
            'region': forms.HiddenInput,
        }
|
Python
| 0.000001
|
@@ -1735,35 +1735,29 @@
 sword2', 'co
-mmunication
+nsent
 _opt_in', 'c
@@ -1753,29 +1753,35 @@
 opt_in', 'co
-nsent
+mmunication
 _opt_in']
|
2934b3e321655ff9f8dd9575d2e1bc465360db4e
|
comment code
|
filesystem/usr/share/crewchief/crewchief.py
|
filesystem/usr/share/crewchief/crewchief.py
|
#!/usr/bin/env python2
# Copyright 2013 Carl George
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import sys
import time
import glob
import subprocess
from syslog import syslog as log
try:
    import configparser
except ImportError:
    import ConfigParser as configparser
try:
    from urllib.request import urlopen, Request
except ImportError:
    from urllib2 import urlopen, Request


def parse_config():
    ''' obtain the user settings from crewchief.ini '''
    # parse the config file
    config = configparser.ConfigParser()
    config.read('/etc/crewchief/crewchief.ini')
    # set the defaults
    settings = {'max_api_attempts': 10,
                'api_wait_seconds': 60}
    try:
        for each in config.options('main'):
            # test if the config option is a valid key
            if each in settings.keys():
                try:
                    # overwrite the default setting to the one from config
                    settings[each] = config.getint('main', each)
                except ValueError:
                    # not an integer, use the default
                    log('{0}: invalid value, using default'.format(each))
            else:
                # the option is bogus
                log('{0}: invalid option'.format(each))
    except configparser.NoSectionError:
        # the file is malformed or missing
        log('malformed or missing configuration file, using defaults')
    # return our settings dictionary
    return settings


def get_region():
    ''' obtain the region from the xenstore '''
    # system command to pull region from xenstore
    xencmd = ['xenstore-read', 'vm-data/provider_data/region']
    # default when the command is missing or fails
    region = None
    try:
        output = subprocess.Popen(xencmd,
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE
                                  ).communicate()
    except FileNotFoundError:  # note: Python 3 name; Python 2 raises OSError
        msg = 'could not find xenstore-read command'
    else:
        if output[0]:
            region = output[0].rstrip('\n')
        elif 'Permission denied' in output[1]:
            msg = 'permission denied reading xenstore'
        else:
            msg = 'unknown error while reading xenstore'
    if region:
        return region
    else:
        log(msg)
        sys.exit(msg)


def query_api(settings):
    ''' query the Rackconnect API to see if automation is complete '''
    # pull our settings from the dictionary
    max_api_attempts = settings.get('max_api_attempts')
    api_wait_seconds = settings.get('api_wait_seconds')
    sleepmsg = 'sleeping {0} seconds'.format(api_wait_seconds)
    # construct the endpoint url
    apiurl = 'https://{REGION}.{DOMAIN}/{VERSION}/{INFO}'.format(
        REGION=get_region(),
        DOMAIN='api.rackconnect.rackspace.com',
        VERSION='v1',
        INFO='automation_status')
    # loop the API call until done or max attempts
    for each in range(int(max_api_attempts)):
        try:
            req = Request(apiurl)
            res = urlopen(req, timeout=3)
            rcstatus = res.read()
        except Exception:
            log('rackconnect API error, {0}'.format(sleepmsg))
            time.sleep(api_wait_seconds)
            continue
        else:
            if rcstatus == 'DEPLOYED':
                log('rackconnect automation complete')
                return True
            else:
                log('rackconnect automation incomplete, {0}'.format(sleepmsg))
                time.sleep(api_wait_seconds)
                continue
    else:
        log('hit max api attempts, giving up')
        return False


def get_tasks(settings):
    ''' obtain the list of scripts from /etc/crewchief/tasks.d '''
    # set the tasks directory
    tasks_dir = '/etc/crewchief/tasks.d'
    # create a list of all the files in that directory
    scripts = glob.glob('{0}/*'.format(tasks_dir))
    # remove the README file from the list
    try:
        scripts.remove('{0}/README'.format(tasks_dir))
    except ValueError:
        pass
    # sort the scripts to honor numbered order (00-foo, 01-bar, etc.)
    scripts.sort()
    # return the list
    return scripts


def call_tasks(scripts):
    ''' run the scripts from the input list '''
    for script in scripts:
        scriptname = os.path.basename(script)
        try:
            status = subprocess.call(script)
        except OSError:
            log('task {0} skipped'.format(scriptname))
        else:
            if status == 0:
                log('task {0} completed'.format(scriptname))
            else:
                log('task {0} failed ({1})'.format(scriptname, status))
    else:
        log('finished processing tasks')


def main():
    settings = parse_config()
    if query_api(settings):
        scripts = get_tasks(settings)
        call_tasks(scripts)
    else:
        sys.exit(1)


if __name__ == '__main__':
    main()

# vim: set syntax=python sw=4 ts=4 expandtab :
|
Python
| 0
|
@@ -1195,16 +1195,70 @@
 s': 60}
+
+    # overwrite defaults with values from config file
 try:
@@ -1436,24 +1436,31 @@
 #
+ do the
  overwrite th
@@ -1460,51 +1460,8 @@
 rite
- the default setting to the one from config
 
@@ -2536,86 +2536,243 @@
-            region = output[0].rstrip('\n')
-        elif 'Permission denied' in output[1]:
+            # output on stdout is our region
+            region = output[0].rstrip('\n')
+            msg = 'obtained region from xenstore'
+        elif 'Permission denied' in output[1]:
+            # stderr probably means script wasn't run as root
 
@@ -4927,16 +4927,64 @@
 cripts:
+        # strip off the path to the script name
@@ -5026,32 +5026,86 @@
 t)
         try:
+            # run the script and save the exit status
 stat
@@ -5153,24 +5153,53 @@
 pt OSError:
+            # not executable
|
2cdfff730e66dccf749ca855e3c255568e248d01
|
Use Unknown message with right path
|
vertica_python/vertica/messages/message.py
|
vertica_python/vertica/messages/message.py
|
import types
from struct import pack

from vertica_python.vertica.messages import *


class Message(object):
    @classmethod
    def _message_id(cls, message_id):
        instance_message_id = message_id

        def message_id(self):
            return instance_message_id

        setattr(cls, 'message_id', types.MethodType(message_id, cls))

    def message_string(self, msg):
        if isinstance(msg, list):
            msg = ''.join(msg)
        if hasattr(msg, 'bytesize'):
            bytesize = msg.bytesize + 4
        else:
            bytesize = len(msg) + 4
        message_size = pack('!I', bytesize)
        if self.message_id() is not None:
            msg_with_size = self.message_id() + message_size + msg
        else:
            msg_with_size = message_size + msg
        return msg_with_size


class BackendMessage(Message):
    MessageIdMap = {}

    @classmethod
    def factory(cls, type_, data):
        klass = cls.MessageIdMap[type_]
        if klass is not None:
            return klass(data)
        else:
            return messages.Unknown(type_, data)

    @classmethod
    def _message_id(cls, message_id):
        super(BackendMessage, cls)
        cls.MessageIdMap[message_id] = cls


class FrontendMessage(Message):
    def to_bytes(self):
        return self.message_string(b'')
|
Python
| 0
|
@@ -1063,17 +1063,8 @@
 urn 
-messages.
 Unkn
|
acf7d9c9748531d4bc800353a71f0b152fda6d53
|
Update map-sum-pairs.py
|
Python/map-sum-pairs.py
|
Python/map-sum-pairs.py
|
# Time:  O(n), n is the length of key
# Space: O(t), t is the total size of trie

import collections  # needed for the defaultdict-based trie below


class MapSum(object):

    def __init__(self):
        """
        Initialize your data structure here.
        """
        _trie = lambda: collections.defaultdict(_trie)
        self.__root = _trie()

    def insert(self, key, val):
        """
        :type key: str
        :type val: int
        :rtype: void
        """
        # Time: O(n)
        curr = self.__root
        for c in key:
            curr = curr[c]
        delta = val
        if "_end" in curr:
            delta -= curr["_end"]

        curr = self.__root
        for c in key:
            curr = curr[c]
            if "_count" in curr:
                curr["_count"] += delta
            else:
                curr["_count"] = delta
        curr["_end"] = val

    def sum(self, prefix):
        """
        :type prefix: str
        :rtype: int
        """
        # Time: O(n)
        curr = self.__root
        for c in prefix:
            if c not in curr:
                return 0
            curr = curr[c]
        return curr["_count"]


# Your MapSum object will be instantiated and called as such:
# obj = MapSum()
# obj.insert(key,val)
# param_2 = obj.sum(prefix)
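The instantiation comment above follows the LeetCode 677 contract; a short usage trace showing how the `_end`/`delta` bookkeeping keeps prefix sums consistent on overwrite:

obj = MapSum()
obj.insert("apple", 3)
print(obj.sum("ap"))    # 3
obj.insert("app", 2)
print(obj.sum("ap"))    # 5: counts along 'a'->'p' now include both keys
obj.insert("apple", 5)  # overwrite: only delta = 5 - 3 is added along the path
print(obj.sum("ap"))    # 7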
|
Python
| 0.000001
|
@@ -59,27 +59,31 @@
  the 
-total size of
+number of nodes in
  trie
-  
 

cl
|
98896c222c2686dbab96b58819c08131d31dc1b7
|
Update self-crossing.py
|
Python/self-crossing.py
|
Python/self-crossing.py
|
# Time:  O(n)
# Space: O(1)

# You are given an array x of n positive numbers.
# You start at point (0,0) and moves x[0] metres to
# the north, then x[1] metres to the west, x[2] metres
# to the south, x[3] metres to the east and so on.
# In other words, after each move your direction changes counter-clockwise.
#
# Write a one-pass algorithm with O(1) extra space to determine,
# if your path crosses itself, or not.
#
# Example 1:
# Given x = [2, 1, 1, 2]
# Return true (self crossing)
# Example 2:
# Given x = [1, 2, 3, 4]
# Return false (not self crossing)
# Example 3:
# Given x = [1, 1, 1, 1]
# Return true (self crossing)


class Solution(object):
    def isSelfCrossing(self, x):
        """
        :type x: List[int]
        :rtype: bool
        """
        if len(x) >= 5 and x[3] == x[1] and x[4] + x[0] >= x[2]:
            # Crossing in a loop:
            #     2
            # 3 ┌────┐
            #   └─══>┘1
            #   4  0  (overlapped)
            return True

        for i in xrange(3, len(x)):
            if x[i] >= x[i - 2] and x[i - 3] >= x[i - 1]:
                # Case 1:
                #    i-2
                # i-1┌─┐
                #    └─┼─>i
                #     i-3
                return True
            elif i >= 5 and x[i - 4] <= x[i - 2] and x[i] + x[i - 4] >= x[i - 2] and \
                    x[i - 1] <= x[i - 3] and x[i - 5] + x[i - 1] >= x[i - 3]:
                # Case 2:
                #       i-4
                #    ┌──┐
                #    │i<┼─┐
                # i-3│  i-5│i-1
                #    └────┘
                #      i-2
                return True
        return False
|
Python
| 0
|
@@ -1344,16 +1344,21 @@
+     
  x[i - 1]
|
bd5e4dc55341e6ec98bf17211d7c3c6fdb99a3b1
|
Use country_template in test_extra_params
|
tests/core/test_extra_params.py
|
tests/core/test_extra_params.py
|
# -*- coding: utf-8 -*-

from openfisca_core import periods
from openfisca_core.columns import IntCol, BoolCol
from openfisca_core.periods import MONTH
from openfisca_core.variables import Variable
import openfisca_dummy_country as dummy_country
from openfisca_dummy_country.entities import Individu
from openfisca_core.tools import assert_near
from openfisca_core.base_functions import requested_period_last_value


class formula_1(Variable):
    column = IntCol
    entity = Individu
    definition_period = MONTH

    def function(self, simulation, period):
        return simulation.calculate('formula_3', period, extra_params = [0])


class formula_2(Variable):
    column = IntCol
    entity = Individu
    definition_period = MONTH

    def function(self, simulation, period):
        return simulation.calculate('formula_3', period, extra_params = [1])


class formula_3(Variable):
    column = IntCol
    entity = Individu
    definition_period = MONTH

    def function(self, simulation, period, choice):
        return self.zeros() + choice


class formula_4(Variable):
    column = BoolCol
    entity = Individu
    base_function = requested_period_last_value
    definition_period = MONTH

    def function(self, simulation, period, choice):
        return self.zeros() + choice


# TaxBenefitSystem instance declared after formulas
tax_benefit_system = dummy_country.DummyTaxBenefitSystem()
tax_benefit_system.add_variables(formula_1, formula_2, formula_3, formula_4)

reference_period = periods.period(u'2013-01')

simulation = tax_benefit_system.new_scenario().init_single_entity(
    period = reference_period.first_month,
    parent1 = dict(),
    ).new_simulation(debug = True)
formula_1_result = simulation.calculate('formula_1', period = reference_period)
formula_2_result = simulation.calculate('formula_2', period = reference_period)
formula_3_holder = simulation.holder_by_name['formula_3']


def test_cache():
    assert_near(formula_1_result, [0])
    assert_near(formula_2_result, [1])


def test_get_extra_param_names():
    assert formula_3_holder.get_extra_param_names(period = None) == ('choice',)


def test_json_conversion():
    print(formula_3_holder.to_value_json())
    assert str(formula_3_holder.to_value_json()) == \
        "{'2013-01': {'{choice: 1}': [1], '{choice: 0}': [0]}}"


def test_base_functions():
    assert simulation.calculate('formula_4', '2013-01', extra_params = [0]) == 0
    assert simulation.calculate('formula_4', '2013-01', extra_params = [1]) == 1

    # With the 'requested_period_last_value' base_function,
    # the value on a month can be inferred from the year value,
    # without running the function for that month
    assert simulation.calculate('formula_4', "2013-04", extra_params = [1]) == 1
|
Python
| 0.000031
|
@@ -192,22 +192,20 @@
 ariable
-import
+from
  openfis
@@ -211,38 +211,55 @@
 sca_
-dummy_
 country
- as dummy_country
+_template import CountryTaxBenefitSystem
 
fro
@@ -270,29 +270,32 @@
 enfisca_
-dummy_
 country
+_template
 .entitie
@@ -303,24 +303,22 @@
  import 
-Individu
+Person
 
from op
@@ -483,32 +483,30 @@
 entity = 
-Individu
+Person
 
    definit
@@ -704,32 +704,30 @@
 entity = 
-Individu
+Person
 
    definit
@@ -925,32 +925,30 @@
 entity = 
-Individu
+Person
 
    definit
@@ -1123,16 +1123,14 @@
 y = 
-Individu
+Person
 
@@ -1373,26 +1373,14 @@
 m = 
-dummy_country.Dumm
+Countr
 yTax
@@ -1576,21 +1576,23 @@
 nit_
-single_entity
+from_attributes
 (
@@ -1636,30 +1636,8 @@
 th,
-    parent1 = dict(),
 
|
205df82b9eabed709db90c8de3473a883c9c2c1b
|
reorder tinymce toolbars
|
wheelcms_axle/settings/settings_tinymce.py
|
wheelcms_axle/settings/settings_tinymce.py
|
TINYMCE_DEFAULT_CONFIG = {
    'theme': "advanced",
    'content_css': '/static/css/wheel_content.css',
    'style_formats': [
        { 'title': 'Images'},
        { 'title': 'Original Size Image', 'selector': 'img', 'attributes': {'class': 'img_content_original' }},
        { 'title': 'Thumbnail Image', 'selector': 'img', 'attributes': {'class': 'img_content_thumb' }},
        { 'title': 'Small Image', 'selector': 'img', 'attributes': {'class': 'img_content_small' }},
        { 'title': 'Medium Image', 'selector': 'img', 'attributes': {'class': 'img_content_medium'} },
        { 'title': 'Large Image', 'selector': 'img', 'attributes': {'class': 'img_content_large' }},
        #{ 'title': 'Test'},
        #{ 'title': "Boohoold", 'inline': 'b' },
    ],
    'relative_urls': False,
    'theme_advanced_toolbar_location': 'top',
    'theme_advanced_resizing': True,
    'plugins': 'table, paste, wheel_browser',
    'table_styles': "Header 1=header1;Header 2=header2;Header 3=header3",
    'table_cell_styles': "Header 1=header1;Header 2=header2;Header 3=header3;Table Cell=tableCel1",
    'table_row_styles': "Header 1=header1;Header 2=header2;Header 3=header3;Table Row=tableRow1",
    'table_cell_limit': 100,
    'table_row_limit': 5,
    'table_col_limit': 5,
    'width': 800,
    'height': 600,
    'theme_advanced_buttons1': "bold,italic,underline,strikethrough,|,justifyleft,justifycenter,justifyright,justifyfull,|,styleselect,formatselect",
    'theme_advanced_buttons2': "bullist,numlist,|,outdent,indent,|,undo,redo,|,link,unlink,anchor,image,cleanup,help,code,hr,removeformat,visualaid,|,sub,sup,|,charmap",
    'theme_advanced_buttons3': "tablecontrols,|,pastetext,pasteword,selectall",
    'paste_auto_cleanup_on_paste': True,
}
|
Python
| 0.000001
|
@@ -1347,16 +1347,57 @@
 ns1' : "
+|,undo,redo,|,styleselect,formatselect,|,
 bold,ita
@@ -1483,115 +1483,96 @@
 l,|,
-styleselect,formatselect",
-    'theme_advanced_buttons2' : "bullist,numlist,|,outdent,indent,|,undo,redo,|,
+bullist,numlist,|,outdent,indent,|,sub,sup,|,charmap",
+    'theme_advanced_buttons2' : "
 link
@@ -1604,13 +1604,8 @@
 nup,
-help,
 code
@@ -1637,60 +1637,8 @@
 d,|,
-sub,sup,|,charmap",
-    'theme_advanced_buttons3': "
 tabl
|
e9bac7c9d980889cbab098934c392b7514c41a00
|
remove printall plugin.
|
plugins/default.py
|
plugins/default.py
|
from alebot import Alebot, Hook, Event


class ConnectionReadyHook(Hook):

    """
    This is a hook that can be subclassed in case you want to react
    on an irc connection that is ready for commands. It waits for
    the end of the motd, or the message that there is no motd.

    The :func:`match` function was implemented to listen to the
    correct events. You will just have to overwrite the :func:`call`
    to actually do something.
    """

    def match(self, event):
        return (event.name == '376' or event.name == '422')


class CommandHook(Hook):

    """
    This is a hook that can be subclassed in case you want to react
    to a message on a channel or in private. It will react to the
    bot's current nickname followed by a colon and the command
    specified in the command attribute.
    """

    command = None

    def match(self, event):
        return (event.name == 'PRIVMSG' and event.body == '%s: %s' % (
            self.bot.config.get('nick'), self.command))


@Alebot.hook
class SocketConnectedHook(Hook):

    """
    As the bot does nothing itself, this plugin takes care of
    identifying the bot with the server. Yeah, seriously.

    It uses the made up `SOCK_CONNECTED` event that is not even
    an actual IRC event..
    """

    def match(self, event):
        return (event.name == 'SOCK_CONNECTED')

    def call(self, event):
        print("Socket is ready, logging in.")
        self.send_raw("NICK %s" % self.bot.config['nick'])
        self.send_raw("USER %s * %s :%s" % (
            self.bot.config['ident'],
            self.bot.config['ident'],
            self.bot.config['realname']
        ))


@Alebot.hook
class PingPong(Hook):

    """
    As the bot does nothing by itself, this plugin takes care of
    sending PONGs as answer to pings, as the bot won't even do that.

    It matches the `PING` event to do that.
    """

    def match(self, event):
        return (event.name == 'PING')

    def call(self, event):
        print('Received ping, sending pong.')
        self.send_raw('PONG %s' % event.body)


@Alebot.hook
class JoinOnConnect(ConnectionReadyHook):

    """
    Join the channels defined in the config file option `channels` on
    connection, if there are any defined; if not, it does not
    join any channels.
    """

    def call(self, event):
        print("Joining channels..")
        channels = self.bot.config.get('channels', [])
        for channel in channels:
            self.send_raw('JOIN %s' % channel)


@Alebot.hook
class PrintAll(Hook):

    """
    Prints all server input to the terminal.
    """

    def match(self, event):
        return True

    def call(self, event):
        print(event.name, event.user, event.target, event.body)
|
Python
| 0
|
@@ -2585,248 +2585,4 @@
 l)
-
-
-@Alebot.hook
-class PrintAll(Hook):
-
-    """
-    Prints all server input to the terminal.
-    """
-
-    def match(self, event):
-        return True
-
-    def call(self, event):
-        print(event.name, event.user, event.target, event.body)
|
a8e1c75cb28413fc66d77c2050b9b05b6b2a82d2
|
Deal with full spectral pol sumwt case
|
workflows/shared/imaging/imaging_shared.py
|
workflows/shared/imaging/imaging_shared.py
|
""" Imaging context definitions, potentially shared by other workflows
"""
import numpy
import logging
from processing_components.imaging.base import normalize_sumwt
from processing_components.visibility.base import copy_visibility
from processing_components.image.operations import copy_image, calculate_image_frequency_moments
log = logging.getLogger(__name__)
from processing_components.imaging.base import predict_2d, invert_2d
from processing_components.visibility.iterators import vis_null_iter, vis_timeslice_iter, vis_wslice_iter
from processing_components.imaging.timeslice_single import predict_timeslice_single, invert_timeslice_single
from processing_components.imaging.wstack_single import predict_wstack_single, invert_wstack_single
from processing_components.image.operations import create_empty_image_like
def imaging_contexts():
"""Contains all the context information for imaging
The fields are:
predict: Predict function to be used
invert: Invert function to be used
image_iterator: Iterator for traversing images
vis_iterator: Iterator for traversing visibilities
inner: The innermost axis
:return:
"""
contexts = {'2d': {'predict': predict_2d,
'invert': invert_2d,
'vis_iterator': vis_null_iter},
'wprojection': {'predict': predict_2d,
'invert': invert_2d,
'vis_iterator': vis_null_iter},
'wsnapshots': {'predict': predict_timeslice_single,
'invert': invert_timeslice_single,
'vis_iterator': vis_timeslice_iter},
'facets': {'predict': predict_2d,
'invert': invert_2d,
'vis_iterator': vis_null_iter},
'facets_timeslice': {'predict': predict_timeslice_single,
'invert': invert_timeslice_single,
'vis_iterator': vis_timeslice_iter},
'facets_wstack': {'predict': predict_wstack_single,
'invert': invert_wstack_single,
'vis_iterator': vis_wslice_iter},
'timeslice': {'predict': predict_timeslice_single,
'invert': invert_timeslice_single,
'vis_iterator': vis_timeslice_iter},
'wstack': {'predict': predict_wstack_single,
'invert': invert_wstack_single,
'vis_iterator': vis_wslice_iter}}
return contexts
def imaging_context(context='2d'):
contexts = imaging_contexts()
assert context in contexts.keys(), context
return contexts[context]
def sum_invert_results_local(image_list):
""" Sum a set of invert results with appropriate weighting
without normalize_sumwt at the end
:param image_list: List of [image, sum weights] pairs
:return: image, sum of weights
"""
first = True
sumwt = 0.0
im = None
for i, arg in enumerate(image_list):
if arg is not None:
if isinstance(arg[1], numpy.ndarray):
scale = arg[1][..., numpy.newaxis, numpy.newaxis]
else:
scale = arg[1]
if first:
im = copy_image(arg[0])
im.data *= scale
sumwt = arg[1].copy()
first = False
else:
im.data += scale * arg[0].data
sumwt += arg[1]
assert not first, "No invert results"
return im, sumwt
def sum_invert_results(image_list):
""" Sum a set of invert results with appropriate weighting
:param image_list: List of [image, sum weights] pairs
:return: image, sum of weights
"""
if len(image_list) == 1:
return image_list[0]
im = create_empty_image_like(image_list[0][0])
sumwt = image_list[0][1].copy()
sumwt *= 0.0
for i, arg in enumerate(image_list):
if arg is not None:
im.data += arg[1] * arg[0].data
sumwt += arg[1]
im = normalize_sumwt(im, sumwt)
return im, sumwt
def remove_sumwt(results):
""" Remove sumwt term in list of tuples (image, sumwt)
:param results:
:return: A list of just the dirty images
"""
return [d[0] for d in results]
def sum_predict_results(results):
""" Sum a set of predict results of the same shape
:param results: List of visibilities to be summed
:return: summed visibility
"""
sum_results = None
for result in results:
if result is not None:
if sum_results is None:
sum_results = copy_visibility(result)
else:
assert sum_results.data['vis'].shape == result.data['vis'].shape
sum_results.data['vis'] += result.data['vis']
return sum_results
def threshold_list(imagelist, threshold, fractional_threshold, use_moment0=True, prefix=''):
""" Find actual threshold for list of results, optionally using moment 0
:param imagelist:
:param threshold: Absolute threshold
:param fractional_threshold: Fractional threshold
:param use_moment0: Use moment 0 for threshold
:return:
"""
peak = 0.0
for i, result in enumerate(imagelist):
if use_moment0:
moments = calculate_image_frequency_moments(result)
this_peak = numpy.max(numpy.abs(moments.data[0, ...] / result.shape[0]))
peak = max(peak, this_peak)
log.info("threshold_list: using moment 0, sub_image %d, peak = %f," % (i, this_peak))
else:
ref_chan = result.data.shape[0] // 2
this_peak = numpy.max(numpy.abs(result.data[ref_chan]))
peak = max(peak, this_peak)
log.info("threshold_list: using refchan %d , sub_image %d, peak = %f," % (ref_chan, i, this_peak))
actual = max(peak * fractional_threshold, threshold)
if use_moment0:
log.info("threshold_list %s: Global peak in moment 0 = %.6f, sub-image clean threshold will be %.6f" % (prefix,
peak,
actual))
else:
log.info("threshold_list %s: Global peak = %.6f, sub-image clean threshold will be %.6f" % (prefix, peak,
actual))
return actual
|
Python
| 0
|
@@ -4134,16 +4134,51 @@
  = arg[1]
+[..., numpy.newaxis, numpy.newaxis]
  * arg[0
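The decoded hunk mirrors the fix already present in `sum_invert_results_local`: when `sumwt` is a full per-(channel, polarisation) array it needs two trailing axes before it can broadcast against image data. A minimal NumPy sketch; the `(nchan, npol, ny, nx)` image layout is assumed from the surrounding code, not stated in the record:

import numpy

nchan, npol, ny, nx = 2, 4, 8, 8        # assumed image layout
image_data = numpy.ones((nchan, npol, ny, nx))
sumwt = numpy.full((nchan, npol), 2.0)  # full spectral/pol sum of weights

# sumwt * image_data would raise: shapes (2, 4) and (2, 4, 8, 8) don't align.
scaled = sumwt[..., numpy.newaxis, numpy.newaxis] * image_data
print(scaled.shape)  # (2, 4, 8, 8)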
|
6015cb96e4a35112efcf0ee35e38c88a94a58004
|
Add API key to headers
|
cogs/diffusion.py
|
cogs/diffusion.py
|
import asyncio

import backoff
from typing import Any, Literal
from discord import Embed
from discord.ext import commands
from aiohttp import ClientResponseError

from bot import QTBot
from utils.custom_context import CustomContext


class DiffusionError(Exception):
    pass


class Diffusion(commands.Cog):
    INPUT = {
        "input": {
            "width": 512,
            "height": 512,
            "num_outputs": "1",
            "guidance_scale": 7.5,
            "prompt_strength": 0.8,
            "num_inference_steps": 50,
        }
    }
    URL = "https://replicate.com/api/models/stability-ai/stable-diffusion/versions/a9758cbfbd5f3c2094457d996681af52552901775aa2d6dd0b17fd15df959bef/predictions"

    def __init__(self, bot: QTBot):
        self.bot = bot

    @backoff.on_exception(backoff.expo, ClientResponseError, max_tries=3)
    async def req(
        self, verb: Literal["GET", "POST"], url: str = "", params: dict = None, headers: dict = None, data: dict = None
    ) -> Any:
        resp = await self.bot.aio_session.request(verb, f"{self.URL}{url}", params=params, headers=headers, json=data)
        resp.raise_for_status()
        return await resp.json()

    async def start_job(self, prompt: str) -> str:
        payload = {**self.INPUT, "prompt": prompt}
        resp = await self.req("POST", data=payload)
        if resp["error"]:
            raise DiffusionError(resp["error"])
        return resp["uuid"]

    async def check_progress(self, id: str) -> str:
        total_checks = 0
        while True:
            resp = (await self.req("GET", f"/{id}"))["prediction"]
            if total_checks >= 10:
                raise asyncio.TimeoutError("Couldn't get a result after 20 seconds. Aborting.")
            if resp["error"]:
                raise DiffusionError(resp["error"])
            if resp["completed_at"]:
                return resp["output"][0]
            total_checks += 1
            await asyncio.sleep(2)  # yield to the event loop between polls

    @commands.command(aliases=["diffuse", "sd"])
    async def diffusion(self, ctx: CustomContext, *, prompt: str) -> None:
        try:
            job_id = await self.start_job(prompt)
        except DiffusionError as e:
            return await ctx.error("API Error", str(e))
        except ClientResponseError as e:
            return await ctx.error("API Error", f"Received status code {e.status}\n{e.message}")

        try:
            image_url = await self.check_progress(job_id)
        except DiffusionError as e:
            return await ctx.error("API Error", str(e))
        except ClientResponseError as e:
            return await ctx.error("API Error", f"Received status code {e.status}\n{e.message}")

        return await ctx.send(f"{ctx.author.mention}: {prompt}\n{image_url}")


def setup(bot):
    bot.add_cog(Diffusion(bot))
|
Python
| 0
|
@@ -8,16 +8,28 @@
 asyncio
+import json
 import b
@@ -722,44 +722,272 @@
 ns"
-
-    def __init__(self, bot: QTBot):
+    HEADERS = {"Content-Type": "application/json"}
+
+    def __init__(self, bot: QTBot):
+        with open("data/apikeys.json") as f:
+            self.api_key = json.load(f)["stable_diffusion"]
+        self.HEADERS.update({"Authorization": f"Token {self.api_key}"})
@@ -1113,16 +1113,24 @@
 self,
+
 verb: L
@@ -1151,16 +1151,24 @@
 "POST"],
+
 url: st
@@ -1174,16 +1174,24 @@
 tr = "",
+
 params:
@@ -1198,21 +1198,27 @@
 dict = 
-None,
+{},
 headers
@@ -1230,13 +1230,19 @@
 t = 
-None,
+{},
 dat
@@ -1255,16 +1255,17 @@
 t = None
+,
 ) -
@@ -1321,16 +1321,29 @@
 request(
+
 verb, f"
@@ -1383,16 +1383,19 @@
 headers=
+{**
 headers,
@@ -1399,17 +1399,43 @@
 rs, 
-json=data
+**self.HEADERS}, json=data
 )
@@ -2656,32 +2656,33 @@
 ved status code 
+`
 {e.status}\n{e.m
@@ -2667,32 +2667,33 @@
 code `{e.status}
+`
 \n{e.message}")
@@ -2968,16 +2968,17 @@
 us code 
+`
 {e.statu
@@ -2979,16 +2979,17 @@
 .status}
+`
 \n{e.mes
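Decoded, the diff loads the key from `data/apikeys.json` and merges per-request headers over the class defaults via `{**headers, **self.HEADERS}`. A short sketch of that merge's precedence (keys and values here are placeholders, not real credentials):

HEADERS = {"Content-Type": "application/json", "Authorization": "Token <key>"}
per_request = {"Accept": "application/json", "Content-Type": "text/plain"}

# Later keys win, so the class defaults (carrying the token) override conflicts.
print({**per_request, **HEADERS})
# {'Accept': 'application/json', 'Content-Type': 'application/json',
#  'Authorization': 'Token <key>'}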
|
1363a09366594602562bcf34a5368c71299c4755
|
Add zero in lol-regexp
|
plugins/lolrate.py
|
plugins/lolrate.py
|
from datetime import datetime
from toflib import cmd, Plugin
import re


class TimeSlice():
    def __init__(self):
        t = datetime.now()
        self.date = t.date()
        self.hour = t.hour
        self.kevins = dict()
        self.count = 0

    def __str__(self):
        return "%s %02dh-%02dh : %d lolz" % ( self.date.strftime("%d %b")
                                            , self.hour
                                            , (self.hour + 1) % 24  # wrap 23h -> 0h
                                            , self.count
                                            )

    def __cmp__(self, other):
        return cmp ( (self.date, self.hour)
                   , (other.date, other.hour)
                   )

    def __hash__(self):
        return hash(self.date) + hash(self.hour)

    def lol(self, nick, count):
        self.kevins.setdefault(nick, 0)
        self.kevins[nick] += count
        self.count += count


class PluginLolrate(Plugin):
    def __init__(self, bot):
        Plugin.__init__(self, bot)
        self.lolRate = [TimeSlice()]
        bot._mutable_attributes['lolRateDepth'] = int

    def handle_msg(self, msg_text, chan, nick):
        lulz = len(re.findall("[Ll]+[oO]+[Ll]+", msg_text))
        if lulz > 0:
            ts = TimeSlice()
            if ts != self.lolRate[0]:
                self.lolRate.insert(0, ts)
                if len(self.lolRate) > self.bot.lolRateDepth:
                    self.lolRate.pop()
            self.lolRate[0].lol(nick, lulz)

    @cmd(0)
    def cmd_lulz(self, chan, args):
        for lolade in self.lolRate:
            self.say(str(lolade))

    @cmd(0)
    def cmd_kevin(self, chan, args):
        kevins = dict()
        for lolade in self.lolRate:
            for kevin in lolade.kevins.iteritems():
                kevins.setdefault(kevin[0], 0)
                kevins[kevin[0]] += kevin[1]
        if len(kevins) > 0:
            kevin = max(kevins, key=lambda a: kevins.get(a))
            lolades = kevins[kevin]
            # French: "<nick> is the Kevin of the moment with N lolade(s)"
            self.say(str(kevin) + " est le Kevin du moment avec " + str(lolades) + " lolade" + ("s" if lolades > 1 else ""))
        else:
            self.say("pas de Kevin")  # French: "no Kevin"
|
Python
| 0.999315
|
@@ -1190,16 +1190,17 @@
 [Ll]+[oO
+0
 ]+[Ll]+"
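Decoded, the change widens the middle character class so a zero counts as an 'o'. A quick check of the post-diff pattern:

import re

pattern = "[Ll]+[oO0]+[Ll]+"  # the regex after this diff
for msg in ("lol", "LOOOL", "l0l", "lulz"):
    print(msg, len(re.findall(pattern, msg)))
# lol 1 / LOOOL 1 / l0l 1 / lulz 0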
|
492486fd561a039d63049e8cbc252d9981efcd6a
|
Change cb time for svoboda msg
|
summer_bot/bot.py
|
summer_bot/bot.py
|
import datetime
import logging
import random
from textwrap import dedent

from simple_settings import settings
from telegram.ext import Updater, CommandHandler, Job
import pytz
import emoji

import logging

logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')

DEFAULT_TZ = pytz.timezone(settings.DEFAULT_TIMEZONE)


def tznow(tz=None):
    utcnow = datetime.datetime.utcnow().replace(tzinfo=pytz.UTC)
    if tz is None:
        tz = DEFAULT_TZ
    else:
        tz = pytz.timezone(tz)
    return utcnow.astimezone(tz)


def get_days_left_in_summer(tz=None):
    tznow_date = tznow().date()
    first_day = datetime.date(tznow_date.year, 6, 1)
    last_day = datetime.date(tznow_date.year, 9, 1)
    if first_day <= tznow_date <= last_day:
        return (last_day - tznow_date).days
    else:
        return 0


RESPONSES = '''\
It is certain
It is decidedly so
Without a doubt
Yes definitely
You may rely on it
As I see it, yes
Most likely
Outlook good
Yes
Signs point to yes
Reply hazy try again
Ask again later
Better not tell you now
Cannot predict now
Concentrate and ask again
Don't count on it
My reply is no
My sources say no
Outlook not so good
Very doubtful
'''.split('\n')


def start(bot, update):
    bot.send_message(
        chat_id=update.message.chat_id,
        text=dedent("""\
            Yo yo yo!!! I am summer bot and I can:
            /summerdays - I will write to the chat how many days left
            /magicball - Ask me something
            """
        )
    )


def magic_8_ball(bot, update):
    answer = random.choice(RESPONSES)
    bot.send_message(
        chat_id=update.message.chat_id,
        reply_to_message_id=update.message.message_id,
        text='🎱 {}'.format(answer)
    )


def days_left(bot, update):
    days_left = get_days_left_in_summer()
    if days_left == 0:
        bot.send_message(
            chat_id=update.message.chat_id,
            text=(
                'лето кончилось :('  # Russian: "summer is over :("
            )
        )
        return
    # Russian plural rules: pick the right form of "day"
    days_left_100 = days_left % 100
    days_left_10 = days_left % 10
    if (
        (days_left_100 < 10 or days_left_100 > 20) and
        1 <= days_left_10 < 5
    ):
        if days_left_10 == 1:
            days_text = '{}день'.format(days_left)  # singular "day"
        else:
            days_text = '{}дня'.format(days_left)  # paucal "days"
    else:
        days_text = '{}дней'.format(days_left)  # plural "days"
    bot.send_message(
        chat_id=update.message.chat_id,
        text=(
            '#ровноцелых{} 🌞'.format(days_text)  # "#exactlywhole<N days> 🌞"
        )
    )


def callback_1900(bot, job):
    bot.send_message(
        chat_id=settings.SVOBODA_CHAT_ID,
        text='Го в Свобода'  # Russian slang: "Let's go to Svoboda"
    )
    next_run = 24 * 60 * 60
    logging.info("next run in {} seconds".format(next_run))
    job.interval = next_run


def main():
    updater = Updater(token=settings.API_KEY)
    dispatcher = updater.dispatcher
    jq = updater.job_queue

    start_handler = CommandHandler('start', start)
    dispatcher.add_handler(start_handler)

    days_handler = CommandHandler('summerdays', days_left)
    dispatcher.add_handler(days_handler)

    magic_ball_handler = CommandHandler('magicball', magic_8_ball)
    dispatcher.add_handler(magic_ball_handler)

    if settings.SVOBODA_CHAT_ID:
        moscow_now = tznow()
        cb_time = datetime.time(1, 35)
        if moscow_now.time() > cb_time:
            day = moscow_now.date() + datetime.timedelta(days=1)
        else:
            day = moscow_now.date()
        cb_dtime = DEFAULT_TZ.localize(datetime.datetime.combine(day, cb_time))
        delta = cb_dtime - moscow_now
        logging.info('Cb dtime {} now is {}'.format(cb_dtime, moscow_now))
        logging.info('Set job after {} seconds'.format(delta.total_seconds()))
        jq.put(Job(callback_1900, delta.total_seconds()))

    updater.start_polling()


if __name__ == '__main__':
    main()
|
Python
| 0.000001
|
@@ -3429,12 +3429,12 @@
 me(1
+9
 , 
-35
+0
 )
|
4d2f3431c587015d6962250fcc4ebcda06f0f988
|
Update TOKEN_INCORRECT_CVV error in dummy payment
|
saleor/payment/gateways/dummy/__init__.py
|
saleor/payment/gateways/dummy/__init__.py
|
import uuid
from typing import Optional

from ... import TransactionKind
from ...interface import GatewayConfig, GatewayResponse, PaymentData

TOKEN_PREAUTHORIZE_SUCCESS = "4111111111111112"
TOKEN_PREAUTHORIZE_DECLINE = "4111111111111111"
TOKEN_EXPIRED = "4000000000000069"
TOKEN_INSUFFICIENT_FUNDS = "4000000000009995"
TOKEN_INCORRECT_CVV = "4000000000000127"
TOKEN_DECLINE = "4000000000000002"

PREAUTHORIZED_TOKENS = [TOKEN_PREAUTHORIZE_DECLINE, TOKEN_PREAUTHORIZE_SUCCESS]

TOKEN_VALIDATION_MAPPING = {
    TOKEN_EXPIRED: "Card expired",
    TOKEN_INSUFFICIENT_FUNDS: "Insufficient funds",
    TOKEN_INCORRECT_CVV: "Incorrect cvv",
    TOKEN_DECLINE: "Card declined",
    TOKEN_PREAUTHORIZE_DECLINE: "Card declined",
}


def dummy_success():
    return True


def validate_token(token: Optional[str]):
    return TOKEN_VALIDATION_MAPPING.get(token, None) if token else None


def get_client_token(**_):
    return str(uuid.uuid4())


def authorize(
    payment_information: PaymentData, config: GatewayConfig
) -> GatewayResponse:
    success = dummy_success()
    error = None
    if not success:
        error = "Unable to authorize transaction"
    return GatewayResponse(
        is_success=success,
        action_required=False,
        kind=TransactionKind.AUTH,
        amount=payment_information.amount,
        currency=payment_information.currency,
        transaction_id=payment_information.token,
        error=error,
    )


def void(payment_information: PaymentData, config: GatewayConfig) -> GatewayResponse:
    error = None
    success = dummy_success()
    if not success:
        error = "Unable to void the transaction."
    return GatewayResponse(
        is_success=success,
        action_required=False,
        kind=TransactionKind.VOID,
        amount=payment_information.amount,
        currency=payment_information.currency,
        transaction_id=payment_information.token,
        error=error,
    )


def capture(payment_information: PaymentData, config: GatewayConfig) -> GatewayResponse:
    """Perform capture transaction."""
    error = validate_token(payment_information.token)
    success = not error
    return GatewayResponse(
        is_success=success,
        action_required=False,
        kind=TransactionKind.CAPTURE,
        amount=payment_information.amount,
        currency=payment_information.currency,
        transaction_id=payment_information.token,
        error=error,
    )


def confirm(payment_information: PaymentData, config: GatewayConfig) -> GatewayResponse:
    """Perform confirm transaction."""
    error = None
    success = dummy_success()
    if not success:
        error = "Unable to process capture"
    return GatewayResponse(
        is_success=success,
        action_required=False,
        kind=TransactionKind.CAPTURE,
        amount=payment_information.amount,
        currency=payment_information.currency,
        transaction_id=payment_information.token,
        error=error,
    )


def refund(payment_information: PaymentData, config: GatewayConfig) -> GatewayResponse:
    error = None
    success = dummy_success()
    if not success:
        error = "Unable to process refund"
    return GatewayResponse(
        is_success=success,
        action_required=False,
        kind=TransactionKind.REFUND,
        amount=payment_information.amount,
        currency=payment_information.currency,
        transaction_id=payment_information.token,
        error=error,
    )


def process_payment(
    payment_information: PaymentData, config: GatewayConfig
) -> GatewayResponse:
    """Process the payment."""
    token = payment_information.token

    if token in PREAUTHORIZED_TOKENS:
        authorize_response = authorize(payment_information, config)
        if not config.auto_capture:
            return authorize_response

    return capture(payment_information, config)
|
Python
| 0
|
@@ -627,11 +627,11 @@
ect
-cvv
+CVV
%22,%0A
|
98bd8d46a4a90d2c27767bd39894189caf03f7d0
|
Correct iter call for Python 3
|
changes_lxc_wrapper/cli/manager.py
|
changes_lxc_wrapper/cli/manager.py
|
#!/usr/bin/env python3

import argparse
import re
from collections import defaultdict, namedtuple
from datetime import datetime, timedelta

from ..api import ChangesApi
from ..container import SNAPSHOT_CACHE
from ..snapshot_cache import SnapshotCache

DESCRIPTION = "LXC snapshot manager"

SnapshotInfo = namedtuple('SnapshotInfo', ['id', 'path', 'size'])


def parse_size_value(value):
    value = value.lower()
    match = re.match(r'(\d+)(gb|g|mb|m|kb|k|b)?', value)
    if not match:
        raise ValueError('Unable to parse size value')
    number = int(match.group(1))
    key = match.group(2)
    if key in ('gb', 'g'):
        return number / 1024 / 1024 / 1024
    elif key in ('mb', 'm'):
        return number / 1024 / 1024
    elif key in ('kb', 'k'):
        return number / 1024
    return number


def parse_ttl_date(value):
    return datetime.utcnow() - timedelta(seconds=int(value))


def format_size_value(value):
    if value > 1024 * 1024 * 1024:
        return '{}GB'.format(value // 1024 // 1024 // 1024)
    if value > 1024 * 1024:
        return '{}MB'.format(value // 1024 // 1024)
    if value > 1024:
        return '{}KB'.format(value // 1024)
    return '{}B'.format(value)


class ManagerCommand(object):
    """
    Bound image cache to:
    - ttl
    - max-disk usage
    - max-disk per class

    Treat it as a semi-LRU:
    - always keep 'active' snapshots
    - clear out ttl'd snapshots first
    - next find projects exceeding max-disk per class and clear out any up
      to the active
    - finally sort remainder by size and clear out biggest first
    """

    def __init__(self, argv=None):
        self.argv = argv

    def get_arg_parser(self):
        parser = argparse.ArgumentParser(description=DESCRIPTION)
        parser.add_argument('--cache-path', default=SNAPSHOT_CACHE)
        parser.add_argument('--api-url', required=True,
                            help="API URL to Changes (i.e. https://changes.example.com/api/0/)")

        subparsers = parser.add_subparsers(dest='command')

        cleanup_parser = subparsers.add_parser('cleanup', help='Clean up the local snapshot cache')
        cleanup_parser.add_argument('--max-disk', required=True, type=parse_size_value)
        cleanup_parser.add_argument('--max-disk-per-class', type=parse_size_value)
        cleanup_parser.add_argument('--ttl', type=parse_ttl_date)
        cleanup_parser.add_argument('--dry-run', action='store_true', default=False)

        subparsers.add_parser('list', help='List the status of local snapshots')

        return parser

    def run(self):
        parser = self.get_arg_parser()
        args = parser.parse_args(self.argv)

        api = ChangesApi(args.api_url)

        cache = SnapshotCache(args.cache_path, api)
        cache.initialize()

        if args.command == 'cleanup':
            self.run_cleanup(cache, args)
        elif args.command == 'list':
            self.run_list(cache, args)

    def run_list(self, cache, args):
        print('-' * 80)
        template = '{id:41} {size:5} {is_valid:5} {project:10} {date}'
        print(template.format(
            id='ID',
            size='Size',
            is_valid='Valid',
            project='Project',
            date='Date',
        ))
        print('-' * 80)
        for snapshot in cache.snapshots:
            print(template.format(
                id=str(snapshot.id) if not snapshot.is_active else '* {}'.format(snapshot.id),
                size=format_size_value(snapshot.size),
                is_valid='T' if snapshot.is_valid else 'F',
                project=str(snapshot.project or 'n/a'),
                date=snapshot.date_created.date() if snapshot.date_created else 'n/a',
            ))

    def run_cleanup(self, cache, args):
        wipe_on_disk = not args.dry_run

        if not wipe_on_disk:
            print("==> DRY RUN: Not removing files on disk")

        # find snapshot data within Changes
        snapshots_by_class = defaultdict(list)
        used_space_by_class = defaultdict(int)

        def get_sort_value(snapshot):
            if snapshot.date_created:
                return int(snapshot.date_created.strftime('%s'))
            else:
                return 0

        for snapshot in sorted(cache.snapshots, key=get_sort_value):
            # this snapshot is unknown or has been invalidated
            if not snapshot.is_valid:
                print("==> Removing snapshot {} (missing upstream)".format(snapshot.id))
                cache.remove(snapshot, wipe_on_disk)
                continue
            # check ttl to see if we can safely remove it
            elif args.ttl and snapshot.date_created < args.ttl:
                print("==> Removing snapshot {} (expired)".format(snapshot.id))
                cache.remove(snapshot, wipe_on_disk)
                continue

            # add size to class pool for later determination
            used_space_by_class[snapshot.project] += snapshot.size
            snapshots_by_class[snapshot.project].append(snapshot)

        if args.max_disk_per_class:
            for project_id, class_size in used_space_by_class.items():
                # keep removing old snapshots until we're under the threshold
                while class_size > args.max_disk_per_class:
                    snapshot = snapshots_by_class.pop(0)
                    print("==> Removing snapshot {} (disk reclaim)".format(snapshot.id))
                    cache.remove(snapshot, wipe_on_disk)
                    class_size -= snapshot.size

        # finally, ensure we're under our disk threshold or remove snapshots
        # based on their size
        # TODO(dcramer): we could optimize this to more evenly remove snapshots
        snapshot_size_iter = iter(sorted(
            cache.snapshots, key=lambda x: x.size, reverse=True))
        while cache.total_size > args.max_disk:
            snapshot = snapshot_size_iter.next()
            print("==> Removing snapshot {} (disk reclaim)".format(snapshot.id))
            cache.remove(snapshot, wipe_on_disk)


def main():
    command = ManagerCommand()
    command.run()


if __name__ == '__main__':
    main()
|
Python
| 0.998663
|
@@ -5887,32 +5887,37 @@
snapshot =
+next(
snapshot_size_it
@@ -5922,14 +5922,8 @@
iter
-.next(
)%0A
|
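The diff replaces snapshot_size_iter.next() with next(snapshot_size_iter): Python 3 dropped the .next() method, while the built-in next() works on both major versions. A minimal illustration:

it = iter([3, 2, 1])
print(next(it))        # -> 3; portable across Python 2 and 3
# it.next()            # works on Python 2 only; AttributeError on Python 3
print(next(it, None))  # a default avoids StopIteration when the iterator is exhausted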
7b77d2569b8056c4c1b184503165a81a426df2e9
|
Allow configuring separator in headeranchor
|
mdownx/headeranchor.py
|
mdownx/headeranchor.py
|
"""
mdownx.headeranchor
An extension for Python Markdown.
Github style tasklists
MIT license.
Copyright (c) 2014 Isaac Muse <isaacmuse@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import unicode_literals
from markdown import Extension
from markdown.treeprocessors import Treeprocessor
from markdown.extensions.headerid import slugify, stashedHTML2text, itertext
LINK = '<a name="user-content-%(id)s" href="#%(id)s" class="headeranchor-link" aria-hidden="true"><span class="headeranchor"></span></a>'
class HeaderAnchorTreeprocessor(Treeprocessor):
def run(self, root):
""" Add header anchors """
for tag in root.getiterator():
if tag.tag in ('h1', 'h2', 'h3', 'h4', 'h5', 'h6'):
if "id" in tag.attrib:
id = tag.get('id')
else:
id = stashedHTML2text(''.join(itertext(tag)), self.md)
id = slugify(id, '-')
tag.set('id', id)
tag.text = self.markdown.htmlStash.store(
LINK % {"id": id},
safe=True
) + tag.text
return root
class HeaderAnchorExtension(Extension):
def extendMarkdown(self, md, md_globals):
"""Add HeaderAnchorTreeprocessor to Markdown instance"""
self.processor = HeaderAnchorTreeprocessor(md)
self.processor.md = md
if 'toc' in md.treeprocessors.keys():
insertion = ">toc"
else:
insertion = ">_end"
md.treeprocessors.add("headeranchor", self.processor, insertion)
md.registerExtension(self)
def makeExtension(configs={}):
return HeaderAnchorExtension(configs=configs)
|
Python
| 0
|
@@ -62,22 +62,21 @@
hub
-style tasklist
+header anchor
s%0A%0AM
@@ -1936,11 +1936,30 @@
id,
-'-'
+self.config.get('sep')
)%0A
@@ -2211,16 +2211,243 @@
ension):
+%0A def __init__(self, configs):%0A self.config = %7B%0A 'sep': %5B'-', %22Separator to use when creating header ids - Default: '-'%22%5D%0A %7D%0A%0A for key, value in configs:%0A self.setConfig(key, value)
%0A%0A de
@@ -2548,24 +2548,24 @@
nstance%22%22%22%0A%0A
-
self
@@ -2603,24 +2603,74 @@
ocessor(md)%0A
+ self.processor.config = self.getConfigs()%0A
self
|
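The diff threads a 'sep' option from the extension into the tree processor via self.config, setConfig() and getConfigs(). A hedged sketch of that older Python-Markdown config pattern (names mirror the diff; this is not the full mdownx code):

from markdown import Extension

class ConfigurableExtension(Extension):
    def __init__(self, configs):
        # Each entry is [default, help text]; setConfig() overrides the default.
        self.config = {
            'sep': ['-', "Separator to use when creating header ids"],
        }
        for key, value in configs:
            self.setConfig(key, value)
        # A processor can then read self.getConfigs()['sep'] at run time.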
bfb014b65932902f9cc07bc85c3aadfdb320c438
|
add static config
|
chapter5/growth_studio/settings.py
|
chapter5/growth_studio/settings.py
|
"""
Django settings for growth_studio project.
Generated by 'django-admin startproject' using Django 1.10.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'q2c4xbdh)hf-$z7v1dyai3n^+(g%l5ogi17rm+rud^ysbx-(h0'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'homepage',
'blog',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'growth_studio.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates/'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'growth_studio.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static/'),
)
|
Python
| 0
|
@@ -3214,10 +3214,117 @@
ic/'),%0A)
+%0APROJECT_DIR = os.path.dirname(os.path.abspath(__file__))%0ASTATIC_ROOT = os.path.join(PROJECT_DIR, 'static')
%0A%0A
|
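The diff adds STATIC_ROOT, the directory `manage.py collectstatic` copies assets into; STATICFILES_DIRS lists the sources and must not include the destination. A short sketch (the 'collected_static' name is illustrative only):

import os

BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
STATICFILES_DIRS = (os.path.join(BASE_DIR, 'static/'),)   # where assets are edited
STATIC_ROOT = os.path.join(BASE_DIR, 'collected_static')  # where collectstatic writes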
a9fd9bcecc5d237d767a2fbb773e0780cce7fa99
|
Add combinations of all the python bins.
|
tests/functional/test_create.py
|
tests/functional/test_create.py
|
import os
import sys

import pytest
import scripttest


is_windows = (
    sys.platform.startswith("win") or
    (sys.platform == "cli" and os.name == "nt")
)
is_26 = sys.version_info[:2] == (2, 6)


@pytest.yield_fixture
def env(request):
    env = scripttest.TestFileEnvironment()
    try:
        yield env
    finally:
        env.clear()


def test_create_via_script(env):
    result = env.run('virtualenv', 'myenv')
    if is_windows:
        assert 'myenv\\Scripts\\activate.bat' in result.files_created
        assert 'myenv\\Scripts\\activate.ps1' in result.files_created
        assert 'myenv\\Scripts\\activate_this.py' in result.files_created
        assert 'myenv\\Scripts\\deactivate.bat' in result.files_created
        assert 'myenv\\Scripts\\pip.exe' in result.files_created
        assert 'myenv\\Scripts\\python.exe' in result.files_created
    else:
        assert 'myenv/bin/activate.sh' in result.files_created
        assert 'myenv/bin/activate_this.py' in result.files_created
        assert 'myenv/bin/python' in result.files_created


def test_create_via_module(env):
    result = env.run('python', '-mvirtualenv.__main__' if is_26 else '-mvirtualenv', 'myenv')
    if is_windows:
        assert 'myenv\\Scripts\\activate.bat' in result.files_created
        assert 'myenv\\Scripts\\activate.ps1' in result.files_created
        assert 'myenv\\Scripts\\activate_this.py' in result.files_created
        assert 'myenv\\Scripts\\deactivate.bat' in result.files_created
        assert 'myenv\\Scripts\\pip.exe' in result.files_created
        assert 'myenv\\Scripts\\python.exe' in result.files_created
    else:
        assert 'myenv/bin/activate.sh' in result.files_created
        assert 'myenv/bin/activate_this.py' in result.files_created
        assert 'myenv/bin/python' in result.files_created
|
Python
| 0
|
@@ -49,26 +49,26 @@
ttest%0A%0A%0A
-is_windows
+IS_WINDOWS
= (%0A
@@ -152,18 +152,18 @@
%22nt%22)%0A)%0A
-is
+IS
_26 = sy
@@ -191,16 +191,412 @@
(2, 6)%0A
+PYTHON_BINS = %5B%0A %22C:%5C%5CPython27%5C%5Cpython.exe%22,%0A %22C:%5C%5CPython27-x64%5C%5Cpython.exe%22,%0A %22C:%5C%5CPython33%5C%5Cpython.exe%22,%0A %22C:%5C%5CPython33-x64%5C%5Cpython.exe%22,%0A %22C:%5C%5CPython34%5C%5Cpython.exe%22,%0A %22C:%5C%5CPython34-x64%5C%5Cpython.exe%22,%0A %22C:%5C%5CPyPy%5C%5Cpypy.exe%22,%0A %22C:%5C%5CPyPy3%5C%5Cpypy.exe%22,%0A None,%0A %22python%22,%0A %22python2.6%22,%0A %22python2.7%22,%0A %22python3.2%22,%0A %22python3.3%22,%0A %22python3.4%22,%0A %22pypy%22,%0A%5D%0A
%0A%0A@pytes
@@ -737,41 +737,147 @@
()%0A%0A
-%0Adef test_create_via_script(env):
+@pytest.mark.parametrize('python', PYTHON_BINS)%0Adef test_create_via_script(env, python):%0A extra = %5B'--python', python%5D if python else %5B%5D
%0A
@@ -907,32 +907,40 @@
ualenv', 'myenv'
+, *extra
)%0A if is_wind
@@ -928,34 +928,34 @@
tra)%0A if
-is_windows
+IS_WINDOWS
:%0A as
@@ -1567,40 +1567,148 @@
ed%0A%0A
-def test_create_via_module(env):
+%0A@pytest.mark.parametrize('python', PYTHON_BINS)%0Adef test_create_via_module(env, python):%0A extra = %5B'--python', python%5D if python else %5B%5D
%0A
@@ -1762,18 +1762,18 @@
n__' if
-is
+IS
_26 else
@@ -1796,16 +1796,24 @@
'myenv'
+, *extra
)%0A if
@@ -1817,18 +1817,18 @@
if
-is_windows
+IS_WINDOWS
:%0A
|
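The diff turns both tests into parametrized ones over PYTHON_BINS, with a None entry standing for "no --python flag" so the default interpreter is exercised too. A minimal, self-contained sketch of the same pattern (the interpreter list here is illustrative):

import pytest

PYTHONS = [None, "python2.7", "python3.4"]

@pytest.mark.parametrize('python', PYTHONS)
def test_option_building(python):
    extra = ['--python', python] if python else []
    # Stand-in for env.run('virtualenv', 'myenv', *extra) in the real test.
    assert extra == [] or extra[0] == '--python'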
c407d023a59b5863b5890836c17a1aa1208244fa
|
use local_base_url from webfront to compose the API URL
|
tests/functional/test_webapi.py
|
tests/functional/test_webapi.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals

import requests

from ava.util.tests import AgentTest

SUCCESS = 'success'
ERROR = 'error'


class TestWebAPI(AgentTest):
    api_url = 'http://127.0.0.1:5080/api'

    def test_ping(self):
        r = requests.get(self.api_url + '/ping')
        assert r.status_code == 200
        data = r.json()
        assert data['status'] == SUCCESS
|
Python
| 0
|
@@ -227,16 +227,18 @@
st):%0A
+ #
api_url
@@ -267,16 +267,219 @@
080/api'
+%0A api_url = ''%0A%0A @classmethod%0A def setUpClass(cls):%0A AgentTest.setUpClass()%0A webfront = cls.agent.context().lookup('webfront')%0A cls.api_url = webfront.local_base_url + 'api'
%0A%0A de
|
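The diff stops hardcoding the port and instead derives the API URL in setUpClass from the agent's 'webfront' service. A hedged sketch of that classmethod pattern (AgentTest and local_base_url are project-specific; plain unittest stands in here):

import unittest

class TestWebAPIUrl(unittest.TestCase):
    api_url = ''

    @classmethod
    def setUpClass(cls):
        base = 'http://127.0.0.1:5080/'  # stand-in for webfront.local_base_url
        cls.api_url = base + 'api'

    def test_url_composed(self):
        self.assertTrue(self.api_url.endswith('/api'))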
6028ae4c2c75a29c0a050429f7ab76da129791fd
|
Improve keosd_auto_launch_test by checking stderr
|
tests/keosd_auto_launch_test.py
|
tests/keosd_auto_launch_test.py
|
#!/usr/bin/env python3

# This script tests that cleos launches keosd automatically when keosd is not
# running yet.

import subprocess


def run_cleos_wallet_command(command: str, no_auto_keosd: bool):
    """Run the given cleos command and return subprocess.CompletedProcess."""
    args = ['./programs/cleos/cleos']
    if no_auto_keosd:
        args.append('--no-auto-keosd')
    args += 'wallet', command
    return subprocess.run(args,
                          check=False,
                          stdout=subprocess.DEVNULL,
                          stderr=subprocess.DEVNULL)


def stop_keosd():
    """Stop the default keosd instance."""
    run_cleos_wallet_command('stop', no_auto_keosd=True)


def keosd_auto_launch_test():
    """Test that keos auto-launching works but can be optionally inhibited."""
    stop_keosd()

    # Make sure that when '--no-auto-keosd' is given, keosd is not started by
    # cleos.
    assert run_cleos_wallet_command('list',
                                    no_auto_keosd=True).returncode != 0

    # Verify that keosd auto-launching works.
    assert run_cleos_wallet_command('list',
                                    no_auto_keosd=False).returncode == 0


try:
    keosd_auto_launch_test()
finally:
    stop_keosd()
|
Python
| 0.000001
|
@@ -574,23 +574,20 @@
process.
-DEVNULL
+PIPE
)%0A%0A%0Adef
@@ -920,30 +920,43 @@
cleos.%0A
-assert
+completed_process =
run_cleos_w
@@ -980,80 +980,142 @@
st',
-%0A no_auto_keosd=True).returncode != 0
+ no_auto_keosd=True)%0A assert completed_process.returncode != 0%0A assert b'Failed to connect to keosd' in completed_process.stderr
%0A%0A
@@ -1162,22 +1162,35 @@
ks.%0A
-assert
+completed_process =
run_cle
@@ -1218,81 +1218,125 @@
st',
-%0A no_auto_keosd=False).returncode == 0
+ no_auto_keosd=False)%0A assert completed_process.returncode == 0%0A assert b'launched' in completed_process.stderr
%0A%0A%0At
|
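The diff switches stderr from DEVNULL to PIPE so the test can assert on what cleos actually prints. The general capture pattern looks like this sketch (assuming a python3 binary on PATH for the demo):

import subprocess

proc = subprocess.run(
    ['python3', '-c', 'import sys; sys.stderr.write("launched")'],
    check=False, stdout=subprocess.DEVNULL, stderr=subprocess.PIPE)
assert b'launched' in proc.stderr  # stderr is bytes unless text=True is passed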
ae1469e450616ba1ac50958262c2c834b75fc77c
|
store reference type in lowercase
|
citeproc/source/bibtex/bibparse.py
|
citeproc/source/bibtex/bibparse.py
|
#!/usr/bin/python
#
# Simple, naive bibtex parser
#
# Vassilios Karakoidas (2008) - vassilios.karakoidas@gmail.com
#
# bib categories
#
# @Article{
# @Book{
# @Booklet{
# @InBook{
# @InCollection{
# @InProceedings{
# @Manual{
# @MastersThesis{
# @Misc{
# @PhDThesis{
# @Proceedings{
# @TechReport{
# @Unpublished{
#

import re
import os
from io import StringIO


class BibtexEntry:
	def __init__(self, bibfile):
		self.key = ''
		self.data = {}
		self.btype = ''
		self.data['filename'] = bibfile

	def getKey(self, key):
		if(key.lower().strip() == self.key.lower()):
			return True
		return False

	def search(self, keywords):
		for word in keywords:
			for (k, v) in self.data.items():
				try:
					v.lower().index(word.lower())
					return True
				except ValueError:
					continue
		return False

	def __get_pdf_name(self):
		if len(self.key) == 0:
			return None
		m = re.match('(.+/[^.]+)\\.bib', self.data['filename'])
		if m == None:
			return None
		filename = "%s/%s.pdf" % ( m.group(1).strip(), self.key.lower() )
		if os.access(filename, os.O_RDONLY) == 1:
			return filename
		return None

	def has_pdf(self):
		return (self.__get_pdf_name() != None)

	def export(self):
		return self.__str__()

	def totext(self):
		return

	def tohtml(self):
		return

	def __str__(self):
		result = StringIO()
		result.write("@%s{%s,\n" % ( self.btype.lower().strip(), self.key.strip() ))
		for k, v in self.data.items():
			result.write("\t%s = {%s},\n" % ( k.title().strip(), v.strip() ))
		filename = self.__get_pdf_name()
		if filename != None:
			result.write("\tpdf-file = {%s},\n" % ( filename, ))
		result.write('}\n')
		return result.getvalue()


def parse_bib(bibfile):
	bibitems = {}
	bib_file = open(bibfile, "r")
	re_head = re.compile('@([a-zA-Z]+)[ ]*\{[ ]*(.*),')
	current = None
	for l in bib_file:
		mr = re_head.match(l.strip())
		if mr != None:
			if current == None:
				current = BibtexEntry(bibfile)
			else:
				bibitems[current.key] = current
				current = BibtexEntry(bibfile)
			current.key = mr.group(2).strip()
			current.btype = mr.group(1).strip()
			continue
		try:
			l.index('=')
			kv_data = l.split('=')
			key = kv_data[0].strip()
			mr = re.search('["{](.+)["}]',kv_data[1].strip())
			if mr != None:
				current.data[key] = mr.group(1).strip()
		except (ValueError, AttributeError):
			continue
	bibitems[current.key] = current
	bib_file.close()
	return bibitems
|
Python
| 0.000334
|
@@ -2081,24 +2081,32 @@
p(1).strip()
+.lower()
%0A%09%09%09continue
|
d43ddab5908a543236a05860fb15658ec154aa5b
|
Fix import in test
|
tests/outputs/gstreamer_test.py
|
tests/outputs/gstreamer_test.py
|
import multiprocessing
import unittest

from mopidy.outputs.gstreamer import GStreamerOutput
from mopidy.process import pickle_connection
from mopidy.utils.path import path_to_uri

from tests import data_folder, SkipTest


class GStreamerOutputTest(unittest.TestCase):
    def setUp(self):
        self.song_uri = path_to_uri(data_folder('song1.wav'))
        self.output_queue = multiprocessing.Queue()
        self.core_queue = multiprocessing.Queue()
        self.output = GStreamerOutput(self.core_queue, self.output_queue)

    def tearDown(self):
        self.output.destroy()

    def send_recv(self, message):
        (my_end, other_end) = multiprocessing.Pipe()
        message.update({'reply_to': pickle_connection(other_end)})
        self.output_queue.put(message)
        my_end.poll(None)
        return my_end.recv()

    def send(self, message):
        self.output_queue.put(message)

    @SkipTest
    def test_play_uri_existing_file(self):
        message = {'command': 'play_uri', 'uri': self.song_uri}
        self.assertEqual(True, self.send_recv(message))

    @SkipTest
    def test_play_uri_non_existing_file(self):
        message = {'command': 'play_uri', 'uri': self.song_uri + 'bogus'}
        self.assertEqual(False, self.send_recv(message))

    def test_default_get_volume_result(self):
        message = {'command': 'get_volume'}
        self.assertEqual(100, self.send_recv(message))

    def test_set_volume(self):
        self.send({'command': 'set_volume', 'volume': 50})
        self.assertEqual(50, self.send_recv({'command': 'get_volume'}))

    def test_set_volume_to_zero(self):
        self.send({'command': 'set_volume', 'volume': 0})
        self.assertEqual(0, self.send_recv({'command': 'get_volume'}))

    def test_set_volume_to_one_hundred(self):
        self.send({'command': 'set_volume', 'volume': 100})
        self.assertEqual(100, self.send_recv({'command': 'get_volume'}))

    @SkipTest
    def test_set_state(self):
        raise NotImplementedError
|
Python
| 0.000001
|
@@ -102,40 +102,37 @@
idy.
-process import pickle_connection
+utils.path import path_to_uri
%0Afro
@@ -139,35 +139,38 @@
m mopidy.utils.p
-ath
+rocess
import path_to_
@@ -162,26 +162,32 @@
import p
-ath_to_uri
+ickle_connection
%0A%0Afrom t
|
010a827abdc891bd79f7474c5ef65b991edf2a1b
|
Update candidate_party_corrections.py
|
calaccess_processed/candidate_party_corrections.py
|
calaccess_processed/candidate_party_corrections.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Includes correct party affiliation for candidate in specific contests.
"""
corrections = (
    # http://elections.cdn.sos.ca.gov/statewide-elections/2014-primary/updated-contact-info.pdf  # noqa
    ('WINSTON, ALMA MARIE', 2014, 'PRIMARY', 'GOVERNOR', 'REPUBLICAN'),
    # http://elections.cdn.sos.ca.gov/statewide-elections/2014-primary/certified-write-in-list.pdf  # noqa
    ('WALLS, JIMELLE L.', 2014, 'PRIMARY', 'GOVERNOR', 'NO PARTY PREFERENCE'),
    # http://elections.cdn.sos.ca.gov/statewide-elections/2012-primary/updated-contact-info-cert-list.pdf  # noqa
    ('ESPINOSA, GEBY E.', 2014, 'PRIMARY', 'ASSEMBLY 24', 'DEMOCRATIC'),
    # http://elections.cdn.sos.ca.gov/special-elections/2011-sd28/certified-list.pdf
    ('VALENTINE, ROBERT S.', 2011, 'SPECIAL ELECTION', 'STATE SENATE 28', 'REPUBLICAN'),
    # http://elections.cdn.sos.ca.gov/statewide-elections/2014-general/updated-contact-info.pdf
    ('KEPHART, GARY', 2014, 'GENERAL', 'STATE SENATE 36', 'DEMOCRATIC'),
)
|
Python
| 0.000001
|
@@ -871,160 +871,139 @@
p://
-elections.cdn.sos.ca.gov/statewide-elections/2014-general/updated-contact-info.pdf%0A ('KEPHART, GARY
+cal-access.sos.ca.gov/Campaign/Candidates/Detail.aspx?id=1273672%0A ('WALDRON, MARIE
', 201
-4, 'GENERAL', 'STATE SENATE 36', 'DEMOCRATIC
+8, 'PRIMARY', 'ASSEMBLY 75', 'REPUBLICAN
'),%0A
|
f37813595d7aa6b67ebf3066636eee5259b180cc
|
Should fix #153
|
post-challenges.py
|
post-challenges.py
|
#!/usr/bin/env python3
"""
Author: Freddie Vargus (github.com/FreddieV4)
File: post_challenges.py
Purpose: Used to pull weekly challenges from r/dailyprogrammer
"""
import re
import os
import praw
from pprint import pprint

NUM_CHALLENGES = 3
debug = False


def db(string):
	if debug:
		print("DB: ", string)


def get_current_week():
	""" Gets 3 challenges, easy, intermediate, hard
		for the current week from r/dailyprogrammer
		and stores the challenge text in directories
		named after the challenge titles
	"""
	r = praw.Reddit(user_agent="dailyprogrammer-challenges")
	sub = r.get_subreddit("dailyprogrammer")

	# retrieve generators for top posts
	chals = sub.get_new(limit=3)
	_chals = sub.get_new(limit=3)

	# get challenge titles & selftext
	challenge_titles = [str(x.title) for x in chals]
	challenge_text = [str(x.selftext) for x in _chals]

	# cleanup titles for directory names
	title_lst = []
	for title in challenge_titles:
		t = re.sub(r'\[([0-9\-]+)\]', '', title) # removes datestamp
		title_lst.append(t.lstrip())
	pprint(title_lst)

	# name directories after challenges
	# add challenge selftext to directories
	for i in range(NUM_CHALLENGES):
		os.system('mkdir "{}"'.format(title_lst[i]))
		f = open('challenge_text.md', 'w')
		f.write(challenge_text[i])
		f.close()
		os.system('mv challenge_text.md "{}"'.format(title_lst[i]))

		#Add a solutions directory to the new challenge directory
		os.system('mkdir solutions')
		os.system('mv solutions "{}"'.format(title_lst[i]))

	os.system("./movedirs.sh")
	os.system("./send-data.sh")


def get_all_submissions():
	""" Gets all submissions from the entire dailyprogrammer
		subreddit and stores their titles and selftexts
		in order to initialize the repository
	"""
	r = praw.Reddit(user_agent="dailyprogrammer-all")
	sub = r.search("Challenge #", subreddit="dailyprogrammer", sort="hot", limit=1000, period='all')
	_sub = r.search("Challenge #", subreddit="dailyprogrammer", sort="hot", limit=1000, period='all')

	# get challenge titles & selftext
	challenge_titles = [catch(str(x.title)) for x in sub]
	challenge_text = [catch(str(x.selftext)) for x in _sub]

	# cleanup titles for directory names
	title_lst = []
	for title in challenge_titles:
		t = re.sub(r'\[([0-9\-\/]+)\]', '', title)
		t = re.sub(r'[<>:\"\\\/|?*]', '', title)
		title_lst.append(t.lstrip())

	print("\nTITLES length", len(title_lst))
	print("\n")
	#pprint(title_lst)

	# name directories after challenges
	for i in range(len(challenge_titles)):
		os.system('mkdir "{}"'.format(title_lst[i]))

	# add challenge selftext to directories
	for i in range(len(challenge_titles)):
		f = open('challenge_text.md', 'w')
		f.write(challenge_text[i])
		f.close()
		os.system('mv challenge_text.md "{}"'.format(title_lst[i]))


def catch(data):
	""" Used to skip over any encoding errors
		when using LC for creation of titles and selftext
		lists
	"""
	try:
		print(data)
		return data
	except UnicodeEncodeError as e:
		print("\n\n\nYOU'VE HIT THE CATCH!!\n")
		return 'trash'


if __name__ == '__main__':
	get_current_week()
|
Python
| 0.99935
|
@@ -998,16 +998,83 @@
testamp%0A
+%09%09t = re.sub(r'%5B%3C%3E:%5C%22%5C%5C%5C/%7C?*%5D', '', title) # removes special chars%0A
%09%09title_
|
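The diff adds a second re.sub() stripping characters that are reserved in Windows file names. Note the inserted line substitutes on title rather than the already-cleaned t; a chained version, where each step feeds the next, would look like this sketch:

import re

def sanitize(title):
    title = re.sub(r'\[([0-9\-\/]+)\]', '', title)  # drop a [YYYY-MM-DD] stamp
    title = re.sub(r'[<>:"\\/|?*]', '', title)      # drop reserved path characters
    return title.lstrip()

print(sanitize("[2017-01-02] Challenge #300 [Easy] Mix it up"))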
4674a0ccbba7596cfcfd4cd21e3355b0afaa0c95
|
Fix l10n history movements
|
postatus/status.py
|
postatus/status.py
|
import requests
import sys
import time


def format_time(t):
    return time.ctime(t)


def format_short_date(t):
    return time.strftime('%m/%d', time.gmtime(t))


# ./bin/l10n_status.py --app=feedback --type=history --highlight=es,pt_BR,po,hu,de,gr,fr,it,ru,ja,tr,zh_TW,zh_CN https://input.mozilla.org/static/l10n_completion.json


class Status(object):
    SKIP_LOCALES = ['en_US']

    def __init__(self, url, app=None, highlight=None):
        self.url = url
        self.app = app
        self.highlight = highlight or []
        self.data = []
        self.created = None

    def get_data(self):
        if self.data:
            return

        resp = requests.get(self.url)
        if resp.status_code != 200:
            resp.raise_for_status()

        self.data = resp.json()
        self.created = format_time(self.data[-1]['created'])

    def summary(self):
        """Generates summary data of today's state"""
        self.get_data()
        highlight = self.highlight

        last_item = self.data[-1]

        output = {}
        output['app'] = self.app or 'All'

        data = last_item['locales']
        if self.app:
            get_data = lambda x: x['apps'][self.app]['percent']
        else:
            get_data = lambda x: x['percent']

        items = [item for item in data.items() if item[0] not in highlight]
        hitems = [item for item in data.items() if item[0] in highlight]

        highlighted = []
        if hitems:
            for loc, loc_data in sorted(hitems, key=lambda x: -x[1]['percent']):
                if loc in self.SKIP_LOCALES:
                    continue
                perc = get_data(loc_data)
                highlighted.append((loc, perc))
        output['highlighted'] = highlighted

        locales = []
        for loc, loc_data in sorted(items, key=lambda x: -x[1]['percent']):
            if loc in self.SKIP_LOCALES:
                continue
            perc = get_data(loc_data)
            locales.append((loc, perc))
        output['locales'] = locales

        output['created'] = self.created
        return output

    def _mark_movement(self, data):
        """For each item, converts to a tuple of (movement, item)"""
        ret = []
        prev_day = None
        for i, day in enumerate(data):
            if i == 0:
                ret.append(('', day))
                continue

            if prev_day > day:
                item = ('down', day)
            elif prev_day < day:
                item = ('up', day)
            elif day < 100:
                item = ('equal', day)

            prev_day = day
            ret.append(item)
        return ret

    def history(self):
        self.get_data()
        data = self.data
        highlight = self.highlight
        app = self.app

        # Get a list of the locales we'll iterate through
        locales = sorted(data[-1]['locales'].keys())

        num_days = 14

        # Truncate the data to what we want to look at
        data = data[-num_days:]

        if app:
            get_data = lambda x: x['apps'][app]['percent']
        else:
            get_data = lambda x: x['percent']

        hlocales = [loc for loc in locales if loc in highlight]
        locales = [loc for loc in locales if loc not in highlight]

        output = {}
        output['app'] = self.app or 'All'
        output['headers'] = [format_short_date(item['created']) for item in data]
        output['highlighted'] = sorted(
            (loc, self._mark_movement(get_data(day['locales'][loc]) for day in data))
            for loc in hlocales
        )
        output['locales'] = sorted(
            (loc, self._mark_movement(
                get_data(day['locales'][loc]) for day in data))
            for loc in locales
        )
        print output['locales']
        output['created'] = self.created
        return output
|
Python
| 0.006142
|
@@ -2337,16 +2337,47 @@
, day))%0A
+ prev_day = day%0A
@@ -2548,20 +2548,10 @@
el
-if day %3C 100
+se
:%0A
@@ -3793,40 +3793,8 @@
)
-%0A print output%5B'locales'%5D
%0A%0A
|
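The diff fixes _mark_movement() by seeding prev_day on the first element and making 'equal' the unconditional fallback (the old `elif day < 100` could leave item unset). The corrected logic, as a standalone sketch:

def mark_movement(values):
    ret, prev = [], None
    for i, value in enumerate(values):
        if i == 0:
            ret.append(('', value))
            prev = value
            continue
        if prev > value:
            ret.append(('down', value))
        elif prev < value:
            ret.append(('up', value))
        else:
            ret.append(('equal', value))
        prev = value
    return ret

print(mark_movement([90, 95, 95, 80]))
# [('', 90), ('up', 95), ('equal', 95), ('down', 80)]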
cc13845684230565fe8bb94f2877001af67a170d
|
Update %%powershell help
|
powershellmagic.py
|
powershellmagic.py
|
"""IPython magics for Windows PowerShell.
"""
__version__ = '0.1'
import atexit
import os
from subprocess import Popen, PIPE
import sys
import tempfile
from IPython.core.magic import (cell_magic, Magics, magics_class)
from IPython.core.magic_arguments import (
argument, magic_arguments, parse_argstring)
@magics_class
class PowerShellMagics(Magics):
"""IPython magics class for Windows PowerShell.
"""
# This class is patterned after
# IPython.core.magics.script.ScriptMagics.
def __init__(self, shell=None):
super(PowerShellMagics, self).__init__(shell=shell)
self._cell_file_name = self._powershell_tempfile()
def _powershell_tempfile(self):
tf = tempfile.NamedTemporaryFile(suffix='.ps1', delete=False)
atexit.register(self._delete_powershell_tempfile)
return tf.name
def _delete_powershell_tempfile(self):
os.remove(self._cell_file_name)
@magic_arguments()
@argument(
'--out',
type=str,
help="Redirect stdout to a variable."
)
@argument(
'--err',
type=str,
help="Redirect stderr to a variable."
)
@cell_magic
def powershell(self, line, cell):
"""Execute a cell written in PowerShell by spawning a process
that invokes the command:
PowerShell -ExecutionPolicy RemoteSigned -File tempfile.ps1
where the argument to '-File' is a file that contains the contents
of the cell.
"""
# This function is patterned after
# IPython.core.magics.ScriptMagics.shebang.
args = parse_argstring(self.powershell, line)
with open(self._cell_file_name, mode='w') as f:
f.write(cell)
cmd = 'PowerShell -ExecutionPolicy RemoteSigned -File {}\r\n'
cmd = cmd.format(self._cell_file_name)
p = Popen(cmd.split(), stdout=PIPE, stderr=PIPE, stdin=PIPE)
out, err = p.communicate()
out = out.decode()
err = err.decode()
if args.out:
self.shell.user_ns[args.out] = out
else:
sys.stdout.write(out)
sys.stdout.flush()
if args.err:
self.shell.user_ns[args.err] = err
else:
sys.stderr.write(err)
sys.stderr.flush()
def load_ipython_extension(ip):
"""Load PowerShellMagics extension"""
ip.register_magics(PowerShellMagics)
|
Python
| 0
|
@@ -1020,16 +1020,21 @@
edirect
+cell
stdout t
@@ -1131,16 +1131,21 @@
edirect
+cell
stderr t
@@ -1238,99 +1238,78 @@
%22%22%22
-Execute a cell written in PowerShell by spawning a process%0A that invokes the command
+Use Windows PowerShell to execute an IPython cell.%0A%0A An example
:%0A%0A
@@ -1322,164 +1322,183 @@
-P
+ In %5B1%5D: %25%25p
ower
-S
+s
hell
- -ExecutionPolicy RemoteSigned -File tempfile.ps1%0A%0A where the argument to '-File' is a file that contains the contents%0A of the cell.
+%0A ...: foreach ($i in 1..3) %7B%0A ...: $i%0A ...: %7D%0A ...:%0A 1%0A 2%0A 3%0A
%0A
|
f3802e794496f3f4ac7c3251f795fd64b3e7ef55
|
Add a missing no-cover to increase coverage under py3
|
pre_commit/util.py
|
pre_commit/util.py
|
from __future__ import unicode_literals

import contextlib
import errno
import functools
import os
import os.path
import shutil
import stat
import subprocess
import tarfile
import tempfile

import pkg_resources

from pre_commit import five


@contextlib.contextmanager
def cwd(path):
    original_cwd = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        os.chdir(original_cwd)


def mkdirp(path):
    try:
        os.makedirs(path)
    except OSError:
        if not os.path.exists(path):
            raise


def memoize_by_cwd(func):
    """Memoize a function call based on os.getcwd()."""
    @functools.wraps(func)
    def wrapper(*args):
        cwd = os.getcwd()
        key = (cwd,) + args
        try:
            return wrapper._cache[key]
        except KeyError:
            ret = wrapper._cache[key] = func(*args)
            return ret

    wrapper._cache = {}

    return wrapper


@contextlib.contextmanager
def clean_path_on_failure(path):
    """Cleans up the directory on an exceptional failure."""
    try:
        yield
    except BaseException:
        if os.path.exists(path):
            rmtree(path)
        raise


@contextlib.contextmanager
def noop_context():
    yield


def shell_escape(arg):
    return "'" + arg.replace("'", "'\"'\"'".strip()) + "'"


def no_git_env():
    # Too many bugs dealing with environment variables and GIT:
    # https://github.com/pre-commit/pre-commit/issues/300
    # In git 2.6.3 (maybe others), git exports GIT_WORK_TREE while running
    # pre-commit hooks
    # In git 1.9.1 (maybe others), git exports GIT_DIR and GIT_INDEX_FILE
    # while running pre-commit hooks in submodules.
    # GIT_DIR: Causes git clone to clone wrong thing
    # GIT_INDEX_FILE: Causes 'error invalid object ...' during commit
    return dict(
        (k, v) for k, v in os.environ.items() if not k.startswith('GIT_')
    )


@contextlib.contextmanager
def tarfile_open(*args, **kwargs):
    """Compatibility layer because python2.6"""
    tf = tarfile.open(*args, **kwargs)
    try:
        yield tf
    finally:
        tf.close()


@contextlib.contextmanager
def tmpdir():
    """Contextmanager to create a temporary directory.  It will be cleaned up
    afterwards.
    """
    tempdir = tempfile.mkdtemp()
    try:
        yield tempdir
    finally:
        rmtree(tempdir)


def resource_filename(filename):
    return pkg_resources.resource_filename(
        'pre_commit',
        os.path.join('resources', filename),
    )


class CalledProcessError(RuntimeError):
    def __init__(self, returncode, cmd, expected_returncode, output=None):
        super(CalledProcessError, self).__init__(
            returncode, cmd, expected_returncode, output,
        )
        self.returncode = returncode
        self.cmd = cmd
        self.expected_returncode = expected_returncode
        self.output = output

    def to_bytes(self):
        output = []
        for maybe_text in self.output:
            if maybe_text:
                output.append(
                    b'\n    ' +
                    five.to_bytes(maybe_text).replace(b'\n', b'\n    ')
                )
            else:
                output.append(b'(none)')

        return b''.join((
            five.to_bytes(
                'Command: {0!r}\n'
                'Return code: {1}\n'
                'Expected return code: {2}\n'.format(
                    self.cmd, self.returncode, self.expected_returncode
                )
            ),
            b'Output: ', output[0], b'\n',
            b'Errors: ', output[1], b'\n',
        ))

    def to_text(self):
        return self.to_bytes().decode('UTF-8')

    if five.PY3:  # pragma: no cover
        __bytes__ = to_bytes
        __str__ = to_text
    else:
        __str__ = to_bytes
        __unicode__ = to_text


def cmd_output(*cmd, **kwargs):
    retcode = kwargs.pop('retcode', 0)
    stdin = kwargs.pop('stdin', None)
    encoding = kwargs.pop('encoding', 'UTF-8')
    __popen = kwargs.pop('__popen', subprocess.Popen)

    popen_kwargs = {
        'stdin': subprocess.PIPE,
        'stdout': subprocess.PIPE,
        'stderr': subprocess.PIPE,
    }

    if stdin is not None:
        stdin = stdin.encode('UTF-8')

    # py2/py3 on windows are more strict about the types here
    cmd = [five.n(arg) for arg in cmd]
    kwargs['env'] = dict(
        (five.n(key), five.n(value))
        for key, value in kwargs.pop('env', {}).items()
    ) or None

    popen_kwargs.update(kwargs)
    proc = __popen(cmd, **popen_kwargs)
    stdout, stderr = proc.communicate(stdin)
    if encoding is not None and stdout is not None:
        stdout = stdout.decode(encoding)
    if encoding is not None and stderr is not None:
        stderr = stderr.decode(encoding)
    returncode = proc.returncode

    if retcode is not None and retcode != returncode:
        raise CalledProcessError(
            returncode, cmd, retcode, output=(stdout, stderr),
        )

    return proc.returncode, stdout, stderr


def rmtree(path):
    """On windows, rmtree fails for readonly dirs."""
    def handle_remove_readonly(func, path, exc):  # pragma: no cover (windows)
        excvalue = exc[1]
        if (
                func in (os.rmdir, os.remove, os.unlink) and
                excvalue.errno == errno.EACCES
        ):
            os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
            func(path)
        else:
            raise

    shutil.rmtree(path, ignore_errors=False, onerror=handle_remove_readonly)
|
Python
| 0.000002
|
@@ -3682,16 +3682,22 @@
no cover
+ (py3)
%0A
@@ -3745,32 +3745,58 @@
o_text%0A else:
+ # pragma: no cover (py2)
%0A __str__
|
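The diff tags each interpreter-specific branch with "# pragma: no cover" plus a label; coverage.py excludes any line carrying the pragma, so the branch that cannot run under the measuring interpreter no longer drags the total down. The label only documents which interpreter the branch belongs to. The general idiom:

import sys

if sys.version_info[0] >= 3:  # pragma: no cover (py2)
    integer_types = (int,)
else:  # pragma: no cover (py3)
    integer_types = (int, long)  # noqa: F821 - long exists on Python 2 only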
af3ddbf32379ecf96224746eb59d0685be9586ae
|
Simplify compression
|
src/mock_vws/_mock_web_query_api.py
|
src/mock_vws/_mock_web_query_api.py
|
"""
A fake implementation of the Vuforia Web Query API.
See
https://library.vuforia.com/articles/Solution/How-To-Perform-an-Image-Recognition-Query
"""
import email.utils
import gzip
import uuid
from io import BytesIO
from typing import Callable, List, Set
from requests_mock import POST
from requests_mock.request import _RequestObjectProxy
from requests_mock.response import _Context
from mock_vws._constants import ResultCodes
from mock_vws._mock_common import Route, json_dump
ROUTES = set([])
def route(
path_pattern: str,
http_methods: List[str],
) -> Callable[..., Callable]:
"""
Register a decorated method so that it can be recognized as a route.
Args:
path_pattern: The end part of a URL pattern. E.g. `/targets` or
`/targets/.+`.
http_methods: HTTP methods that map to the route function.
"""
def decorator(method: Callable[..., str]) -> Callable[..., str]:
"""
Register a decorated method so that it can be recognized as a route.
Args:
method: Method to register.
Returns:
The given `method` with multiple changes, including added
validators.
"""
ROUTES.add(
Route(
route_name=method.__name__,
path_pattern=path_pattern,
http_methods=http_methods,
)
)
return method
return decorator
class MockVuforiaWebQueryAPI:
"""
A fake implementation of the Vuforia Web Query API.
This implementation is tied to the implementation of `requests_mock`.
"""
def __init__(self, ) -> None:
"""
Attributes:
routes: The `Route`s to be used in the mock.
"""
self.routes: Set[Route] = ROUTES
@route(path_pattern='/v1/query', http_methods=[POST])
def query( # pylint: disable=no-self-use
self,
request: _RequestObjectProxy, # pylint: disable=unused-argument
context: _Context, # pylint: disable=unused-argument
) -> str:
"""
Perform an image recognition query.
"""
results: List[str] = []
body = {
'result_code': ResultCodes.SUCCESS.value,
'results': results,
'query_id': uuid.uuid4().hex,
}
text = json_dump(body)
context.headers['Content-Encoding'] = 'gzip'
date = email.utils.formatdate(None, localtime=False, usegmt=True)
context.headers['Date'] = date
out = BytesIO()
with gzip.GzipFile(fileobj=out, mode='w') as f:
f.write(text.encode())
value = out.getvalue()
context.headers['Content-Length'] = str(len(value))
return out.getvalue()
|
Python
| 0.000068
|
@@ -194,31 +194,8 @@
uid%0A
-from io import BytesIO%0A
from
@@ -2029,19 +2029,21 @@
) -%3E
-str
+bytes
:%0A
@@ -2501,145 +2501,43 @@
-out = BytesIO()%0A with gzip.GzipFile(fileobj=out, mode='w') as f:%0A f.write(text.encode())%0A%0A value = out.getvalue(
+value = gzip.compress(text.encode()
)%0A
@@ -2613,19 +2613,10 @@
urn
-out.get
value
-()
%0A
|
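The diff collapses the BytesIO/GzipFile dance into gzip.compress(), which has been available since Python 3.2 and produces an equivalent gzip stream in one call. An equivalence sketch:

import gzip

payload = '{"result_code": "Success"}'.encode()
blob = gzip.compress(payload)
assert gzip.decompress(blob) == payload
print(len(payload), '->', len(blob))  # compressed size will differ from the input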
2b7e95d7964bd194c509ca3b69f5d7a324df1679
|
Switch argument from function_path to path
|
furious/job_utils.py
|
furious/job_utils.py
|
#
# Copyright 2012 WebFilings, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

"""
Functions to help with encoding and decoding job information.
"""

import sys


class BadFunctionPathError(Exception):
    """Invalid function path."""


def get_function_path_and_options(function):
    """Validates `function` is a potentially valid path or reference to
    a function and returns the cleansed path to the function.

    Strings are checked to ensure they conform to Python's identifier
    requirements:
    http://docs.python.org/reference/lexical_analysis.html#identifiers

    Functions passed by reference must be at the top-level of the containing
    module.

    Returns the function path and options.
    """
    # Try to pop the options off whatever they passed in.
    options = getattr(function, '_async_options', None)

    if isinstance(function, basestring):
        # This is a function name in str form.
        import re
        if not re.match(r'^[^\d\W]([a-zA-Z._]|((?<!\.)\d))+$', function):
            raise BadFunctionPathError(
                'Invalid function path, must meet Python\'s identifier '
                'requirements, passed value was "%s".', function)
        return function, options

    if callable(function):
        # Try to figure out the path to the function.
        try:
            parts = [function.__module__]
            if hasattr(function, 'im_class'):
                parts.append(function.im_class.__name__)
            parts.append(function.func_name)
            return ('.'.join(parts), options)
        except AttributeError:
            if function.__module__ == '__builtin__':
                return function.__name__, options

        raise BadFunctionPathError("Unable to determine path to callable.")

    raise BadFunctionPathError("Must provide a function path or reference.")


def path_to_reference(function_path):
    """Convert a function path reference to a reference."""
    # By default JSON decodes strings as unicode. The Python __import__ does
    # not like that choice. So we'll just cast all function paths to a string.
    # NOTE: that there is no corresponding unit test for the classmethod
    # version of this problem. It only impacts importing modules.
    function_path = str(function_path)

    if '.' not in function_path:
        try:
            return globals()["__builtins__"][function_path]
        except KeyError:
            try:
                return getattr(globals()["__builtins__"], function_path)
            except AttributeError:
                pass

        try:
            return globals()[function_path]
        except KeyError:
            pass

        raise BadFunctionPathError(
            'Unable to find function "%s".' % (function_path,))

    module_path, function_name = function_path.rsplit('.', 1)

    if module_path in sys.modules:
        module = sys.modules[module_path]
    else:
        try:
            module = __import__(name=module_path,
                                fromlist=[function_name])
        except ImportError:
            module_path, class_name = module_path.rsplit('.', 1)
            module = __import__(name=module_path, fromlist=[class_name])
            module = getattr(module, class_name)

    try:
        return getattr(module, function_name)
    except AttributeError:
        raise BadFunctionPathError(
            'Unable to find function "%s".' % (function_path,))


def encode_callbacks(callbacks):
    """Encode callbacks to as a dict suitable for JSON encoding."""
    from .async import Async

    if not callbacks:
        return

    encoded_callbacks = {}
    for event, callback in callbacks.iteritems():
        if callable(callback):
            callback, _ = get_function_path_and_options(callback)
        elif isinstance(callback, Async):
            callback = callback.to_dict()

        encoded_callbacks[event] = callback

    return encoded_callbacks


def decode_callbacks(encoded_callbacks):
    """Decode the callbacks to an executable form."""
    from .async import Async

    callbacks = {}
    for event, callback in encoded_callbacks.iteritems():
        if isinstance(callback, dict):
            callback = Async.from_dict(callback)
        else:
            callback = path_to_reference(callback)

        callbacks[event] = callback

    return callbacks
|
Python
| 0.000005
|
@@ -2368,25 +2368,16 @@
ference(
-function_
path):%0A
@@ -2736,25 +2736,16 @@
es.%0A
-function_
path = s
@@ -2747,25 +2747,16 @@
h = str(
-function_
path)%0A%0A
@@ -2772,25 +2772,16 @@
not in
-function_
path:%0A
@@ -2836,25 +2836,16 @@
ins__%22%5D%5B
-function_
path%5D%0A
@@ -2942,25 +2942,16 @@
ns__%22%5D,
-function_
path)%0A
@@ -3047,25 +3047,16 @@
obals()%5B
-function_
path%5D%0A
@@ -3175,33 +3175,24 @@
n %22%25s%22.' %25 (
-function_
path,))%0A%0A
@@ -3221,25 +3221,16 @@
_name =
-function_
path.rsp
@@ -3832,25 +3832,16 @@
s%22.' %25 (
-function_
path,))%0A
|
b19acf693b1b428fdc3440fa3dd78cb5cf173ba4
|
Add the needed wait for ajax statements to checklist acceptance test methods
|
cms/djangoapps/contentstore/features/checklists.py
|
cms/djangoapps/contentstore/features/checklists.py
|
# pylint: disable=C0111
# pylint: disable=W0621

from lettuce import world, step
from nose.tools import assert_true, assert_equal  # pylint: disable=E0611
from terrain.steps import reload_the_page
from selenium.common.exceptions import StaleElementReferenceException


############### ACTIONS ####################

@step('I select Checklists from the Tools menu$')
def i_select_checklists(step):
    world.click_tools()
    link_css = 'li.nav-course-tools-checklists a'
    world.css_click(link_css)


@step('I have opened Checklists$')
def i_have_opened_checklists(step):
    step.given('I have opened a new course in Studio')
    step.given('I select Checklists from the Tools menu')


@step('I see the four default edX checklists$')
def i_see_default_checklists(step):
    checklists = world.css_find('.checklist-title')
    assert_equal(4, len(checklists))
    assert_true(checklists[0].text.endswith('Getting Started With Studio'))
    assert_true(checklists[1].text.endswith('Draft a Rough Course Outline'))
    assert_true(checklists[2].text.endswith("Explore edX\'s Support Tools"))
    assert_true(checklists[3].text.endswith('Draft Your Course About Page'))


@step('I can check and uncheck tasks in a checklist$')
def i_can_check_and_uncheck_tasks(step):
    # Use the 2nd checklist as a reference
    verifyChecklist2Status(0, 7, 0)
    toggleTask(1, 0)
    verifyChecklist2Status(1, 7, 14)
    toggleTask(1, 3)
    verifyChecklist2Status(2, 7, 29)
    toggleTask(1, 6)
    verifyChecklist2Status(3, 7, 43)
    toggleTask(1, 3)
    verifyChecklist2Status(2, 7, 29)


@step('the tasks are correctly selected$')
def tasks_correctly_selected(step):
    verifyChecklist2Status(2, 7, 29)
    # verify that task 7 is still selected by toggling its checkbox state and making sure that it deselects
    world.browser.execute_script("window.scrollBy(0,1000)")
    toggleTask(1, 6)
    verifyChecklist2Status(1, 7, 14)


@step('I select a link to the course outline$')
def i_select_a_link_to_the_course_outline(step):
    clickActionLink(1, 0, 'Edit Course Outline')


@step('I am brought to the course outline page$')
def i_am_brought_to_course_outline(step):
    assert world.is_css_present('body.view-outline')
    assert_equal(1, len(world.browser.windows))


@step('I am brought back to the course outline in the correct state$')
def i_am_brought_back_to_course_outline(step):
    step.given('I see the four default edX checklists')
    # In a previous step, we selected (1, 0) in order to click the 'Edit Course Outline' link.
    # Make sure the task is still showing as selected (there was a caching bug with the collection).
    verifyChecklist2Status(1, 7, 14)


@step('I select a link to help page$')
def i_select_a_link_to_the_help_page(step):
    clickActionLink(2, 0, 'Visit Studio Help')


@step('I am brought to the help page in a new window$')
def i_am_brought_to_help_page_in_new_window(step):
    step.given('I see the four default edX checklists')
    windows = world.browser.windows
    assert_equal(2, len(windows))
    world.browser.switch_to_window(windows[1])
    assert_equal('http://help.edge.edx.org/', world.browser.url)


############### HELPER METHODS ####################

def verifyChecklist2Status(completed, total, percentage):
    def verify_count(driver):
        try:
            statusCount = world.css_find('#course-checklist1 .status-count').first
            return statusCount.text == str(completed)
        except StaleElementReferenceException:
            return False

    world.wait_for(verify_count)
    assert_equal(str(total), world.css_find('#course-checklist1 .status-amount').first.text)
    # Would like to check the CSS width, but not sure how to do that.
    assert_equal(str(percentage), world.css_find('#course-checklist1 .viz-checklist-status-value .int').first.text)


def toggleTask(checklist, task):
    world.css_click('#course-checklist' + str(checklist) + '-task' + str(task))


# TODO: figure out a way to do this in phantom and firefox
# For now we will mark the scenerios that use this method as skipped
def clickActionLink(checklist, task, actionText):
    # text will be empty initially, wait for it to populate
    def verify_action_link_text(driver):
        actualText = world.css_text('#course-checklist' + str(checklist) + ' a', index=task)
        if actualText == actionText:
            return True
        else:
            # toggle checklist item to make sure that the link button is showing
            toggleTask(checklist, task)
            return False

    world.wait_for(verify_action_link_text)
    world.css_click('#course-checklist' + str(checklist) + ' a', index=task)
|
Python
| 0
|
@@ -492,16 +492,51 @@
nk_css)%0A
+ world.wait_for_ajax_complete()%0A
%0A%0A@step(
@@ -3971,16 +3971,51 @@
(task))%0A
+ world.wait_for_ajax_complete()%0A
%0A%0A# TODO
@@ -4703,28 +4703,63 @@
cklist) + ' a', index=task)%0A
+ world.wait_for_ajax_complete()%0A
|
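The diff inserts world.wait_for_ajax_complete() after each interaction so that assertions only run once pending requests have settled. Under the hood this is the generic poll-until-condition pattern, sketched here (the names are illustrative, not the lettuce API):

import time

def wait_for(condition, timeout=5.0, interval=0.1):
    deadline = time.time() + timeout
    while time.time() < deadline:
        if condition():
            return
        time.sleep(interval)
    raise AssertionError('condition not met within {}s'.format(timeout))

wait_for(lambda: True)  # returns immediately; a real predicate would poll the page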
00500833f354158cc5fb918fbdc70d8ede7b3d09
|
Fix and improve debugging of Optparse test.
|
tests/test_django1_10_fixers.py
|
tests/test_django1_10_fixers.py
|
from __future__ import absolute_import, print_function, unicode_literals

import _test_utilities

import pytest


def test_fix_incoming_urls_submodule():
    from django.urls import (
        get_callable,
        RegexURLPattern,
        RegexURLResolver,
        NoReverseMatch,
    )
    assert get_callable


def test_fix_deletion_templatetags_future():
    from compat import render_to_string
    from django.templatetags.future import cycle, firstof
    rendered = render_to_string("core_tags/test_future_cycle_and_firstof.html")
    assert rendered.strip() == "row1\nA"


def test_fix_deletion_template_defaulttags_ssi():
    # already tested in "test_fix_deletion_templatetags_future_ssi()"
    from django.template.defaulttags import ssi
    assert callable(ssi)


def test_fix_behaviour_urls_resolvers_RegexURLPattern():
    from django.core.urlresolvers import RegexURLPattern

    has_lookup_str = hasattr(RegexURLPattern, "lookup_str")

    def dummy_view(request):
        return 72627

    pattern = RegexURLPattern("homepage/", dummy_view)
    assert pattern.callback is dummy_view
    pattern.add_prefix("muyprefix")
    assert pattern.callback is dummy_view
    if has_lookup_str:
        assert pattern.lookup_str.endswith(".dummy_view")  # complex on py3k

    pattern = RegexURLPattern("homepage/", "test_project.views.my_view")
    assert pattern.callback.__name__ == "my_view"
    if has_lookup_str:
        assert pattern.lookup_str == "test_project.views.my_view"
    pattern.add_prefix("myprefix")
    try:
        pattern.callback  # bad prefix now
    except ImportError:
        pass  # might or not raise, depending on django version (caching or not)
    if has_lookup_str:
        # our own "lookup_str" property bypasses the original, CACHED, one
        assert pattern.lookup_str == "myprefix.test_project.views.my_view"

    pattern = RegexURLPattern("homepage/", "my_view")
    with pytest.raises(ImportError):
        myvar = pattern.callback  # missing prefix
    if has_lookup_str:
        assert pattern.lookup_str == "my_view"  # missing prefix but works
    pattern.add_prefix("test_project.views")
    assert pattern.callback.__name__ == "my_view"
    if has_lookup_str:
        assert pattern.lookup_str == "test_project.views.my_view"


def test_fix_behaviour_core_urlresolvers_reverse_with_prefix():
    try:
        from django.core.urlresolvers import reverse
    except ImportError:
        from django.urls import reverse

    view = reverse("homepage")  # by view name
    assert view == "/homepage/"

    view = reverse("test_project.views.my_view")  # by dotted path
    assert view == "/my_view/"


def test_fix_behaviour_conf_urls_url():
    from django.conf.urls import url
    url(r"^admin2/", "test_project.views.my_view", name="test_admin_abc"),


def test_fix_deletion_conf_urls_patterns():
    import django.conf.urls
    from django.conf.urls import patterns, url
    patterns(
        "admin",
        (r"^admin1/", "test_project.views.my_view"),
        url(r"^admin2/", "test_project.views.my_view", name="test_admin_other"),
    )
    assert "patterns" in django.conf.urls.__all__


def test_fix_behaviour_template_smartif_OPERATORS_equals():
    from compat import render_to_string
    rendered = render_to_string("core_tags/test_smartif_operators.html", dict(a=3))
    assert rendered.strip() == "hello\nbye"


def test_fix_behaviour_core_management_parser_optparse():
    from django.core import management
    from six import StringIO
    from django.test.utils import captured_stderr, captured_stdout

    out = StringIO()
    management.call_command('optparse_cmd', stdout=out)
    assert out.getvalue() == "All right, let's dance Rock'n'Roll.\n"

    from django.core.management import BaseCommand
    needs_skip_checks = hasattr(BaseCommand, "requires_system_checks")

    # Simulate command line execution
    with captured_stdout() as stdout, captured_stderr():
        # We skip checks since test project is not complete
        management.execute_from_command_line(['django-admin', 'optparse_cmd'] +
                                             ["--skip-checks"] if needs_skip_checks else [])
    assert stdout.getvalue() == "All right, let's dance Rock'n'Roll.\n"

    with captured_stdout() as stdout, captured_stderr():
        # Check that new, argparse-based commands, still work as intended!
        management.execute_from_command_line(['django-admin', 'diffsettings'])
    assert "ALLOWED_HOSTS" in stdout.getvalue()
|
Python
| 0
|
@@ -3431,24 +3431,55 @@
optparse():%0A
+ from django import VERSION%0A
from dja
@@ -3602,16 +3602,378 @@
stdout%0A%0A
+ with captured_stdout() as stdout, captured_stderr():%0A # Check that new, argparse-based commands, still work as intended!%0A management.execute_from_command_line(%5B'django-admin', 'diffsettings'%5D)%0A stdout_value = stdout.getvalue()%0A print(stdout_value)%0A assert %22ALLOWED_HOSTS%22 in stdout_value%0A%0A # Check that optparse-based command works%0A
out
@@ -4115,131 +4115,8 @@
n%22%0A%0A
- from django.core.management import BaseCommand%0A needs_skip_checks = hasattr(BaseCommand, %22requires_system_checks%22)%0A%0A
@@ -4278,45 +4278,68 @@
-management.execute_from_comman
+needs_skip_checks = (VERSION %3E= (1, 10))%0A cm
d_line
-(
+ =
%5B'dj
@@ -4372,54 +4372,10 @@
'%5D +
-%0A
+(
%5B%22--
@@ -4426,332 +4426,185 @@
-assert stdout.getvalue() == %22All right, let's dance Rock'n'Roll.%5Cn%22%0A%0A with captured_stdout() as stdout, captured_stderr():%0A # Check that new, argparse-based commands, still work as intended!%0A management.execute_from_command_line(%5B'django-admin', 'diffsettings'%5D)%0A assert %22ALLOWED_HOSTS%22 in stdout.getvalue()
+ management.execute_from_command_line(cmd_line)%0A stdout_value = stdout.getvalue()%0A print(stdout_value)%0A assert stdout_value == %22All right, let's dance Rock'n'Roll.%5Cn%22%0A%0A
%0A
|
c830eb5b6b1581ccd3c6c9d80cb48cddc22bb26b
|
Update interop html template location
|
tools/run_tests/python_utils/report_utils.py
|
tools/run_tests/python_utils/report_utils.py
|
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Generate XML and HTML test reports."""
from __future__ import print_function
try:
from mako.runtime import Context
from mako.template import Template
from mako import exceptions
except ImportError:
pass # Mako not installed but it is ok.
import os
import string
import xml.etree.cElementTree as ET
def _filter_msg(msg, output_format):
"""Filters out nonprintable and illegal characters from the message."""
if output_format in ['XML', 'HTML']:
# keep whitespaces but remove formfeed and vertical tab characters
# that make XML report unparseable.
filtered_msg = filter(
lambda x: x in string.printable and x != '\f' and x != '\v',
msg.decode('UTF-8', 'ignore'))
if output_format == 'HTML':
      filtered_msg = filtered_msg.replace('"', '&quot;')
return filtered_msg
else:
return msg
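# Hedged illustration of _filter_msg above (the sample input is an assumption):
#   _filter_msg(b'a\fb\x01"', 'HTML') keeps the printable characters, drops
#   the \f and \x01 bytes, and escapes the double quote to &quot;.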
def render_junit_xml_report(resultset, xml_report, suite_package='grpc',
suite_name='tests'):
"""Generate JUnit-like XML report."""
root = ET.Element('testsuites')
testsuite = ET.SubElement(root, 'testsuite', id='1', package=suite_package,
name=suite_name)
for shortname, results in resultset.iteritems():
for result in results:
xml_test = ET.SubElement(testsuite, 'testcase', name=shortname)
if result.elapsed_time:
xml_test.set('time', str(result.elapsed_time))
ET.SubElement(xml_test, 'system-out').text = _filter_msg(result.message,
'XML')
if result.state == 'FAILED':
ET.SubElement(xml_test, 'failure', message='Failure')
elif result.state == 'TIMEOUT':
ET.SubElement(xml_test, 'error', message='Timeout')
elif result.state == 'SKIPPED':
ET.SubElement(xml_test, 'skipped', message='Skipped')
tree = ET.ElementTree(root)
tree.write(xml_report, encoding='UTF-8')
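# Hedged usage sketch: `resultset` maps a test shortname to an iterable of
# result objects exposing .elapsed_time, .message and .state, as read above.
# The stand-in class is an assumption for illustration only:
#   class _FakeResult(object):
#       elapsed_time, message, state = 1.2, b'log output', 'PASSED'
#   render_junit_xml_report({'suite.case': [_FakeResult()]}, 'report.xml')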
def render_interop_html_report(
client_langs, server_langs, test_cases, auth_test_cases, http2_cases,
resultset, num_failures, cloud_to_prod, prod_servers, http2_interop):
"""Generate HTML report for interop tests."""
template_file = 'tools/run_tests/interop_html_report.template'
try:
mytemplate = Template(filename=template_file, format_exceptions=True)
except NameError:
print('Mako template is not installed. Skipping HTML report generation.')
return
except IOError as e:
print('Failed to find the template %s: %s' % (template_file, e))
return
sorted_test_cases = sorted(test_cases)
sorted_auth_test_cases = sorted(auth_test_cases)
sorted_http2_cases = sorted(http2_cases)
sorted_client_langs = sorted(client_langs)
sorted_server_langs = sorted(server_langs)
sorted_prod_servers = sorted(prod_servers)
args = {'client_langs': sorted_client_langs,
'server_langs': sorted_server_langs,
'test_cases': sorted_test_cases,
'auth_test_cases': sorted_auth_test_cases,
'http2_cases': sorted_http2_cases,
'resultset': resultset,
'num_failures': num_failures,
'cloud_to_prod': cloud_to_prod,
'prod_servers': sorted_prod_servers,
'http2_interop': http2_interop}
html_report_out_dir = 'reports'
if not os.path.exists(html_report_out_dir):
os.mkdir(html_report_out_dir)
html_file_path = os.path.join(html_report_out_dir, 'index.html')
try:
with open(html_file_path, 'w') as output_file:
mytemplate.render_context(Context(output_file, **args))
except:
print(exceptions.text_error_template().render())
raise
def render_perf_profiling_results(output_filepath, profile_names):
with open(output_filepath, 'w') as output_file:
output_file.write('<ul>\n')
for name in profile_names:
output_file.write('<li><a href=%s>%s</a></li>\n' % (name, name))
output_file.write('</ul>\n')
|
Python
| 0
|
@@ -3705,24 +3705,32 @@
ests/interop
+/interop
_html_report
|
524ccb07e924f4b21ddf53bc204385c0596bd297
|
Make record_wpr respect Tests before Measurements.
|
tools/telemetry/telemetry/page/record_wpr.py
|
tools/telemetry/telemetry/page/record_wpr.py
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import sys
import tempfile
import time
from telemetry import test
from telemetry.core import browser_options
from telemetry.core import discover
from telemetry.core import wpr_modes
from telemetry.page import page_measurement
from telemetry.page import page_runner
from telemetry.page import page_set
from telemetry.page import page_test
from telemetry.page import test_expectations
class RecordPage(page_test.PageTest):
def __init__(self, measurements):
# This class overwrites PageTest.Run, so that the test method name is not
# really used (except for throwing an exception if it doesn't exist).
super(RecordPage, self).__init__('Run')
self._action_names = set(
[measurement().action_name_to_run
for measurement in measurements.values()
if measurement().action_name_to_run])
def CanRunForPage(self, page):
return page.url.startswith('http')
def CustomizeBrowserOptionsForPage(self, page, options):
for compound_action in self._CompoundActionsForPage(page):
for action in compound_action:
action.CustomizeBrowserOptions(options)
def Run(self, options, page, tab, results):
# When recording, sleep to catch any resources that load post-onload.
tab.WaitForDocumentReadyStateToBeComplete()
time.sleep(3)
# Run the actions for all measurements. Reload the page between
# actions.
should_reload = False
for compound_action in self._CompoundActionsForPage(page):
if should_reload:
tab.Navigate(page.url)
tab.WaitForDocumentReadyStateToBeComplete()
self._RunCompoundAction(page, tab, compound_action)
should_reload = True
def _CompoundActionsForPage(self, page):
actions = []
for action_name in self._action_names:
if not hasattr(page, action_name):
continue
actions.append(page_test.GetCompoundActionFromPage(page, action_name))
return actions
def Main(base_dir):
measurements = discover.DiscoverClasses(base_dir, base_dir,
page_measurement.PageMeasurement)
tests = discover.DiscoverClasses(base_dir, base_dir, test.Test,
index_by_class_name=True)
options = browser_options.BrowserOptions()
parser = options.CreateParser('%prog <PageSet|Measurement|Test>')
page_runner.AddCommandLineOptions(parser)
recorder = RecordPage(measurements)
recorder.AddCommandLineOptions(parser)
recorder.AddOutputOptions(parser)
_, args = parser.parse_args()
if len(args) != 1:
parser.print_usage()
sys.exit(1)
if args[0].endswith('.json'):
ps = page_set.PageSet.FromFile(args[0])
elif args[0] in measurements:
ps = measurements[args[0]]().CreatePageSet(args, options)
elif args[0] in tests:
ps = tests[args[0]]().CreatePageSet(options)
else:
parser.print_usage()
sys.exit(1)
expectations = test_expectations.TestExpectations()
# Set the archive path to something temporary.
temp_target_wpr_file_path = tempfile.mkstemp()[1]
ps.wpr_archive_info.AddNewTemporaryRecording(temp_target_wpr_file_path)
# Do the actual recording.
options.wpr_mode = wpr_modes.WPR_RECORD
options.no_proxy_server = True
recorder.CustomizeBrowserOptions(options)
results = page_runner.Run(recorder, ps, expectations, options)
if results.errors or results.failures:
logging.warning('Some pages failed. The recording has not been updated for '
'these pages.')
logging.warning('Failed pages:\n%s',
'\n'.join(zip(*results.errors + results.failures)[0]))
if results.skipped:
logging.warning('Some pages were skipped. The recording has not been '
'updated for these pages.')
logging.warning('Skipped pages:\n%s', '\n'.join(zip(*results.skipped)[0]))
if results.successes:
# Update the metadata for the pages which were recorded.
ps.wpr_archive_info.AddRecordedPages(results.successes)
else:
os.remove(temp_target_wpr_file_path)
return min(255, len(results.failures))
|
Python
| 0.000003
|
@@ -2873,26 +2873,19 @@
s%5B0%5D in
-measuremen
+tes
ts:%0A
@@ -2889,26 +2889,19 @@
ps =
-measuremen
+tes
ts%5Bargs%5B
@@ -2924,14 +2924,8 @@
Set(
-args,
opti
@@ -2939,35 +2939,42 @@
elif args%5B0%5D in
-tes
+measuremen
ts:%0A ps = tes
@@ -2962,35 +2962,42 @@
ments:%0A ps =
-tes
+measuremen
ts%5Bargs%5B0%5D%5D().Cr
@@ -3000,32 +3000,38 @@
).CreatePageSet(
+args,
options)%0A else:
|
c5ccf36fbeb6b744918e3090422763103b181de8
|
Fix name (copy paste fail...)
|
tests/test_tripleStandardize.py
|
tests/test_tripleStandardize.py
|
import json
from ppp_nlp_classical import Triple, TriplesBucket, computeTree, simplify, buildBucket, DependenciesTree, tripleProduce1, tripleProduce2, tripleProduce3, buildTree
from ppp_datamodel import Triple, Resource, Missing
import data
from unittest import TestCase
class StandardTripleTests(TestCase):
def testBuildBucket(self):
tree = computeTree(data.give_president_of_USA()['sentences'][0])
qw = simplify(tree)
triple = buildTree(buildBucket(tree,qw))
self.assertIsInstance(triple,Triple)
self.assertEqual(triple.get("predicate"),Resource("identity"))
self.assertEqual(triple.get("object"),Missing())
subj=triple.get("subject")
self.assertEqual(subj.get("subject"),Missing())
self.assertEqual(subj.get("predicate"),Resource("president of"))
self.assertEqual(subj.get("object"),Resource("United States"))
|
Python
| 0
|
@@ -318,24 +318,28 @@
ef testBuild
+From
Bucket(self)
|
fdd4a88c7a7981e8df1dd7da150b164c8121d4be
|
Add more youtube and vimeo links for testing
|
tests/testapp/test_embedding.py
|
tests/testapp/test_embedding.py
|
from django.test import TestCase
from feincms3 import embedding
class EmbeddingTest(TestCase):
def test_no_handlers(self):
"""Embed video without handlers"""
self.assertEqual(embedding.embed_video("stuff"), None)
def test_youtube(self):
"""Test a youtube link"""
self.assertEqual(
embedding.embed_video("https://www.youtube.com/watch?v=dQw4w9WgXcQ"),
"""\
<div class="responsive-embed widescreen youtube"><iframe \
src="https://www.youtube.com/embed/dQw4w9WgXcQ" frameborder="0" \
allow="autoplay; fullscreen" allowfullscreen=""></iframe></div>""",
)
def test_vimeo(self):
self.assertEqual(
embedding.embed_video("https://vimeo.com/455728498"),
"""\
<div class="responsive-embed widescreen vimeo"><iframe \
src="https://player.vimeo.com/video/455728498" frameborder="0" \
allow="autoplay; fullscreen" allowfullscreen=""></iframe></div>""",
)
|
Python
| 0
|
@@ -629,240 +629,1325 @@
-def test_vimeo(self):%0A self.assertEqual(%0A embedding.embed_video(%22https://vimeo.com/455728498%22),%0A %22%22%22%5C%0A%3Cdiv class=%22responsive-embed widescreen vimeo%22%3E%3Ciframe %5C%0Asrc=%22https://player.vimeo.com/video/45572849
+ self.assertEqual(%0A embedding.embed_video(%22https://youtu.be/y7-s5ZvC_2A%22),%0A %22%22%22%5C%0A%3Cdiv class=%22responsive-embed widescreen youtube%22%3E%3Ciframe %5C%0Asrc=%22https://www.youtube.com/embed/y7-s5ZvC_2A%22 frameborder=%220%22 %5C%0Aallow=%22autoplay; fullscreen%22 allowfullscreen=%22%22%3E%3C/iframe%3E%3C/div%3E%22%22%22,%0A )%0A%0A self.assertTrue(%0A embedding.embed_video(%0A %22https://www.youtube.com/watch?v=4zGnNmncJWg&feature=emb_title%22%0A )%0A )%0A self.assertTrue(%0A embedding.embed_video(%0A %22https://www.youtube.com/watch?v=DYu_bGbZiiQ&list=RDJMOOG7rWTPg&index=7%22%0A )%0A )%0A%0A def test_vimeo(self):%0A self.assertEqual(%0A embedding.embed_video(%22https://vimeo.com/455728498%22),%0A %22%22%22%5C%0A%3Cdiv class=%22responsive-embed widescreen vimeo%22%3E%3Ciframe %5C%0Asrc=%22https://player.vimeo.com/video/455728498%22 frameborder=%220%22 %5C%0Aallow=%22autoplay; fullscreen%22 allowfullscreen=%22%22%3E%3C/iframe%3E%3C/div%3E%22%22%22,%0A )%0A%0A self.assertTrue(%0A embedding.embed_video(%22https://player.vimeo.com/video/417955670%22)%0A )%0A%0A self.assertEqual(%0A embedding.embed_video(%22https://vimeo.com/12345678/3213124324%22),%0A %22%22%22%5C%0A%3Cdiv class=%22responsive-embed widescreen vimeo%22%3E%3Ciframe %5C%0Asrc=%22https://player.vimeo.com/video/1234567
8%22 f
|
9b4af9cc7864be8ed38f34628ae012c9e5b4e4c1
|
Fix person tests
|
zou/app/services/index_service.py
|
zou/app/services/index_service.py
|
from pathlib import Path
from zou.app.utils import indexing
from zou.app import app
from zou.app.index_schema import asset_schema, person_schema
from zou.app.services import (
assets_service,
persons_service,
projects_service
)
def get_index(index_name):
"""
Retrieve whoosh index from disk. It is required to perform any operations.
"""
return indexing.get_index(Path(app.config["INDEXES_FOLDER"]) / index_name)
def get_asset_index():
return get_index("assets")
def get_person_index():
return get_index("persons")
def reset_index():
"""
Delete index and rebuild it by looping on all the assets listed in the
database.
"""
reset_asset_index()
reset_person_index()
def reset_entry_index(index_name, schema, get_entries, index_entry):
"""
Clear and rebuild index for given parameters: folder name of the index,
schema, func to get entries to index, func to index a given entry.
"""
index_path = Path(app.config["INDEXES_FOLDER"]) / index_name
index = indexing.create_index(index_path, schema)
entries = get_entries()
for entry in entries:
index_entry(entry, index=index)
print(len(entries), "%s indexed" % index_name)
def remove_entry_index(index, entry_id):
"""
Remove document matching given id from given index.
"""
index_writer = index.writer()
index_writer.delete_by_term("id", entry_id)
index_writer.commit()
return entry_id
def reset_asset_index():
reset_entry_index(
"assets",
asset_schema,
assets_service.get_all_raw_assets,
index_asset
)
def reset_person_index():
reset_entry_index(
"persons",
person_schema,
persons_service.get_all_raw_active_persons,
index_person
)
def search_assets(query, project_ids=[], limit=3):
"""
Perform a search on the index. The query is a simple string. The result is
a list of assets with extra data like the project name and the asset type
name (3 results maximum by default).
"""
index = get_asset_index()
assets = []
ids = indexing.search(index, query, project_ids, limit=limit)
for asset_id in ids:
asset = assets_service.get_asset(asset_id)
asset_type = assets_service.get_asset_type(asset["entity_type_id"])
project = projects_service.get_project(asset["project_id"])
asset["project_name"] = project["name"]
asset["asset_type_name"] = asset_type["name"]
assets.append(asset)
return assets
def search_persons(query, limit=3):
"""
Perform a search on the index. The query is a simple string. The result is
a list of persons (3 results maximum by default).
"""
index = get_person_index()
persons = []
ids = indexing.search(index, query, limit=limit)
for person_id in ids:
person = persons_service.get_person(person_id)
persons.append(person)
return persons
def index_asset(asset, index=None):
"""
Register asset into the index.
"""
if index is None:
index = get_asset_index()
return indexing.index_data(index, {
"name": asset.name,
"project_id": str(asset.project_id),
"episode_id": str(asset.source_id),
"id": str(asset.id)
})
def index_person(person, index=None):
"""
Register person into the index.
"""
if index is None:
index = get_person_index()
return indexing.index_data(index, {
"name": person.full_name(),
"id": str(person.id)
})
def remove_asset_index(asset_id):
"""
Remove document matching given asset id from asset index.
"""
return remove_entry_index(get_asset_index(), asset_id)
def remove_person_index(person_id):
"""
Remove document matching given person id from person index.
"""
return remove_entry_index(get_person_index(), person_id)
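# Hedged usage sketch of the service API above (query strings are illustrative):
#   reset_index()                      # rebuild the asset and person indexes
#   search_assets("chair", limit=5)    # assets enriched with project names
#   search_persons("john")             # up to 3 matching persons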
|
Python
| 0.000001
|
@@ -3730,16 +3730,20 @@
ndex(),
+str(
asset_id
@@ -3743,16 +3743,17 @@
sset_id)
+)
%0A%0A%0Adef r
@@ -3913,16 +3913,20 @@
ndex(),
+str(
person_i
@@ -3919,17 +3919,18 @@
, str(person_id)
+)
%0A
|
0991a09a3678818683614a6735c9c46379b13a96
|
Fix unicode representation
|
gasistafelice/rest/views/blocks/__init__.py
|
gasistafelice/rest/views/blocks/__init__.py
|
import re  # used by AbstractBlock.read_cookie below
from django.utils.translation import ugettext as _, ugettext_lazy as _lazy
from django.shortcuts import render_to_response
from django.contrib.auth.models import User
from gasistafelice.globals import type_model_d
from gasistafelice.rest.models import BlockConfiguration
#------------------------------------------------------------------------------#
# #
#------------------------------------------------------------------------------#
class AbstractBlock(object):
BLOCK_NAME = "default name"
BLOCK_DESCRIPTION = _("default description")
BLOCK_VALID_RESOURCE_TYPES = None
#------------------------------------------------------------------------------#
# #
#------------------------------------------------------------------------------#
def __init__(self):
self.app = 'rest'
self.loc = 'body'
self.name = self.BLOCK_NAME
self.description = self.BLOCK_DESCRIPTION
self.auto_refresh = False
self.refresh_rate = 0
self.start_open = False
#------------------------------------------------------------------------------#
# #
#------------------------------------------------------------------------------#
@property
def block_name(self):
return self.name
#------------------------------------------------------------------------------#
# #
#------------------------------------------------------------------------------#
def _get_user_actions(self, request):
return []
#------------------------------------------------------------------------------#
# #
#------------------------------------------------------------------------------#
def is_valid(self, resource_type):
"""
Returns true if the block is valid for the given resource_type.
If class attribute BLOCK_VALID_RESOURCE_TYPES is None
        it means that it is valid for ALL KINDS OF RESOURCES
"""
if self.BLOCK_VALID_RESOURCE_TYPES is None:
rv = True
else:
rv = resource_type in self.BLOCK_VALID_RESOURCE_TYPES
return rv
#------------------------------------------------------------------------------#
# #
#------------------------------------------------------------------------------#
def visible_in_page(self):
"""
        Return true if the block can be added to the user page.
"""
return True
#------------------------------------------------------------------------------#
# #
#------------------------------------------------------------------------------#
def get_description(self):
return self.description
#------------------------------------------------------------------------------#
# #
#------------------------------------------------------------------------------#
def options_response(self, request, resource_type, resource_id):
# Return options for each block (like filtering contents...)
#
# No options by default
#
ctx={
#'block_name' : 'Details',
'fields': []
}
return render_to_response('options.xml', ctx)
def validate_options(self, options_dict):
# return no errors for user options
return None
#------------------------------------------------------------------------------#
# #
#------------------------------------------------------------------------------#
def get_response(self, request, resource_type, resource_id, args):
return ""
#------------------------------------------------------------------------------#
# #
#------------------------------------------------------------------------------#
def create_block_signature(self, resource_type, resource_id):
resource_class = type_model_d[resource_type]
resource = resource_class.objects.get(pk=int(resource_id))
return self.create_block_signature_from_resource(resource)
def create_block_signature_from_resource(self, resource):
block_urn = '%s/%s/%s/' % (resource.resource_type, resource.id, self.name)
return '<block \
block_name="%s" \
block_description="%s" \
\
block_urn="%s" \
resource_name="%s" \
\
refresh_rate="%s" \
auto_refresh="%s" \
start_open="%s" \
/>' % (
self.block_name,
'%s' % (str(self.get_description())),
block_urn,
str(resource),
self.refresh_rate,
str(self.auto_refresh).lower(),
str(self.start_open).lower(),
)
#------------------------------------------------------------------------------#
# Useful methods #
#------------------------------------------------------------------------------#
def load_user_configuration(self, user, resource_type, resource_id):
# Retrieve block configuration stored by a user
config = None
try:
blocks_conf = BlockConfiguration.objects.get(blocktype=self.block_name
,user=user
,resource_type=resource_type
,resource_id=resource_id
)
config = blocks_conf.get_configuration()
config = self.from_xml_to_dict(config)
return config
except Exception, e:
pass
return config
def from_xml_to_dict(self, xml_string):
from xml.dom import minidom
d = {}
xmldoc = minidom.parseString(xml_string)
for param in xmldoc.getElementsByTagName("param"):
name = param.attributes['name'].value
val = param.attributes['value'].value
d[name] = val
return d
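    # Hedged illustration of the <param> XML shape parsed above (the sample
    # string is an assumption, not project data):
    #   '<config><param name="rows" value="10"/></config>' -> {'rows': '10'}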
def read_cookie(self, resource_type, resource_id, cookie):
d = {}
for k,v in cookie.items():
# block_<app>_<resource type>_<resource_id>_<block_name>_ + _<var_name> = <val>
parts = k.split('__')
if len(parts) != 2: continue
block_id = parts[0]
var_name = parts[1]
if block_id[ -len(self.name) : ] != self.name: continue
block_id = block_id[ : - (1+len(self.name)) ]
(dummy, app, t, i) = re.split('_', block_id)
if app != self.app:
continue
if t == resource_type and i == resource_id:
d[var_name] = v
return d
|
Python
| 0.999999
|
@@ -5552,12 +5552,8 @@
' %25
-(str
(sel
@@ -5572,17 +5572,16 @@
ption())
-)
,%0A
|
c9d45a96236b822e2a5ca11490afdb02b9a5e699
|
Drop Py2 and six on tests/unit/states/test_modjk.py
|
tests/unit/states/test_modjk.py
|
tests/unit/states/test_modjk.py
|
# -*- coding: utf-8 -*-
"""
:codeauthor: Jayesh Kariya <jayeshk@saltstack.com>
"""
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt Libs
import salt.states.modjk as modjk
from salt.ext import six
# Import Salt Testing Libs
from tests.support.unit import TestCase
if six.PY2:
LIST_NOT_STR = "workers should be a list not a <type 'unicode'>"
else:
LIST_NOT_STR = "workers should be a list not a <class 'str'>"
class ModjkTestCase(TestCase):
"""
Test cases for salt.states.modjk
"""
# 'worker_stopped' function tests: 1
def test_worker_stopped(self):
"""
Test to stop all the workers in the modjk load balancer
"""
name = "loadbalancer"
ret = {"name": name, "result": False, "comment": "", "changes": {}}
ret.update({"comment": LIST_NOT_STR})
self.assertDictEqual(modjk.worker_stopped(name, "app1"), ret)
# 'worker_activated' function tests: 1
def test_worker_activated(self):
"""
Test to activate all the workers in the modjk load balancer
"""
name = "loadbalancer"
ret = {"name": name, "result": False, "comment": "", "changes": {}}
ret.update({"comment": LIST_NOT_STR})
self.assertDictEqual(modjk.worker_activated(name, "app1"), ret)
# 'worker_disabled' function tests: 1
def test_worker_disabled(self):
"""
Test to disable all the workers in the modjk load balancer
"""
name = "loadbalancer"
ret = {"name": name, "result": False, "comment": "", "changes": {}}
ret.update({"comment": LIST_NOT_STR})
self.assertDictEqual(modjk.worker_disabled(name, "app1"), ret)
# 'worker_recover' function tests: 1
def test_worker_recover(self):
"""
Test to recover all the workers in the modjk load balancer
"""
name = "loadbalancer"
ret = {"name": name, "result": False, "comment": "", "changes": {}}
ret.update({"comment": LIST_NOT_STR})
self.assertDictEqual(modjk.worker_recover(name, "app1"), ret)
|
Python
| 0
|
@@ -1,28 +1,4 @@
-# -*- coding: utf-8 -*-%0A
%22%22%22%0A
@@ -60,341 +60,84 @@
%22%22%22%0A
-# Import Python libs%0Afrom __future__ import absolute_import, print_function, unicode_literals%0A%0A# Import Salt Libs%0Aimport salt.states.modjk as modjk%0Afrom salt.ext import six%0A%0A# Import Salt Testing Libs%0Afrom tests.support.unit import TestCase%0A%0Aif six.PY2:%0A LIST_NOT_STR = %22workers should be a list not a %3Ctype 'unicode'%3E%22%0Aelse:%0A
+%0Aimport salt.states.modjk as modjk%0Afrom tests.support.unit import TestCase%0A%0A
LIST
|
0a84c767395bd8cb88711afae8bc94b045d50e78
|
Remove unused import
|
tests/utils/test_train_utils.py
|
tests/utils/test_train_utils.py
|
from typing import Any, Dict
import numpy as np
import pytest
import rasa.utils.train_utils as train_utils
from rasa.core.policies.ted_policy import TEDPolicy
from rasa.nlu.constants import NUMBER_OF_SUB_TOKENS
from rasa.nlu.tokenizers.tokenizer import Token
from rasa.shared.nlu.constants import (
SPLIT_ENTITIES_BY_COMMA_DEFAULT_VALUE,
SPLIT_ENTITIES_BY_COMMA,
)
def test_align_token_features():
tokens = [
Token("This", 0, data={NUMBER_OF_SUB_TOKENS: 1}),
Token("is", 5, data={NUMBER_OF_SUB_TOKENS: 1}),
Token("a", 8, data={NUMBER_OF_SUB_TOKENS: 1}),
Token("sentence", 10, data={NUMBER_OF_SUB_TOKENS: 2}),
Token("embedding", 19, data={NUMBER_OF_SUB_TOKENS: 4}),
]
seq_dim = sum(t.get(NUMBER_OF_SUB_TOKENS) for t in tokens)
token_features = np.random.rand(1, seq_dim, 64)
actual_features = train_utils.align_token_features([tokens], token_features)
assert np.all(actual_features[0][0] == token_features[0][0])
assert np.all(actual_features[0][1] == token_features[0][1])
assert np.all(actual_features[0][2] == token_features[0][2])
# sentence is split into 2 sub-tokens
assert np.all(actual_features[0][3] == np.mean(token_features[0][3:5], axis=0))
# embedding is split into 4 sub-tokens
assert np.all(actual_features[0][4] == np.mean(token_features[0][5:10], axis=0))
@pytest.mark.parametrize(
"split_entities_config, expected_initialized_config",
[
(
SPLIT_ENTITIES_BY_COMMA_DEFAULT_VALUE,
{SPLIT_ENTITIES_BY_COMMA: SPLIT_ENTITIES_BY_COMMA_DEFAULT_VALUE},
),
(
{"address": False, "ingredients": True},
{
"address": False,
"ingredients": True,
SPLIT_ENTITIES_BY_COMMA: SPLIT_ENTITIES_BY_COMMA_DEFAULT_VALUE,
},
),
],
)
def test_init_split_entities_config(
    split_entities_config: Any, expected_initialized_config: Dict[str, bool],
):
assert (
train_utils.init_split_entities(
split_entities_config, SPLIT_ENTITIES_BY_COMMA_DEFAULT_VALUE
)
== expected_initialized_config
)
|
Python
| 0.000001
|
@@ -106,60 +106,8 @@
ils%0A
-from rasa.core.policies.ted_policy import TEDPolicy%0A
from
|
a42c361b3014c7acf479528cbd07593ce08d3f41
|
Test or keywords must have a name
|
src/robotide/validators/__init__.py
|
src/robotide/validators/__init__.py
|
# Copyright 2008-2009 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import wx
from robotide.errors import DataError
from robotide.robotapi import is_scalar_var, is_list_var
from robotide import utils
class _AbstractValidator(wx.PyValidator):
"""Implements methods to keep wxPython happy and some helper methods."""
def Clone(self):
return self.__class__()
def TransferFromWindow(self):
return True
def TransferToWindow(self):
return True
def Validate(self, win):
value = self.Window.Value
error = self._validate(value)
if error:
self._show_error(error)
return False
return True
def _show_error(self, message, title='Validation Error'):
ret = wx.MessageBox(message, title, style=wx.ICON_ERROR)
self._set_focus_to_text_control(self.Window)
return ret
def _set_focus_to_text_control(self, ctrl):
ctrl.SetFocus()
ctrl.SelectAll()
class TimeoutValidator(_AbstractValidator):
def _validate(self, value):
time_tokens = utils.split_value(value)
if not time_tokens:
return None
timestr = time_tokens[0]
try:
secs = utils.timestr_to_secs(timestr)
time_tokens[0] = utils.secs_to_timestr(secs)
except DataError, err:
            if '${' not in timestr:
return str(err)
self.Window.SetValue(utils.join_value(time_tokens))
return None
class ArgumentsValidator(_AbstractValidator):
def _validate(self, args_str):
try:
types = [ self._get_type(arg) for arg in utils.split_value(args_str) ]
except ValueError:
return "Invalid argument syntax '%s'" % arg
return self._validate_list_args_in_correct_place(types) \
or self._validate_req_args_in_correct_place(types) or None
def _get_type(self, arg):
if is_scalar_var(arg):
return 1
elif is_scalar_var(arg.split("=")[0]):
return 2
elif is_list_var(arg):
return 3
else:
raise ValueError
def _validate_list_args_in_correct_place(self, types):
if 3 in types and types.index(3) != len(types)-1:
return "List variable allowed only as the last argument"
return None
def _validate_req_args_in_correct_place(self, types):
prev = 0
for t in types:
if t < prev:
return ("Required arguments not allowed after arguments "
"with default values.")
prev = t
return None
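    # Hedged examples of the type codes used above (1 = required scalar,
    # 2 = scalar with default, 3 = list variable); the argument strings are
    # illustrative:
    #   "${a} | ${b}=1 | @{rest}" -> [1, 2, 3] -> valid
    #   "${a}=1 | ${b}"           -> [2, 1]    -> required-after-default error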
class NonEmptyValidator(_AbstractValidator):
def __init__(self, field_name):
_AbstractValidator.__init__(self)
self._field_name = field_name
def Clone(self):
return self.__class__(self._field_name)
def _validate(self, value):
if not value:
return '%s cannot be empty' % self._field_name
return None
class DirectoryExistsValidator(_AbstractValidator):
def _validate(self, value):
if not os.path.isdir(value):
return 'Chosen directory must exist'
return None
class NewSuitePathValidator(_AbstractValidator):
def _validate(self, value):
path = os.path.normpath(value)
if os.path.exists(path):
return 'Target file or directory must not exist'
parentdir, filename = os.path.split(path)
if '__init__' in filename:
parentdir = os.path.dirname(parentdir)
if not os.path.exists(parentdir):
return 'Parent directory must exist'
return None
class _NameValidator(_AbstractValidator):
def __init__(self, controller, orig_name=None):
_AbstractValidator.__init__(self)
self._controller = controller
self._orig_name = orig_name
def Clone(self):
return self.__class__(self._controller, self._orig_name)
class TestCaseNameValidator(_NameValidator):
def _validate(self, name):
return self._controller.validate_test_name(name)
class UserKeywordNameValidator(_NameValidator):
def _validate(self, name):
return self._controller.validate_keyword_name(name)
class ScalarVariableNameValidator(_NameValidator):
def _validate(self, name):
if self._orig_name and utils.eq(name, self._orig_name):
return None
return self._controller.validate_scalar_variable_name(name)
class ListVariableNameValidator(_NameValidator):
def _validate(self, name):
if self._orig_name and utils.eq(name, self._orig_name):
return None
return self._controller.validate_list_variable_name(name)
|
Python
| 0.000001
|
@@ -4574,32 +4574,105 @@
te(self, name):%0A
+ if not name:%0A return 'Test case name cannot be empty'%0A
return s
@@ -4786,32 +4786,108 @@
te(self, name):%0A
+ if not name:%0A return 'User keyword name cannot be empty'%0A
return s
|
d5f77356c5f49e04bbbed08b576b48c4b76c6b1f
|
clean up comments.
|
proptools/solid.py
|
proptools/solid.py
|
""" Solid rocket motor equations.
"""
from scipy.integrate import cumtrapz
from proptools import nozzle
def chamber_pressure(K, a, n, rho_solid, c_star):
"""Chamber pressure due to solid propellant combustion.
See equation 12-6 in Rocket Propulsion Elements 8th edition.
Args:
K (scalar): Ratio of burning area to throat area, :math:`A_b/A_t` [units: dimensionless].
a (scalar): Propellant burn rate coefficient [units: meter second**-1 pascal**-n].
n (scalar): Propellant burn rate exponent [units: dimensionless].
rho_solid (scalar): Solid propellant density [units: kilogram meter**-3].
c_star (scalar): Propellant combustion characteristic velocity [units: meter second**-1].
Returns:
Chamber pressure [units: pascal].
"""
return (K * rho_solid * a * c_star) ** (1 / (1 - n))
def burn_area_ratio(p_c, a, n, rho_solid, c_star):
"""Get the burn area ratio, given chamber pressure and propellant properties.
Reference: Equation 12-6 in Rocket Propulsion Elements 8th edition.
Arguments:
p_c (scalar): Chamber pressure [units: pascal].
a (scalar): Propellant burn rate coefficient [units: meter second**-1 pascal**-n].
n (scalar): Propellant burn rate exponent [units: none].
rho_solid (scalar): Solid propellant density [units: kilogram meter**-3].
c_star (scalar): Propellant combustion characteristic velocity [units: meter second**-1].
Returns:
scalar: Ratio of burning area to throat area, :math:`K = A_b/A_t` [units: dimensionless].
"""
return p_c**(1 - n) / (rho_solid * a * c_star)
def burn_and_throat_area(F, p_c, p_e, a, n, rho_solid, c_star, gamma):
"""Given thrust and chamber pressure, and propellant properties, find the burn area and throat area.
Assumes that the exit pressure is matched (:math:`p_e = p_a`).
Arguments:
F (scalar): Thrust force [units: newton].
p_c (scalar): Chamber pressure [units: pascal].
p_e (scalar): Nozzle exit pressure [units: pascal].
a (scalar): Propellant burn rate coefficient [units: meter second**-1 pascal**-n].
n (scalar): Propellant burn rate exponent [units: none].
rho_solid (scalar): Solid propellant density [units: kilogram meter**-3].
c_star (scalar): Propellant combustion characteristic velocity [units: meter second**-1].
gamma (scalar): Exhaust gas ratio of specific heats [units: dimensionless].
Returns:
(tuple): tuple containing:
A_b (scalar): Burn area [units: meter**2].
A_t (scalar): Throat area [units: meter**2].
"""
C_F = nozzle.thrust_coef(p_c, p_e, gamma)
A_t = F / (C_F * p_c)
A_b = A_t * burn_area_ratio(p_c, a, n, rho_solid, c_star)
return (A_b, A_t)
def thrust_curve(A_b, x, A_t, A_e, p_a, a, n, rho_solid, c_star, gamma):
"""Thrust vs time curve for a solid rocket motor.
Given information about the evolution of the burning surface of the propellant grain,
this function predicts the time-varying thrust of a solid rocket motor.
The evolution of the burning surface is described by two lists, ``A_b`` and ``x``.
Each element in the lists describes a step in the (discretized) evolution of the burning
surface. ``x[i]`` is the distance which the flame front must progress (normal to the burning
surface) to reach step ``i``. ``A_b[i]`` is the burn area at step ``i``.
Arguments:
A_b (list): Burn area at each step [units: meter**-2].
x (list): flame front progress distance at each step [units: meter].
A_t (scalar): Nozzle throat area [units: meter**2].
A_e (scalar): Nozzle exit area [units: meter**2].
p_a (scalar): Ambient pressure during motor firing [units: pascal].
a (scalar): Propellant burn rate coefficient [units: meter second**-1 pascal**-n].
n (scalar): Propellant burn rate exponent [units: none].
rho_solid (scalar): Solid propellant density [units: kilogram meter**-3].
c_star (scalar): Propellant combustion characteristic velocity [units: meter second**-1].
gamma (scalar): Exhaust gas ratio of specific heats [units: dimensionless].
Returns:
(tuple): tuple containing:
t (list): time at each step [units: second].
p_c (list): Chamber pressure at each step [units: pascal].
F (list): Thrust at each step [units: newton].
"""
    # Compute chamber pressure and exit pressure at each flame progress distance x
# [units: pascal].
p_c = chamber_pressure(A_b / A_t, a, n, rho_solid, c_star)
p_e = p_c * nozzle.pressure_from_er(A_e / A_t, gamma)
# Compute the burn rate for each flame progress distance x [units: meter second**-1]
r = a * p_c**n
# Compute the thrust for each flame progress distance x [units: newton]
F = nozzle.thrust(A_t, p_c, p_e, gamma, p_a, A_e / A_t)
# Compute the time to reach each flame progress distance x [units: second]
t = cumtrapz(1 / r, x, initial=0)
return (t, p_c, F)
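# Hedged self-check of the relations above; the propellant values are
# illustrative assumptions (SI units), not data taken from the reference.
if __name__ == '__main__':
    a, n, rho, c_star = 3.0e-5, 0.35, 1700.0, 1500.0
    K = 300.0
    p_c = chamber_pressure(K, a, n, rho, c_star)  # roughly 5e6 Pa for these inputs
    # burn_area_ratio inverts chamber_pressure, so K should round-trip exactly
    assert abs(burn_area_ratio(p_c, a, n, rho, c_star) - K) / K < 1e-9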
|
Python
| 0
|
@@ -26,16 +26,110 @@
uations.
+%0A%0A.. autosummary::%0A%0A chamber_pressure%0A burn_area_ratio%0A burn_and_throat_area%0A thrust_curve
%0A%22%22%22%0A%0Afr
@@ -314,13 +314,20 @@
-See e
+Reference: E
quat
@@ -356,32 +356,33 @@
pulsion Elements
+,
8th edition.%0A%0A
@@ -854,16 +854,24 @@
%0A
+ scalar:
Chamber
@@ -1156,16 +1156,17 @@
Elements
+,
8th edi
@@ -2612,39 +2612,37 @@
eturns:%0A
-(
tuple
-)
: tuple containi
@@ -2645,16 +2645,17 @@
aining:%0A
+%0A
@@ -2693,32 +2693,33 @@
its: meter**2%5D.%0A
+%0A
A_t
@@ -4370,15 +4370,13 @@
-(
tuple
-)
: tu
@@ -4449,16 +4449,17 @@
econd%5D.%0A
+%0A
@@ -4513,32 +4513,33 @@
units: pascal%5D.%0A
+%0A
F (l
|
96dbf260a5c7bf9d5f89951f77792cf1c04d5e38
|
add profiling to perf.py
|
perf.py
|
perf.py
|
import time
from parinfer import indent_mode, paren_mode
def timeProcess(string, options):
numlines = len(string.splitlines())
print "Testing file with", numlines, "lines"
t = time.clock()
indent_mode(string, options)
dt = time.clock() - t
print "Indent Mode:", dt, "s"
t = time.clock()
paren_mode(string, options)
dt = time.clock() - t
print "Paren Mode:", dt, "s"
with open('tests/really_long_file', 'r') as f:
text = f.read()
timeProcess(text, {})
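# Hedged portability note: time.clock() was removed in Python 3.8; on modern
# interpreters the equivalent measurement would use time.perf_counter(), e.g.
#   t = time.perf_counter(); indent_mode(text, {}); dt = time.perf_counter() - t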
|
Python
| 0.000001
|
@@ -1,12 +1,28 @@
+import cProfile%0A
import time%0A
@@ -399,16 +399,158 @@
t, %22s%22%0A%0A
+ cProfile.runctx(%22indent_mode(string, options)%22, globals(), locals())%0A cProfile.runctx(%22paren_mode(string, options)%22, globals(), locals())%0A%0A
with ope
|
e8df708d7d926d82a7df13031aabbfb64e7347b4
|
Change to work with entryparser changes
|
contrib/plugins/comments/xmlrpcplugins/pingback.py
|
contrib/plugins/comments/xmlrpcplugins/pingback.py
|
from config import py
from libs.pyblosxom import PyBlosxom
from libs.Request import Request
from libs import tools
import cgi, os, re, sgmllib, time, urllib
class parser(sgmllib.SGMLParser):
""" Shamelessly grabbed from Sam Ruby
from http://www.intertwingly.net/code/mombo/pingback.py
"""
""" extract title and hrefs from a web page"""
intitle=0
title = ""
hrefs = []
def do_a(self, attrs):
attrs=dict(attrs)
if attrs.has_key('href'): self.hrefs.append(attrs['href'])
def do_title(self, attrs):
if self.title=="": self.intitle=1
def unknown_starttag(self, tag, attrs):
self.intitle=0
def unknown_endtag(self,tag):
self.intitle=0
def handle_charref(self, ref):
if self.intitle: self.title = self.title + ("&#%s;" % ref)
def handle_data(self,text):
if self.intitle: self.title = self.title + text
def fileFor(req, uri):
config = req.getConfiguration()
data = req.getData()
import libs.entryparsers.__init__
libs.entryparsers.__init__.initialize_extensions()
# import plugins
import libs.plugins.__init__
libs.plugins.__init__.initialize_plugins(config)
req.addHttp({"form": cgi.FieldStorage()})
p = PyBlosxom(req)
p.startup()
data['extensions'] = libs.entryparsers.__init__.ext
data['pi_yr'] = ''
data['pi_mo'] = ''
data['pi_da'] = ''
path_info = uri.split('/')[4:] # get rid of http and script
if path_info[0] == '':
path_info.pop(0)
p.processPathInfo(path_info)
args = { 'request': req }
es = p.defaultFileListHandler(args)
for i in es:
if i['fn'] == data['pi_frag'][1:]:
return i['file_path']
def pingback(request, source, target):
source_file = urllib.urlopen(source.split('#')[0])
source_page = parser()
source_page.feed(source_file.read())
source_file.close()
if source_page.title == "": source_page.title = source
if target in source_page.hrefs:
target_file = fileFor(request, target)
body = ''
try:
from rssfinder import getFeeds
from rssparser import parse
baseurl=source.split("#")[0]
for feed in getFeeds(baseurl):
for item in parse(feed)['items']:
if item['link']==source:
if 'title' in item: title = item['title']
if 'content_encoded' in item: body = item['content_encoded'].strip()
if 'description' in item: body = item['description'].strip() or body
body=re.compile('<.*?>',re.S).sub('',body)
body=re.sub('\s+',' ',body)
body=body[:body.rfind(' ',0,250)][:250] + " ...<br /><br />"
except:
pass
cmt = {'title':source_page.title, \
'author':'Pingback',
'pubDate' : str(time.time()), \
'link': source,
'source' : '',
'description' : body}
from libs.plugins.commentdecorator import writeComment
config = request.getConfiguration()
data = request.getData()
from libs.entries.fileentry import FileEntry
datadir = config['datadir']
entry = FileEntry(config, datadir+'/'+target_file+'.txt', datadir)
data['entry_list'] = [ entry ]
writeComment(config, data, cmt)
return "success pinging %s from %s\n" % (source, target)
else:
return "produce xmlrpc fault here"
def register_xmlrpc_methods():
return {'pingback.ping': pingback }
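# Hedged note: the surrounding XML-RPC plugin framework is expected to call
# register_xmlrpc_methods() and dispatch incoming 'pingback.ping' requests to
# pingback(request, source, target), injecting the request object itself.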
|
Python
| 0
|
@@ -1005,101 +1005,8 @@
ta()
-%0A import libs.entryparsers.__init__%0A libs.entryparsers.__init__.initialize_extensions()
%0A%0A
@@ -1109,16 +1109,117 @@
(config)
+%0A%0A # do start callback%0A tools.run_callback(%22start%22, %7B'request': req%7D, mappingfunc=lambda x,y:y)
%0A %0A
@@ -1302,24 +1302,25 @@
p.startup()%0A
+%0A
data%5B'ex
@@ -1336,38 +1336,263 @@
%5D =
-libs.entryparsers.__init__.ext
+tools.run_callback(%22entryparser%22,%0A %7B'txt': PyBlosxom.defaultEntryParser%7D,%0A mappingfunc=lambda x,y:y,%0A defaultfunc=lambda x:x)%0A
%0A
|
dbb6fff417a3beac0db7dec603d4793eabc68a89
|
bump version but not calling it stable in readme yet
|
psiturk/version.py
|
psiturk/version.py
|
version_number = '2.2.3'
|
Python
| 0
|
@@ -19,7 +19,7 @@
2.2.
-3
+4
'%0A
|
a804f44414bdb68053e05f04e778ca95f81d5048
|
Add missing import for OrderedDict in query.py
|
psqlextra/query.py
|
psqlextra/query.py
|
from typing import List, Tuple, Optional, Dict, Any
from enum import Enum
from django.db import models
from django.db.models import sql
from django.db.models.constants import LOOKUP_SEP
from django.core.exceptions import SuspiciousOperation
from .fields import HStoreField
from .expressions import HStoreColumn
from .datastructures import ConditionalJoin
class ConflictAction(Enum):
"""Possible actions to take on a conflict."""
NOTHING = 'NOTHING'
UPDATE = 'UPDATE'
class PostgresQuery(sql.Query):
def rename_annotations(self, annotations) -> None:
"""Renames the aliases for the specified annotations:
.annotate(myfield=F('somestuf__myfield'))
.rename_annotations(myfield='field')
Arguments:
annotations:
The annotations to rename. Mapping the
old name to the new name.
"""
for old_name, new_name in annotations.items():
annotation = self.annotations.get(old_name)
if not annotation:
raise SuspiciousOperation((
'Cannot rename annotation "{old_name}" to "{new_name}", because there'
' is no annotation named "{old_name}".'
).format(old_name=old_name, new_name=new_name))
self._annotations = OrderedDict(
[(new_name, v) if k == old_name else (k, v) for k, v in self._annotations.items()])
self.set_annotation_mask(
(new_name if v == old_name else v for v in self.annotation_select_mask))
def add_join_conditions(self, conditions: Dict[str, Any]) -> None:
"""Adds an extra condition to an existing JOIN.
This allows you to for example do:
INNER JOIN othertable ON (mytable.id = othertable.other_id AND [extra conditions])
        This does not work unless something else in your query already generates
        the initial join in the first place.
"""
alias = self.get_initial_alias()
opts = self.get_meta()
for name, value in conditions.items():
parts = name.split(LOOKUP_SEP)
join_info = self.setup_joins(parts, opts, alias, allow_many=True)
self.trim_joins(join_info[1], join_info[3], join_info[4])
target_table = join_info[3][-1]
field = join_info[1][-1]
join = self.alias_map.get(target_table)
if not join:
raise SuspiciousOperation((
'Cannot add an extra join condition for "%s", there\'s no'
' existing join to add it to.'
) % target_table)
# convert the Join object into a ConditionalJoin object, which
# allows us to add the extra condition
if not isinstance(join, ConditionalJoin):
self.alias_map[target_table] = ConditionalJoin.from_join(join)
join = self.alias_map[target_table]
join.add_condition(field, value)
def add_fields(self, field_names: List[str], allow_m2m: bool=True) -> bool:
"""
Adds the given (model) fields to the select set. The field names are
added in the order specified.
This overrides the base class's add_fields method. This is called by
the .values() or .values_list() method of the query set. It instructs
        the ORM to only select certain values. A lot of processing is necessary
because it can be used to easily do joins. For example, `my_fk__name` pulls
in the `name` field in foreign key `my_fk`.
In our case, we want to be able to do `title__en`, where `title` is a HStoreField
and `en` a key. This doesn't really involve a join. We iterate over the specified
field names and filter out the ones that refer to HStoreField and compile it into
an expression which is added to the list of to be selected fields using `self.add_select`.
"""
alias = self.get_initial_alias()
opts = self.get_meta()
cols = []
for name in field_names:
parts = name.split(LOOKUP_SEP)
# it cannot be a special hstore thing if there's no __ in it
if len(parts) > 1:
column_name, hstore_key = parts[:2]
is_hstore, field = self._is_hstore_field(column_name)
if is_hstore:
cols.append(
HStoreColumn(self.model._meta.db_table or self.model.name, field, hstore_key)
)
continue
join_info = self.setup_joins(parts, opts, alias, allow_many=allow_m2m)
targets, final_alias, joins = self.trim_joins(
join_info[1], join_info[3], join_info[4]
)
for target in targets:
cols.append(target.get_col(final_alias))
if cols:
self.set_select(cols)
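    # Hedged illustration of the special-casing above: with an HStoreField
    # named `title`, a call such as .values('title__en') selects the `en` key
    # through HStoreColumn instead of attempting a join on `title`.
    # (Model and key names are assumptions for illustration.)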
def _is_hstore_field(self, field_name: str) -> Tuple[bool, Optional[models.Field]]:
"""Gets whether the field with the specified name is a
HStoreField.
Returns
A tuple of a boolean indicating whether the field
with the specified name is a HStoreField, and the
field instance.
"""
field_instance = None
for field in self.model._meta.local_concrete_fields:
if field.name == field_name or field.column == field_name:
field_instance = field
break
return isinstance(field_instance, HStoreField), field_instance
class PostgresInsertQuery(sql.InsertQuery):
"""Insert query using PostgreSQL."""
def __init__(self, *args, **kwargs):
"""Initializes a new instance :see:PostgresInsertQuery."""
super(PostgresInsertQuery, self).__init__(*args, **kwargs)
self.conflict_target = []
self.conflict_action = ConflictAction.UPDATE
self.update_fields = []
def values(self, objs: List, insert_fields: List, update_fields: List=[]):
"""Sets the values to be used in this query.
Insert fields are fields that are definitely
going to be inserted, and if an existing row
is found, are going to be overwritten with the
specified value.
Update fields are fields that should be overwritten
in case an update takes place rather than an insert.
        If we're dealing with an INSERT, these will not be used.
Arguments:
objs:
The objects to apply this query to.
insert_fields:
The fields to use in the INSERT statement
update_fields:
The fields to only use in the UPDATE statement.
"""
self.insert_values(insert_fields, objs, raw=False)
self.update_fields = update_fields
|
Python
| 0
|
@@ -1,12 +1,34 @@
+from enum import Enum%0A
from typing
@@ -72,32 +72,46 @@
ny%0Afrom
-enum import Enum
+collections import OrderedDict
%0A%0Afrom d
|
b22b9d8d1e3d84d89368a4ed8ea4fad576f2c7b5
|
Fix a NameError in LibNotifyHandler
|
logbook/notifiers.py
|
logbook/notifiers.py
|
# -*- coding: utf-8 -*-
"""
logbook.notifiers
~~~~~~~~~~~~~~~~~
System notify handlers for OSX and Linux.
:copyright: (c) 2010 by Armin Ronacher, Christopher Grebs.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
import base64
import urllib2
from urllib import urlencode
from logbook.base import NOTSET, ERROR, WARNING
from logbook.handlers import Handler
from logbook.helpers import get_application_name
def create_notification_handler(application_name=None, level=NOTSET, icon=None):
"""Creates a handler perfectly fit the current platform. On Linux
systems this creates a :class:`LibNotifyHandler`, on OS X systems it
will create a :class:`GrowlHandler`.
"""
if sys.platform == 'darwin':
return GrowlHandler(application_name, level=level, icon=icon)
return LibNotifyHandler(application_name, level=level, icon=icon)
class GrowlHandler(Handler):
"""A handler that dispatches to Growl. Requires that either growl-py or
    py-Growl is installed.
"""
def __init__(self, application_name=None, icon=None, host=None,
password=None, level=NOTSET, filter=None, bubble=False):
Handler.__init__(self, level, filter, bubble)
# growl is using the deprecated md5 module, but we really don't need
# to see that deprecation warning
from warnings import filterwarnings
filterwarnings(module='Growl', category=DeprecationWarning,
action='ignore')
try:
import Growl
self._growl = Growl
except ImportError:
raise RuntimeError('The growl module is not available. You have '
'to install either growl-py or py-Growl to '
'use the GrowlHandler.')
# if no application name is provided, guess it from the executable
if application_name is None:
application_name = get_application_name()
if icon is not None:
if not os.path.isfile(icon):
raise IOError('Filename to an icon expected.')
icon = self._growl.Image.imageFromPath(icon)
else:
try:
icon = self._growl.Image.imageWithIconForCurrentApplication()
except TypeError:
icon = None
self.application_name = application_name
self._notifier = self._growl.GrowlNotifier(
applicationName=application_name,
applicationIcon=icon,
notifications=['Notset', 'Debug', 'Info', 'Notice', 'Warning',
'Error', 'Critical'],
hostname=host,
password=password
)
self._notifier.register()
def is_sticky(self, record):
"""Returns `True` if the sticky flag should be set for this record.
The default implementation marks errors and criticals sticky.
"""
return record.level >= ERROR
def get_priority(self, record):
"""Returns the priority flag for Growl. Errors and criticals are
get highest priority (2), warnings get higher priority (1) and the
rest gets 0. Growl allows values between -2 and 2.
"""
if record.level >= ERROR:
return 2
elif record.level == WARNING:
return 1
return 0
def make_title(self, record):
"""Called to get the title from the record."""
return u'%s: %s' % (record.channel, record.level_name.title())
def make_text(self, record):
"""Called to get the text of the record."""
return record.message
def emit(self, record):
title = self.make_title(record)
text = self.make_text(record)
self._notifier.notify(record.level_name.title(), title, text,
sticky=self.is_sticky(record),
priority=self.get_priority(record))
class LibNotifyHandler(Handler):
"""A handler that dispatches to libnotify. Requires pynotify installed.
If `no_init` is set to `True` the initialization of libnotify is skipped.
"""
def __init__(self, application_name=None, icon=None, no_init=False, level=NOTSET,
filter=None, bubble=False):
Handler.__init__(self, level, filter, bubble)
try:
import pynotify
self._pynotify = pynotify
except ImportError:
raise RuntimeError('The pynotify library is required for '
'the LibNotifyHandler.')
if application_name is None:
application_name = get_application_name()
self.application_name = application_name
self.icon = icon
if not self.no_init:
pynotify.init(application_name)
def set_icon(self, notifier, icon):
try:
from gtk import gdk
except ImportError:
#TODO: raise a warning?
raise RuntimeError('The gtk.gdk module is required to set an icon.')
if icon is not None:
if not isinstance(icon, gdk.Pixbuf):
icon = gdk.pixbuf_new_from_file(icon)
notifier.set_icon_from_pixbuf(icon)
def get_expires(self, record):
"""Returns either EXPIRES_DEFAULT or EXPIRES_NEVER for this record.
The default implementation marks errors and criticals as EXPIRES_NEVER.
"""
pn = self._pynotify
return pn.EXPIRES_NEVER if record.level >= ERROR else pn.EXPIRES_DEFAULT
def get_urgency(self, record):
"""Returns the urgency flag for pynotify. Errors and criticals are
get highest urgency (CRITICAL), warnings get higher priority (NORMAL)
and the rest gets LOW.
"""
pn = self._pynotify
if record.level >= ERROR:
            return pn.URGENCY_CRITICAL
elif record.level == WARNING:
return pn.URGENCY_NORMAL
return pn.URGENCY_LOW
def make_summary(self, record):
"""Called to get the summary from the record."""
return u'%s: %s' % (record.channel, record.level_name.title())
def make_body(self, record):
"""Called to get the body of the record."""
return record.message
def emit(self, record):
summary = self.make_summary(record)
body = self.make_body(record)
notifier = self._pynotify.Notification(summary, body)
notifier.set_urgency(self.get_urgency(record))
notifier.set_timeout(self.get_expires(record))
self.set_icon(notifier, self.icon)
notifier.show()
class BoxcarHandler(Handler):
"""Sends notifications to boxcar.io. Can be forwarded to your iPhone or
other compatible device.
"""
api_url = 'https://boxcar.io/notifications/'
def __init__(self, email, password, level=NOTSET, filter=None, bubble=False):
Handler.__init__(self, level, filter, bubble)
self.email = email
self.password = password
def get_screen_name(self, record):
"""Returns the value of the screen name field."""
return record.level_name.title()
def get_message(self, record):
"""Returns the message to be attached."""
return record.message
def emit(self, record):
data = {
'notification[from_screen_name]':
self.get_screen_name(record).encode('utf-8'),
'notification[message]':
self.get_message(record).encode('utf-8')
}
req = urllib2.Request(self.api_url, urlencode(data))
req.add_header('Authorization', 'Basic %s' %
base64.encodestring(('%s:%s' % (self.email, self.password))
.encode('utf-8')))
urllib2.urlopen(req).read()
|
Python
| 0.000447
|
@@ -4746,21 +4746,16 @@
if not
-self.
no_init:
|
c5e3188ab7d476805f7acad6bd3a4e6e96ebf440
|
Update cumulusci/tasks/bulkdata/generate_and_load_data.py
|
cumulusci/tasks/bulkdata/generate_and_load_data.py
|
cumulusci/tasks/bulkdata/generate_and_load_data.py
|
import os
from cumulusci.tasks.salesforce import BaseSalesforceApiTask
from cumulusci.tasks.bulkdata import LoadData
from cumulusci.utils import temporary_dir
from cumulusci.core.config import TaskConfig
from cumulusci.core.utils import import_global
from cumulusci.core.exceptions import TaskOptionsError
class GenerateAndLoadData(BaseSalesforceApiTask):
""" Orchestrate creating tempfiles, generating data, loading data, cleaning up tempfiles and batching."""
task_docs = """
Use the `num_records` option to specify how many records to generate.
Use the `mappings` option to specify a mapping file.
    Use 'data_generation_task' to specify what Python class to use to generate the data.
Use 'batch_size' to specify how many records to generate and upload in every batch.
By default it creates the data in a temporary file and then cleans it up later. Specify database_url if you
need more control than that. The use of both database_url and batch_size together is not currently supported.
"""
task_options = {
"num_records": {
"description": "How many records to generate. Precise calcuation depends on the generator.",
"required": True,
},
"batch_size": {
"description": "How many records to create and load at a time..",
"required": False,
},
"mapping": {"description": "A mapping YAML file to use", "required": True},
"data_generation_task": {
"description": "Fully qualified class path of a task to generate the data. Use cumulusci.tasks.bulkdata.factory_generator if you would like to use a Factory Module.",
"required": False,
},
"data_generation_options": {
"description": "Options to pass to the data generator.",
"required": False,
},
"database_url": {
"description": "A URL to store the database (defaults to a transient SQLite file)",
"required": "",
},
}
def _run_task(self):
mapping_file = os.path.abspath(self.options["mapping"])
assert os.path.exists(mapping_file), f"{mapping_file} cannot be found."
database_url = self.options.get("database_url")
num_records = int(self.options["num_records"])
batch_size = int(self.options.get("batch_size", num_records))
if database_url and batch_size != num_records:
raise TaskOptionsError(
"You may not specify both `database_url` and `batch_size` options."
)
with temporary_dir() as tempdir:
for current_batch_size, index in self._batches(num_records, batch_size):
self._generate_batch(
database_url, tempdir, mapping_file, current_batch_size, index
)
@staticmethod
def _batches(num_records, batch_size):
num_batches = (num_records // batch_size) + 1
for i in range(0, num_batches):
if i == num_batches - 1: # last batch
batch_size = num_records - (batch_size * i) # leftovers
if batch_size > 0:
yield batch_size, i
def _datagen(self, subtask_options):
class_path = self.options.get("data_generation_task", None)
task_class = import_global(class_path)
task_config = TaskConfig({"options": subtask_options})
data_gen_task = task_class(
self.project_config, task_config, org_config=self.org_config
)
data_gen_task()
def _dataload(self, subtask_options):
subtask_config = TaskConfig({"options": subtask_options})
subtask = LoadData(
project_config=self.project_config,
task_config=subtask_config,
org_config=self.org_config,
flow=self.flow,
name=self.name,
stepnum=self.stepnum,
)
subtask()
def _generate_batch(self, database_url, tempdir, mapping_file, batch_size, index):
if not database_url:
sqlite_path = os.path.join(tempdir, f"generated_data_{index}.db")
database_url = f"sqlite:///" + sqlite_path
subtask_options = {
**self.options,
"mapping": mapping_file,
"database_url": database_url,
"num_records": batch_size,
}
self._datagen(subtask_options)
self._dataload(subtask_options)
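
For illustration, a minimal standalone sketch of the batching arithmetic above (not part of the original file; the numbers are made up): 10 records with a batch size of 4 yield batches of 4, 4 and 2.

def batches(num_records, batch_size):
    # Mirrors GenerateAndLoadData._batches: full batches first, then a
    # single leftover batch if any records remain.
    num_batches = (num_records // batch_size) + 1
    for i in range(num_batches):
        if i == num_batches - 1:  # last batch gets the leftovers
            batch_size = num_records - (batch_size * i)
        if batch_size > 0:
            yield batch_size, i

print(list(batches(10, 4)))  # [(4, 0), (4, 1), (2, 2)]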
|
Python
| 0
|
@@ -576,17 +576,16 @@
%60mapping
-s
%60 option
|
faebedc4621a9a1e6e9b2b20fb4f98f61ba3fcf9
|
Update skbio/parse/sequences/factory.py
|
skbio/parse/sequences/factory.py
|
skbio/parse/sequences/factory.py
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import os
from gzip import open as gzip_open
from itertools import chain
from .iterator import FastaIterator, FastqIterator
FILEEXT_MAP = {'fna': (FastaIterator, open),
'fna.gz': (FastaIterator, gzip_open),
'fasta': (FastaIterator, open),
'fasta.gz': (FastaIterator, gzip_open),
'qual': (FastaIterator, open),
'qual.gz': (FastaIterator, gzip_open),
'fastq': (FastqIterator, open),
'fastq.gz': (FastqIterator, gzip_open),
'fq': (FastqIterator, open),
'fq.gz': (FastqIterator, gzip_open)}
def _determine_types_and_openers(files):
"""Attempt to determine the appropriate iterators and openers"""
if files is None:
return [], []
iters = []
openers = []
for fpath in files:
if fpath.endswith('.gz'):
ext = '.'.join(fpath.rsplit('.', 2)[-2:])
else:
ext = fpath.rsplit('.', 1)[-1]
i, o = FILEEXT_MAP.get(ext, (None, None))
if i is None:
raise IOError("Unknown filetype for %s" % fpath)
iters.append(i)
openers.append(o)
return iters, openers
def _is_single_iterator_type(iters):
"""Determine if there is a single or multiple type of iterator
    If iters is [], this method returns True, as it considers the null case
    to be a single iterator type.
"""
if iters:
return len(set(iters)) == 1
else:
return True
def _open_or_none(opener, f):
"""Open a file or returns None"""
if not opener:
return None
else:
name = opener.__name__
if not os.path.exists(f):
raise IOError("%s does not appear to exist!" % f)
try:
with opener(f) as opened:
pass
except IOError:
raise IOError("Could not open %s with %s!" % (f, name))
return opened
def load(seqs, qual=None, constructor=None, **kwargs):
"""Construct the appropriate iterator for all your processing needs
This method will attempt to open all files correctly and to feed the
appropriate objects into the correct iterators.
Seqs can list multiple types of files (e.g., FASTA and FASTQ), but if
multiple file types are specified, qual must be None
Parameters
----------
seqs : str or list of sequence file paths
qual : str or list of qual file paths or None
constructor : force a constructor on seqs
kwargs : dict
passed into the subsequent generators.
Returns
-------
SequenceIterator
the return is ``Iterable``
See Also
--------
skbio.parse.sequences.iterator.SequenceIterator
skbio.parse.sequences.iterator.FastaIterator
skbio.parse.sequences.iterator.FastqIterator
"""
if not seqs:
raise ValueError("Must pass in sequences!")
if isinstance(seqs, str):
seqs = [seqs]
if isinstance(qual, str):
qual = [qual]
# i -> iters, o -> openers
if constructor is not None:
i_seqs = [constructor] * len(seqs)
o_seqs = [open] * len(seqs)
else:
i_seqs, o_seqs = _determine_types_and_openers(seqs)
i_qual, o_qual = _determine_types_and_openers(qual)
seqs = [_open_or_none(o, f) for f, o in zip(seqs, o_seqs)]
qual = [_open_or_none(o, f) for f, o in zip(qual or [], o_qual or [])]
if not qual:
qual = None
if not _is_single_iterator_type(i_seqs) and qual is not None:
        # chaining Fasta/Fastq for sequences is easy, but it gets nasty quickly
        # if seqs is a mix of fasta/fastq and qual is also provided, as there
        # aren't 1-1 mappings. This could be addressed if necessary, but seems
        # like an unnecessary block of code right now
raise ValueError("Cannot handle multiple sequence file types and qual "
"at the sametime!")
if _is_single_iterator_type(i_seqs):
seqs_constructor = i_seqs[0]
gen = seqs_constructor(seq=seqs, qual=qual, **kwargs)
else:
gen = chain(*[c(seq=[fp], **kwargs) for c, fp in zip(i_seqs, seqs)])
return gen
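
A small standalone sketch of the extension sniffing performed by _determine_types_and_openers (the file names are hypothetical):

# Gzipped files keep their compound extension as the FILEEXT_MAP key;
# plain files use only their last extension.
for fpath in ['reads.fastq.gz', 'reads.fna', 'scores.qual.gz']:
    if fpath.endswith('.gz'):
        ext = '.'.join(fpath.rsplit('.', 2)[-2:])
    else:
        ext = fpath.rsplit('.', 1)[-1]
    print("%s -> %s" % (fpath, ext))  # fastq.gz, fna, qual.gz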
|
Python
| 0.000001
|
@@ -2137,20 +2137,29 @@
-pass
+return opened
%0A exc
@@ -2240,27 +2240,8 @@
))%0A%0A
- return opened%0A%0A
%0Adef
|
e9fd001a21c594f3efd076aab73fdb3fafaa49f0
|
fix typo
|
tests/build_utilities/native_config.xpybuild.py
|
tests/build_utilities/native_config.xpybuild.py
|
import os, glob, logging
from xpybuild.propertysupport import *
from xpybuild.buildcommon import *
from xpybuild.pathsets import *
from xpybuild.utils.compilers import GCC, VisualStudio
log = logging.getLogger('xpybuild.tests.native_config')
# some basic defaults for recent default compilers for running our testcases with
if IS_WINDOWS:
VSROOT=r'c:\Program Files (x86)\Microsoft Visual Studio *'
if glob.glob(VSROOT):
VSROOT = sorted(glob.glob(VSROOT))[-1] # pick the latest one
else:
raise Exception('Cannot find Visual Studio installed in: %s'%VSROOT)
setGlobalOption('native.include', [
VSROOT+r"\VC\ATLMFC\INCLUDE",
VSROOT+r"\VC\INCLUDE",
r"C:\Program Files (x86)\Windows Kits\10\Include\10.0.10240.0\ucrt",
])
if not os.path.exists(r"C:\Program Files (x86)\Windows Kits\10"):
log.warning('WARN - Cannot find expected Windows Kits, got: %s'%sorted(glob.glob(r"C:\Program Files (x86)\Windows Kits\*")))
if not os.path.exists(r"C:\Program Files (x86)\Windows Kits\10\Lib\10.0.10240.0\ucrtx"):
log.warning('WARN - Cannot find expected Windows Kits UCRT, got: %s'%sorted(glob.glob(r"C:\Program Files (x86)\Windows Kits\10\Lib\*\*")))
setGlobalOption('native.libpaths', [
VSROOT+r"\VC\ATLMFC\LIB\amd64",
VSROOT+r"\VC\LIB\amd64",
r"C:\Program Files (x86)\Windows Kits\10\Lib\10.0.10240.0\ucrt\x64",
r"C:\Program Files (x86)\Windows Kits\10\Lib\10.0.19041.0\um\x64",
])
setGlobalOption('native.cxx.path', [
VSROOT+r"\Common7\IDE",
VSROOT+r"\VC\BIN\amd64",
VSROOT+r"\Common7\Tools",
r"c:\Windows\Microsoft.NET\Framework\v3.5",
])
setGlobalOption('native.compilers', VisualStudio(VSROOT+r'\VC\bin\amd64'))
setGlobalOption('native.cxx.flags', ['/EHa', '/GR', '/O2', '/Ox', '/Ot', '/MD', '/nologo'])
else:
setGlobalOption('native.compilers', GCC())
setGlobalOption('native.cxx.flags', ['-fPIC', '-O3', '--std=c++0x'])
|
Python
| 0.999991
|
@@ -1033,17 +1033,16 @@
0.0%5Cucrt
-x
%22):%0D%0A%09%09l
|
bdaf7b8f30b6a3a493cc5246dd908bdcdff69ab8
|
Increase test coverage
|
tests/commands/load/test_load_cnv_report_cmd.py
|
tests/commands/load/test_load_cnv_report_cmd.py
|
# -*- coding: utf-8 -*-
import os
from scout.demo import cnv_report_path
from scout.commands import cli
def test_load_cnv_report(mock_app, case_obj):
"""Testing the load delivery report cli command"""
# Make sure the path to delivery report is a valid path
assert os.path.isfile(cnv_report_path)
runner = mock_app.test_cli_runner()
assert runner
# Test CLI function
result = runner.invoke(
cli,
["load", "cnv-report", case_obj["_id"], cnv_report_path, "-u"],
)
assert "saved report to case!" in result.output
assert result.exit_code == 0
|
Python
| 0
|
@@ -420,29 +420,12 @@
oke(
-%0A cli,%0A
+cli,
%5B%22l
@@ -484,21 +484,16 @@
, %22-u%22%5D,
-%0A
)%0A%0A a
@@ -572,8 +572,404 @@
de == 0%0A
+%0A%0Adef test_invalid_path_load_cnv_report(mock_app, case_obj):%0A %22%22%22Testing the load delivery report cli command%22%22%22%0A%0A runner = mock_app.test_cli_runner()%0A assert runner%0A%0A # Test CLI function%0A result = runner.invoke(cli, %5B%22load%22, %22cnv-report%22, case_obj%5B%22_id%22%5D, %22invalid-path%22, %22-u%22%5D,)%0A%0A assert 'Path %22invalid-path%22 does not exist.' in result.output%0A assert result.exit_code == 2%0A
|
2ca2792289fd4c426f77395df92a3c2019c7fae5
|
use SafeLoader
|
tests/retime/robustness/test_robustness_main.py
|
tests/retime/robustness/test_robustness_main.py
|
import pytest
import numpy as np
import yaml
import re
import pandas
import tabulate
import time
try:
import pathlib
except ImportError:
import pathlib2 as pathlib
import toppra
import toppra.constraint as constraint
import toppra.algorithm as algo
import matplotlib.pyplot as plt
def test_robustness_main(request):
""" Load problem suite based on regex, run test and report results.
"""
toppra.setup_logging(request.config.getoption("--loglevel"))
problem_regex = request.config.getoption("--robust_regex")
visualize = request.config.getoption("--visualize")
# parse problems from a configuration file
parsed_problems = []
path = pathlib.Path(__file__)
path = path / '../problem_suite_1.yaml'
problem_dict = yaml.load(path.resolve().read_text())
for key in problem_dict:
if len(problem_dict[key]['ss_waypoints']) == 2:
ss_waypoints = np.linspace(problem_dict[key]['ss_waypoints'][0],
problem_dict[key]['ss_waypoints'][1],
len(problem_dict[key]['waypoints']))
for duration in problem_dict[key]['desired_duration']:
for solver_wrapper in problem_dict[key]['solver_wrapper']:
for nb_gridpoints in problem_dict[key]['nb_gridpoints']:
parsed_problems.append({
"name": key,
"problem_id": "{:}-{:5f}-{:}-{:}".format(key, duration, solver_wrapper, nb_gridpoints),
'waypoints': np.array(problem_dict[key]['waypoints'], dtype=float),
'ss_waypoints': ss_waypoints,
'vlim': np.r_[problem_dict[key]['vlim']],
'alim': np.r_[problem_dict[key]['alim']],
'desired_duration': duration,
'solver_wrapper': solver_wrapper,
'gridpoints': np.linspace(ss_waypoints[0], ss_waypoints[-1], nb_gridpoints),
'nb_gridpoints': nb_gridpoints
})
parsed_problems_df = pandas.DataFrame(parsed_problems)
# solve problems that matched the given regex
all_success = True
for row_index, problem_data in parsed_problems_df.iterrows():
if re.match(problem_regex, problem_data['problem_id']) is None:
continue
t0 = time.time()
path = toppra.SplineInterpolator(
problem_data['ss_waypoints'],
problem_data['waypoints'], bc_type='clamped')
vlim = np.vstack((- problem_data['vlim'], problem_data['vlim'])).T
alim = np.vstack((- problem_data['alim'], problem_data['alim'])).T
pc_vel = constraint.JointVelocityConstraint(vlim)
pc_acc = constraint.JointAccelerationConstraint(
alim, discretization_scheme=constraint.DiscretizationType.Interpolation)
t1 = time.time()
if problem_data['desired_duration'] == 0:
instance = algo.TOPPRA([pc_vel, pc_acc], path, gridpoints=problem_data['gridpoints'],
solver_wrapper=problem_data['solver_wrapper'])
else:
instance = algo.TOPPRAsd([pc_vel, pc_acc], path, gridpoints=problem_data['gridpoints'],
solver_wrapper=problem_data['solver_wrapper'])
instance.set_desired_duration(problem_data['desired_duration'])
t2 = time.time()
jnt_traj = instance.compute_trajectory(0, 0)
data = instance.problem_data
t3 = time.time()
if visualize:
_t = np.linspace(0, jnt_traj.duration, 100)
fig, axs = plt.subplots(2, 2)
axs[0, 0].plot(data["K"][:, 0], c="C0")
axs[0, 0].plot(data["K"][:, 1], c="C0")
axs[0, 0].plot(data["sd"] ** 2, c="C1")
axs[0, 1].plot(_t, jnt_traj(_t))
axs[1, 0].plot(_t, jnt_traj(_t, 1))
axs[1, 1].plot(_t, jnt_traj(_t, 2))
axs[0, 0].set_title("param")
axs[0, 1].set_title("jnt. pos.")
axs[1, 0].set_title("jnt. vel.")
axs[1, 1].set_title("jnt. acc.")
plt.show()
if jnt_traj is None:
all_success = False
parsed_problems_df.loc[row_index, "status"] = "FAIL"
parsed_problems_df.loc[row_index, "duration"] = None
else:
parsed_problems_df.loc[row_index, "status"] = "SUCCESS"
parsed_problems_df.loc[row_index, "duration"] = jnt_traj.duration
parsed_problems_df.loc[row_index, "t_init(ms)"] = (t1 - t0) * 1e3
parsed_problems_df.loc[row_index, "t_setup(ms)"] = (t2 - t1) * 1e3
parsed_problems_df.loc[row_index, "t_solve(ms)"] = (t3 - t2) * 1e3
# get all rows with status different from NaN, then reports other columns.
result_df = parsed_problems_df[parsed_problems_df["status"].notna()][
["status", "duration", "desired_duration", "name", "solver_wrapper",
"nb_gridpoints", "problem_id", "t_init(ms)", "t_setup(ms)", "t_solve(ms)"]]
result_df.to_csv('%s.result' % __file__)
print("Test summary\n")
print(tabulate.tabulate(result_df, result_df.columns))
assert all_success, "Unable to solve some problems in the test suite"
|
Python
| 0
|
@@ -790,16 +790,40 @@
d_text()
+, Loader=yaml.SafeLoader
)%0A fo
|
a403dff24e33e6b0ef0b31b4342a9b978f9090f2
|
Improve admin display
|
src/cerberus_ac/admin.py
|
src/cerberus_ac/admin.py
|
# -*- coding: utf-8 -*-
"""Admin module."""
from django.contrib import admin
from .models import (
AccessHistory, PrivilegeHistory, Role, RoleHierarchy, RolePrivilege)
# class SecurityAdmin(AdminSite):
# pass
#
#
# class DataAdmin(AdminSite):
# pass
#
#
# class AuditAdmin(AdminSite):
# pass
#
#
# security_admin_site = SecurityAdmin(name='SecurityAdmin')
# data_admin_site = DataAdmin(name='DataAdmin')
# audit_admin_site = AuditAdmin(name='AuditAdmin')
#
# Use decorator like @security_admin_site.register(AccessHistory)
# TODO: override save_model methods for history
# https://docs.djangoproject.com/en/1.10/ref/contrib/admin/#django.contrib.admin.ModelAdmin.save_model
class RoleAdmin(admin.ModelAdmin):
"""Role admin class."""
class RolePrivilegeAdmin(admin.ModelAdmin):
"""Role privilege admin class."""
class RoleHierarchyAdmin(admin.ModelAdmin):
"""Role hierarchy admin class."""
class AccessHistoryAdmin(admin.ModelAdmin):
"""Acces history admin class."""
class PrivilegeHistoryAdmin(admin.ModelAdmin):
"""Privilege history admin class."""
# class HierarchyHistoryAdmin(admin.ModelAdmin):
# pass
admin.site.register(Role, RoleAdmin)
admin.site.register(RolePrivilege, RolePrivilegeAdmin)
admin.site.register(RoleHierarchy, RoleHierarchyAdmin)
admin.site.register(AccessHistory, AccessHistoryAdmin)
admin.site.register(PrivilegeHistory, PrivilegeHistoryAdmin)
# admin.site.register(HierarchyHistory, HierarchyHistoryAdmin)
|
Python
| 0
|
@@ -72,16 +72,193 @@
t admin%0A
+from django.core.urlresolvers import reverse%0Afrom django.utils.safestring import mark_safe%0Afrom django.utils.translation import ugettext_lazy as _%0A%0Afrom .apps import AppSettings
%0Afrom .m
@@ -1096,24 +1096,101 @@
class.%22%22%22%0A%0A
+ list_display = ('role_type_a', 'role_id_a', 'role_type_b', 'role_id_b')%0A%0A
%0Aclass Acces
@@ -1264,96 +1264,1508 @@
%22%22%0A%0A
-%0Aclass PrivilegeHistoryAdmin(admin.ModelAdmin):%0A %22%22%22Privilege history admin class.%22%22%22
+ list_display = (%0A 'role_type',%0A 'role_id',%0A 'response',%0A 'response_type',%0A 'access_type',%0A 'resource_type',%0A 'resource_id',%0A 'datetime',%0A 'conveyor_type',%0A 'conveyor_id'%0A )%0A%0A%0Aclass PrivilegeHistoryAdmin(admin.ModelAdmin):%0A %22%22%22Privilege history admin class.%22%22%22%0A%0A list_display = (%0A 'datetime',%0A 'user',%0A 'action',%0A 'role_type',%0A 'role_id',%0A 'role_link',%0A 'authorized',%0A 'access_type',%0A 'resource_type',%0A 'resource_id',%0A 'resource_link')%0A%0A def role_link(self, obj):%0A instance = AppSettings.get_mapping().instance_from_name_and_id(%0A obj.resource_type, obj.resource_id)%0A info = (instance._meta.app_label, instance._meta.model_name)%0A admin_url = reverse('admin:%25s_%25s_change' %25 info,%0A args=(instance.pk,))%0A return mark_safe('%3Ca href=%22%25s%22%3E%25s%3C/a%3E' %25 (admin_url, instance))%0A role_link.short_description = _('Role link')%0A%0A def resource_link(self, obj):%0A instance = AppSettings.get_mapping().instance_from_name_and_id(%0A obj.resource_type, obj.resource_id)%0A info = (instance._meta.app_label, instance._meta.model_name)%0A admin_url = reverse('admin:%25s_%25s_change' %25 info,%0A args=(instance.pk,))%0A return mark_safe('%3Ca href=%22%25s%22%3E%25s%3C/a%3E' %25 (admin_url, instance))%0A resource_link.short_description = _('Resource link')
%0A%0A%0A#
|
8a29107eb7c061b0559081ddea263b5ddaa9d804
|
Modify return to handle cases with fewer than 10 documents
|
search.py
|
search.py
|
import getopt
import sys
import pickle
from nltk.stem.porter import PorterStemmer
from nltk.tokenize import word_tokenize
from collections import Counter
import math
import string
import operator
import heapq
from pprint import pprint
dictionary = {}
postings_file = None
postings_sizes = []
starting_byte_offset = 0
all_doc_ids = []
def usage():
print("usage: " + sys.argv[0] + " -d dictionary-file -p postings-file -q file-of-queries -l lengths-file -o output-file-of-results")
def getPosting(index_of_term):
# calculate byte offset
posting_offset = 0 if index_of_term - 1 < 0 else postings_sizes[index_of_term - 1]
byte_offset = starting_byte_offset + posting_offset
postings_file.seek(byte_offset, 0)
posting = pickle.load(postings_file)
return posting
def preprocess_query(query):
stemmer = PorterStemmer()
punctuations = set(string.punctuation)
return Counter([stemmer.stem(token) for token in word_tokenize(query.lower()) if token not in punctuations])
def handleQuery(query):
query = preprocess_query(query)
scores = {} # To be replaced by heapq
query_weights = []
for term, query_tf in query.items():
if term in dictionary:
dict_entry = dictionary.get(term)
postings_entry = getPosting(dict_entry['index'])
idf = math.log10(len(lengths) / dict_entry['doc_freq'])
query_tf_weight = 1 + math.log10(query_tf)
for doc_id, doc_tf in postings_entry:
doc_tf_weight = 1 + math.log10(doc_tf)
if doc_id not in scores:
scores[doc_id] = 0
scores[doc_id] += doc_tf_weight * query_tf_weight * idf
query_weights.append(query_tf_weight * idf)
query_l2_norm = math.sqrt(sum([math.pow(1 + math.log10(query_weight), 2) for query_weight in query_weights]))
for doc_id, score in scores.items():
scores[doc_id] /= lengths[doc_id] * query_l2_norm
#heapq by default is min heap, so * -1 to all score value
scores_heap = [(-score, doc_id) for doc_id, score in scores.items()]
heapq.heapify(scores_heap)
result = []
for i in range(10):
result.append(heapq.heappop(scores_heap)[1])
return result
if __name__ == '__main__':
dict_path = postings_path = query_path = output_path = lengths_path = None
try:
opts, args = getopt.getopt(sys.argv[1:], 'd:p:q:o:l:')
except getopt.GetoptError as err:
usage()
sys.exit(2)
for o, a in opts:
if o == '-d':
dict_path = a
elif o == '-p':
postings_path = a
elif o == '-q':
query_path = a
elif o == '-o':
output_path = a
elif o == '-l':
lengths_path = a
else:
assert False, "unhandled option"
if dict_path == None or postings_path == None or query_path == None or output_path == None or lengths_path == None:
usage()
sys.exit(2)
with open(dict_path, 'rb') as f:
dictionary = pickle.load(f)
with open(lengths_path, 'rb') as f:
lengths = pickle.load(f)
# load postings object sizes to calculate seek offset from current position of file
postings_file = open(postings_path, 'rb')
postings_sizes = pickle.load(postings_file)
starting_byte_offset = postings_file.tell()
output_file = open(output_path, 'w')
with open(query_path, 'r') as f:
for line in f:
line = line.strip()
if line != '':
result = handleQuery(line)
output = ' '.join(result)
print('OUTPUT', output)
output_file.write(output + '\n')
output_file.close()
postings_file.close()
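
A hedged standalone sketch of the negated-score heap trick used in handleQuery, with a min() guard equivalent to the fix this commit makes (the scores are made up):

import heapq

scores = {'d1': 0.8, 'd2': 0.3, 'd3': 0.9}
# heapq is a min-heap, so scores are negated to pop the largest first;
# min() protects against fewer than k scored documents.
scores_heap = [(-score, doc_id) for doc_id, score in scores.items()]
heapq.heapify(scores_heap)
k = 10
top = [heapq.heappop(scores_heap)[1] for _ in range(min(k, len(scores_heap)))]
print(top)  # ['d3', 'd1', 'd2']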
|
Python
| 0.000001
|
@@ -1951,102 +1951,175 @@
)%0A%0A%09
-result = %5B%5D%0A%09for i in range(10):%0A%09%09result.append(heapq.heappop(scores_heap)%5B1%5D)%0A%09return result
+if len(scores_heap) %3E= 10:%0A%09%09return %5Bheapq.heappop(scores_heap)%5B1%5D for i in range(10)%5D%0A%09else:%0A%09%09return %5Bheapq.heappop(scores_heap)%5B1%5D for i in range(len(scores_heap))%5D
%0A%0Aif
|
5103ac24b5513441ea1ea318cfdb7b6a6425b648
|
Add query weight calculations
|
search.py
|
search.py
|
import getopt
import sys
import pickle
from nltk.stem.porter import PorterStemmer
from nltk.tokenize import word_tokenize
import math
import string
import operator
dictionary = {}
postings_file = None
postings_sizes = []
starting_byte_offset = 0
all_doc_ids = []
def usage():
print("usage: " + sys.argv[0] + " -d dictionary-file -p postings-file -q file-of-queries -l lengths-file -o output-file-of-results")
def getPosting(index_of_term):
# calculate byte offset
posting_offset = 0 if index_of_term - 1 < 0 else postings_sizes[index_of_term - 1]
byte_offset = starting_byte_offset + posting_offset
postings_file.seek(byte_offset, 0)
posting = pickle.load(postings_file)
return posting
def preprocess_query(query):
stemmer = PorterStemmer()
punctuations = set(string.punctuation)
return [stemmer.stem(token) for token in word_tokenize(query.lower()) if token not in punctuations]
def handleQuery(query):
query = preprocess_query(query)
scores = {} # To be replaced by heapq
for term in query:
if term in dictionary:
dict_entry = dictionary.get(term)
postings_entry = getPosting(dict_entry['index'])
idf = math.log10(len(lengths) / dict_entry['doc_freq'])
for doc_id, term_freq in postings_entry:
tf = 1 + math.log10(term_freq)
if doc_id not in scores:
scores[doc_id] = 0
scores[doc_id] += tf * idf / lengths[doc_id]
return [item[0] for item in sorted(scores.items(), key=lambda x:operator.itemgetter(1)(x), reverse=True)[:10]]
if __name__ == '__main__':
dict_path = postings_path = query_path = output_path = lengths_path = None
try:
opts, args = getopt.getopt(sys.argv[1:], 'd:p:q:o:l:')
except getopt.GetoptError as err:
usage()
sys.exit(2)
for o, a in opts:
if o == '-d':
dict_path = a
elif o == '-p':
postings_path = a
elif o == '-q':
query_path = a
elif o == '-o':
output_path = a
elif o == '-l':
lengths_path = a
else:
assert False, "unhandled option"
if dict_path == None or postings_path == None or query_path == None or output_path == None or lengths_path == None:
usage()
sys.exit(2)
with open(dict_path, 'rb') as f:
dictionary = pickle.load(f)
with open(lengths_path, 'rb') as f:
lengths = pickle.load(f)
# load postings object sizes to calculate seek offset from current position of file
postings_file = open(postings_path, 'rb')
postings_sizes = pickle.load(postings_file)
starting_byte_offset = postings_file.tell()
output_file = open(output_path, 'w')
with open(query_path, 'r') as f:
for line in f:
line = line.strip()
if line != '':
try:
result = handleQuery(line)
output = ' '.join(result)
output_file.write(output + '\n')
except Exception as e:
output_file.write('\n')
print('****WARN***** EXCEPTION THROWN', e)
continue
output_file.close()
postings_file.close()
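
For reference, a toy sketch of the seek-offset scheme getPosting relies on (all numbers are made up; postings_sizes is assumed to hold cumulative byte sizes):

postings_sizes = [120, 310, 450]  # assumed cumulative pickled-object sizes
starting_byte_offset = 64         # where the postings data begins

def posting_byte_offset(index_of_term):
    # Posting i starts after the cumulative size of postings 0..i-1.
    prev = 0 if index_of_term - 1 < 0 else postings_sizes[index_of_term - 1]
    return starting_byte_offset + prev

print([posting_byte_offset(i) for i in range(3)])  # [64, 184, 374]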
|
Python
| 0.000006
|
@@ -115,16 +115,48 @@
okenize%0A
+from collections import Counter%0A
import m
@@ -826,16 +826,24 @@
%09return
+Counter(
%5Bstemmer
@@ -926,16 +926,17 @@
uations%5D
+)
%0A%0Adef ha
@@ -1030,16 +1030,132 @@
pq%0A%09
-for term
+query_l2_norm = math.sqrt(sum(%5Bmath.pow(1 + math.log10(query_tf), 2) for t, query_tf in query.items()%5D))%0A%09for term, query_tf
in
@@ -1159,16 +1159,24 @@
in query
+.items()
:%0A%09%09if t
@@ -1342,16 +1342,62 @@
freq'%5D)%0A
+%09%09%09query_tf_weight = 1 + math.log10(query_tf)%0A
%09%09%09for d
@@ -1403,25 +1403,22 @@
doc_id,
-term_freq
+doc_tf
in post
@@ -1433,18 +1433,29 @@
ry:%0A%09%09%09%09
+doc_tf_weigh
t
-f
= 1 + m
@@ -1468,17 +1468,14 @@
g10(
-term_freq
+doc_tf
)%0A%09%09
@@ -1551,18 +1551,106 @@
+=
-tf * idf
+doc_tf_weight * query_tf_weight * idf%0A%0A%0A%09for doc_id, score in scores.items():%0A%09%09 scores%5Bdoc_id%5D
/
+=
len
@@ -1661,17 +1661,32 @@
%5Bdoc_id%5D
-%0A
+ * query_l2_norm
%0A%09return
|
b7f4696d1384f656df71332055cd4ea87f85e3c9
|
Bump to v0.2.3.
|
luminoth/__init__.py
|
luminoth/__init__.py
|
__version__ = '0.2.3dev0'
__title__ = 'Luminoth'
__description__ = 'Computer vision toolkit based on TensorFlow'
__uri__ = 'https://luminoth.ai'
__doc__ = __description__ + ' <' + __uri__ + '>'
__author__ = 'Tryolabs'
__email__ = 'luminoth@tryolabs.com'
__license__ = 'BSD 3-Clause License'
__copyright__ = 'Copyright (c) 2018 Tryolabs S.A.'
__min_tf_version__ = '1.5'
import sys
# Check for a current TensorFlow installation.
try:
import tensorflow # noqa: F401
except ImportError:
sys.exit("""Luminoth requires a TensorFlow >= {} installation.
Depending on your use case, you should install either `tensorflow` or
`tensorflow-gpu` packages manually or via PyPI.""".format(__min_tf_version__))
# Import functions that are part of Luminoth's public interface.
from luminoth.cli import cli # noqa
from luminoth.io import read_image # noqa
from luminoth.tasks import Detector # noqa
from luminoth.vis import vis_objects # noqa
|
Python
| 0
|
@@ -17,12 +17,8 @@
.2.3
-dev0
'%0A%0A_
|
ea5e6ca2e6523f0b2a585112b5fd5f18e9fcf969
|
add namespace
|
ds9/library/parser.tac
|
ds9/library/parser.tac
|
%{
%}
%token INT_
%token REAL_
%token STRING_
%token FOOCMD_
%token EXITCMD_
%token CLOSE_
%token FIT_
%token OPEN_
%token TO_
%%
commands : commands command
| command
;
int : INT_ {set _ $1}
;
command : FOOCMD_ foo
| EXITCMD_ exit
;
numeric : int {set _ $1}
| REAL_ {set _ $1}
;
foo : STRING_ {puts "STRING $1"}
| INT_ {puts "INT $1"}
| REAL_ {puts "REAL $1"}
| OPEN_ {puts "OPEN"}
| CLOSE_ {puts "CLOSE"}
| TO_ fooTo
;
fooTo: FIT_ {puts "TO FIT"}
| numeric {puts "TO NUMERIC $1"}
;
exit: {puts "EXIT"; QuitDS9}
;
%%
proc yyerror {s} {
puts stderr "parse error:"
puts stderr "$::yy_buffer"
puts stderr [format "%*s" $::yy_index ^]
}
|
Python
| 0.000015
|
@@ -545,16 +545,20 @@
%25%0A%0Aproc
+yy::
yyerror
@@ -614,16 +614,18 @@
tderr %22$
+yy
::yy_buf
@@ -661,16 +661,18 @@
%22%25*s%22 $
+yy
::yy_ind
|
3a2f56b6c6a0e4a00426d786adc875e494fc323a
|
Make CSV export generic
|
ebmdatalab/bigquery.py
|
ebmdatalab/bigquery.py
|
import psycopg2
from google.cloud import bigquery
from google.cloud.bigquery import SchemaField
import time
import csv
import tempfile
from os import environ
PRESCRIBING_SCHEMA = [
SchemaField('sha', 'STRING'),
SchemaField('pct', 'STRING'),
SchemaField('practice', 'STRING'),
SchemaField('bnf_code', 'STRING'),
SchemaField('bnf_name', 'STRING'),
SchemaField('items', 'INTEGER'),
SchemaField('net_cost', 'FLOAT'),
SchemaField('actual_cost', 'FLOAT'),
SchemaField('quantity', 'INTEGER'),
SchemaField('month', 'TIMESTAMP'),
]
PRESENTATION_SCHEMA = [
SchemaField('bnf_code', 'STRING'),
SchemaField('name', 'STRING'),
SchemaField('is_generic', 'BOOLEAN'),
SchemaField('active_quantity', 'FLOAT'),
SchemaField('adq', 'FLOAT'),
SchemaField('adq_unit', 'STRING'),
SchemaField('percent_of_adq', 'FLOAT'),
]
PRACTICE_SCHEMA = [
SchemaField('code', 'STRING'),
SchemaField('name', 'STRING'),
SchemaField('address1', 'STRING'),
SchemaField('address2', 'STRING'),
SchemaField('address3', 'STRING'),
SchemaField('address4', 'STRING'),
SchemaField('address5', 'STRING'),
SchemaField('postcode', 'STRING'),
SchemaField('location', 'STRING'),
SchemaField('area_team_id', 'STRING'),
SchemaField('ccg_id', 'STRING'),
SchemaField('setting', 'INTEGER'),
SchemaField('close_date', 'STRING'),
SchemaField('join_provider_date', 'STRING'),
SchemaField('leave_provider_date', 'STRING'),
SchemaField('open_date', 'STRING'),
SchemaField('status_code', 'STRING'),
]
PRACTICE_STATISTICS_SCHEMA = [
SchemaField('month', 'TIMESTAMP'),
SchemaField('male_0_4', 'INTEGER'),
SchemaField('female_0_4', 'INTEGER'),
SchemaField('male_5_14', 'INTEGER'),
SchemaField('male_15_24', 'INTEGER'),
SchemaField('male_25_34', 'INTEGER'),
SchemaField('male_35_44', 'INTEGER'),
SchemaField('male_45_54', 'INTEGER'),
SchemaField('male_55_64', 'INTEGER'),
SchemaField('male_65_74', 'INTEGER'),
SchemaField('male_75_plus', 'INTEGER'),
SchemaField('female_5_14', 'INTEGER'),
SchemaField('female_15_24', 'INTEGER'),
SchemaField('female_25_34', 'INTEGER'),
SchemaField('female_35_44', 'INTEGER'),
SchemaField('female_45_54', 'INTEGER'),
SchemaField('female_55_64', 'INTEGER'),
SchemaField('female_65_74', 'INTEGER'),
SchemaField('female_75_plus', 'INTEGER'),
SchemaField('total_list_size', 'INTEGER'),
SchemaField('astro_pu_cost', 'FLOAT'),
SchemaField('astro_pu_items', 'FLOAT'),
SchemaField('star_pu', 'STRING'),
SchemaField('pct_id', 'STRING'),
SchemaField('practice', 'STRING')
]
def get_env_setting(setting, default=None):
""" Get the environment setting.
    Return the default, or raise an exception if none is supplied.
"""
try:
return environ[setting]
except KeyError:
if default:
return default
else:
error_msg = "Set the %s env variable" % setting
raise StandardError(error_msg)
def load_data_from_file(
dataset_name, table_name,
source_file_name, schema, _transform=None):
client = bigquery.Client(project='ebmdatalab')
dataset = client.dataset(dataset_name)
table = dataset.table(
table_name,
schema=schema)
if not table.exists():
table.create()
table.reload()
with tempfile.TemporaryFile(mode='rb+') as csv_file:
with open(source_file_name, 'rb') as source_file:
writer = csv.writer(csv_file)
reader = csv.reader(source_file)
for row in reader:
if _transform:
row = _transform(row)
writer.writerow(row)
job = table.upload_from_file(
csv_file, source_format='text/csv',
create_disposition="CREATE_IF_NEEDED",
write_disposition="WRITE_TRUNCATE",
rewind=True)
wait_for_job(job)
def load_prescribing_data_from_file(
dataset_name, table_name, source_file_name):
def _transform(row):
# To match the prescribing table format in BigQuery, we have
# to re-encode the date field as a bigquery TIMESTAMP and drop
# a couple of columns
row[10] = "%s 00:00:00" % row[10]
del(row[3])
del(row[-1])
return row
return load_data_from_file(
dataset_name, table_name,
source_file_name, PRESCRIBING_SCHEMA, _transform=_transform)
def load_statistics_from_pg():
def _transform(row):
row[0] = "%s 00:00:00" % row[0]
return row
schema = PRACTICE_STATISTICS_SCHEMA
cols = [x.name for x in schema]
cols[0] = 'date'
cols[-1] = 'practice_id'
load_data_from_pg(
'hscic', 'practice_statistics', 'frontend_practicestatistics',
schema, cols=cols, _transform=_transform)
def load_presentation_from_pg():
def _transform(row):
if row[2] == 't':
row[2] = 'true'
else:
row[2] = 'false'
return row
load_data_from_pg(
'hscic', 'presentation', 'frontend_presentation',
PRESENTATION_SCHEMA, _transform=_transform)
def load_data_from_pg(dataset_name, bq_table_name,
pg_table_name, schema, cols=None, _transform=None):
"""Loads every row currently in the `frontend_practice` table to the
specified table in BigQuery
"""
db_name = get_env_setting('DB_NAME')
db_user = get_env_setting('DB_USER')
db_pass = get_env_setting('DB_PASS')
db_host = get_env_setting('DB_HOST', '127.0.0.1')
conn = psycopg2.connect(database=db_name, user=db_user,
password=db_pass, host=db_host)
with tempfile.NamedTemporaryFile(mode='r+b') as csv_file:
if not cols:
cols = [x.name for x in schema]
conn.cursor().copy_to(
csv_file, pg_table_name, sep=',', null='', columns=cols)
csv_file.seek(0)
load_data_from_file(
dataset_name, bq_table_name,
csv_file.name,
schema,
_transform
)
conn.commit()
conn.close()
def wait_for_job(job):
while True:
job.reload()
if job.state == 'DONE':
if job.error_result:
raise RuntimeError(job.error_result)
return
time.sleep(1)
|
Python
| 0.000118
|
@@ -5851,29 +5851,62 @@
-conn.cursor().copy_to
+sql = %22COPY %25s(%25s) TO STDOUT (FORMAT CSV, NULL '')%22 %25
(%0A
@@ -5914,26 +5914,16 @@
- csv_file,
pg_tabl
@@ -5934,38 +5934,84 @@
me,
-sep=',', null='', columns=cols
+%22,%22.join(cols))%0A conn.cursor().copy_expert(%0A sql, csv_file
)%0A
|
d0e28e5049ecd70bb67ab0cc565e5f22986ddee7
|
fix exception when raising exceptions!
|
elfinder/exceptions.py
|
elfinder/exceptions.py
|
from django.utils.translation import ugettext as _
class ElfinderErrorMessages:
"""
Standard error message codes, the text message of which is handled by the
elFinder client
"""
ERROR_UNKNOWN = 'errUnknown'
ERROR_UNKNOWN_CMD = 'errUnknownCmd'
ERROR_CONF = 'errConf'
ERROR_CONF_NO_JSON = 'errJSON'
ERROR_CONF_NO_VOL = 'errNoVolumes'
ERROR_INV_PARAMS = 'errCmdParams'
ERROR_OPEN = 'errOpen'
ERROR_DIR_NOT_FOUND = 'errFolderNotFound'
ERROR_FILE_NOT_FOUND = 'errFileNotFound' #'File not found.'
ERROR_TRGDIR_NOT_FOUND = 'errTrgFolderNotFound' #'Target folder "$1" not found.'
ERROR_NOT_DIR = 'errNotFolder'
ERROR_NOT_FILE = 'errNotFile'
ERROR_PERM_DENIED = 'errPerm'
ERROR_LOCKED = 'errLocked' #'"$1" is locked and can not be renamed, moved or removed.'
ERROR_EXISTS = 'errExists' #'File named "$1" already exists.'
ERROR_INVALID_NAME = 'errInvName' #'Invalid file name.'
ERROR_MKDIR = 'errMkdir'
ERROR_MKFILE = 'errMkfile'
ERROR_RENAME = 'errRename'
ERROR_COPY = 'errCopy'
ERROR_MOVE = 'errMove'
ERROR_COPY_FROM = 'errCopyFrom'
ERROR_COPY_TO = 'errCopyTo'
ERROR_COPY_ITSELF = 'errCopyInItself'
ERROR_REPLACE = 'errReplace' #'Unable to replace "$1".'
ERROR_RM = 'errRm' #'Unable to remove "$1".'
ERROR_RM_SRC = 'errRmSrc' #'Unable remove source file(s)'
ERROR_UPLOAD = 'errUpload' #'Upload error.'
ERROR_UPLOAD_FILE = 'errUploadFile' #'Unable to upload "$1".'
ERROR_UPLOAD_NO_FILES = 'errUploadNoFiles' #'No files found for upload.'
ERROR_UPLOAD_TOTAL_SIZE = 'errUploadTotalSize' #'Data exceeds the maximum allowed size.'
ERROR_UPLOAD_FILE_SIZE = 'errUploadFileSize' #'File exceeds maximum allowed size.'
ERROR_UPLOAD_FILE_MIME = 'errUploadMime' #'File type not allowed.'
ERROR_UPLOAD_TRANSFER = 'errUploadTransfer' #'"$1" transfer error.'
ERROR_ACCESS_DENIED = 'errAccess'
ERROR_NOT_REPLACE = 'errNotReplace' #Object "$1" already exists at this location and can not be replaced with object of another type.
ERROR_SAVE = 'errSave'
ERROR_EXTRACT = 'errExtract'
ERROR_ARCHIVE = 'errArchive'
ERROR_NOT_ARCHIVE = 'errNoArchive'
ERROR_ARCHIVE_TYPE = 'errArcType'
ERROR_ARC_SYMLINKS = 'errArcSymlinks'
ERROR_ARC_MAXSIZE = 'errArcMaxSize'
ERROR_RESIZE = 'errResize'
ERROR_UNSUPPORT_TYPE = 'errUsupportType'
ERROR_NOT_UTF8_CONTENT = 'errNotUTF8Content'
ERROR_NETMOUNT = 'errNetMount'
ERROR_NETMOUNT_NO_DRIVER = 'errNetMountNoDriver'
ERROR_NETMOUNT_FAILED = 'errNetMountFailed'
class VolumeNotFoundError(Exception):
def __init__(self):
super(VolumeNotFoundError, self).__init__(_("Volume could not be found"))
class FileNotFoundError(Exception):
def __init__(self):
super(FileNotFoundError, self).__init__(ElfinderErrorMessages.ERROR_FILE_NOT_FOUND)
class DirNotFoundError(Exception):
def __init__(self):
super(DirNotFoundError, self).__init__(ElfinderErrorMessages.ERROR_DIR_NOT_FOUND)
class PermissionDeniedError(Exception):
def __init__(self):
super(DirNotFoundError, self).__init__(ElfinderErrorMessages.ERROR_PERM_DENIED)
class NamedError(Exception):
"""
Elfinder-specific exception.
`msg` contains the error code
`name` holds the path for which operation failed
"""
def __init__(self, msg, name):
self.name = name
super(NamedError, self).__init__(msg)
class NotAnImageError(Exception):
def __init__(self):
super(DirNotFoundError, self).__init__(_('This is not a valid image file'))
|
Python
| 0
|
@@ -3118,34 +3118,39 @@
super(
-DirNotFoun
+PermissionDenie
dError, self
@@ -3551,35 +3551,34 @@
super(
-DirNotFound
+NotAnImage
Error, self)
|
818d89c897603eeb33caf1ca2cdaeae5c3010880
|
Use passed directory in mako engine.
|
engines/mako_engine.py
|
engines/mako_engine.py
|
#!/usr/bin/env python
"""Provide the mako templating engine."""
from __future__ import print_function
from mako.template import Template
from mako.lookup import TemplateLookup
from . import Engine
class MakoEngine(Engine):
"""Mako templating engine."""
handle = 'mako'
def __init__(self, template, tolerant=False, **kwargs):
"""Initialize mako template."""
super(MakoEngine, self).__init__(**kwargs)
default_filters = ['filter_undefined'] if tolerant else None
encoding_errors = 'replace' if tolerant else 'strict'
imports = ['def filter_undefined(value):\n'
' if value is UNDEFINED:\n'
' return \'<UNDEFINED>\'\n'
' return value\n']
lookup = TemplateLookup(directories=['.'])
self.template = Template(template,
default_filters=default_filters,
encoding_errors=encoding_errors,
imports=imports,
lookup=lookup,
strict_undefined=not tolerant,
)
def apply(self, mapping):
"""Apply a mapping of name-value-pairs to a template."""
return self.template.render(**mapping)
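
A hedged usage sketch (the template text and values are hypothetical, and it assumes the engines package is importable and that the Engine base class needs no extra arguments):

engine = MakoEngine('Hello, ${name}!', tolerant=True)
print(engine.apply({'name': 'world'}))  # Hello, world!
# With tolerant=True, an undefined name should render as '<UNDEFINED>'
# instead of raising, via the filter_undefined default filter above.
print(engine.apply({}))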
|
Python
| 0
|
@@ -311,16 +311,30 @@
emplate,
+ dirname=None,
toleran
@@ -445,16 +445,140 @@
wargs)%0A%0A
+ directories = %5Bdirname%5D if dirname is not None else %5B'.'%5D%0A lookup = TemplateLookup(directories=directories)%0A%0A
@@ -901,59 +901,8 @@
n'%5D%0A
- lookup = TemplateLookup(directories=%5B'.'%5D)%0A
|
cb59aaff2d120ffa5f15b822ec21d3137f90184f
|
Bump logistic_requisition version - fixup
|
logistic_requisition/__openerp__.py
|
logistic_requisition/__openerp__.py
|
# -*- coding: utf-8 -*-
#
#
# Author: Joël Grand-Guillaume, Jacques-Etienne Baudoux, Guewen Baconnier
# Copyright 2013-2014 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
{"name": "Logistics Requisition",
"version": "1.4",
"author": "Camptocamp,Odoo Community Association (OCA)",
"license": "AGPL-3",
"category": "Purchase Management",
'complexity': "normal",
"images": [],
"website": "http://www.camptocamp.com",
"depends": ["sale_sourced_by_line",
"sale_owner_stock_sourcing",
"stock_dropshipping",
"purchase",
"purchase_requisition_bid_selection",
"mail",
"logistic_order",
"logistic_consignee",
"ngo_purchase",
"transport_information",
"purchase_requisition_transport_document",
"purchase_requisition_transport_multi_address",
"sale_transport_multi_address",
],
"demo": ['data/logistic_requisition_demo.xml'],
"data": ["wizard/assign_line_view.xml",
"wizard/cost_estimate_view.xml",
"wizard/logistic_requisition_cancel_view.xml",
"security/logistic_requisition.xml",
"security/ir.model.access.csv",
"data/logistic_requisition_data.xml",
"data/logistic_requisition_sequence.xml",
"view/logistic_requisition.xml",
"view/sale_order.xml",
"view/stock.xml",
"view/cancel_reason.xml",
"view/purchase_order.xml",
"view/report_logistic_requisition.xml",
"logistic_requisition_report.xml",
"data/logistic.requisition.cancel.reason.csv",
],
"test": ['test/line_assigned.yml',
'test/requisition_create_cost_estimate.yml',
'test/requisition_create_cost_estimate_only.yml',
'test/requisition_sourcing_with_tender.yml',
'test/requisition_cancel_reason.yml',
'test/logistic_requisition_report_test.yml',
],
'css': ['static/src/css/logistic_requisition.css'],
'installable': True,
'auto_install': False,
}
|
Python
| 0
|
@@ -879,16 +879,18 @@
n%22: %221.4
+.1
%22,%0A %22aut
|
dcbc52ba5f4c4e82dbad8b37064b6dae18302468
|
add hostManagementIface to autogeneration
|
xos/synchronizer/steps/sync_onos_netcfg.py
|
xos/synchronizer/steps/sync_onos_netcfg.py
|
import os
import requests
import socket
import sys
import base64
import json
from synchronizers.base.syncstep import SyncStep
from core.models import Service, Slice, Controller, ControllerSlice, ControllerUser, Node, TenantAttribute, Tag
from services.vtn.models import VTNService
from xos.logger import Logger, logging
logger = Logger(level=logging.INFO)
class SyncONOSNetcfg(SyncStep):
provides=[]
observes=None
requested_interval=0
def __init__(self, **args):
SyncStep.__init__(self, **args)
def get_node_tag(self, node, tagname):
tags = Tag.select_by_content_object(node).filter(name=tagname)
return tags[0].value
def get_tenants_who_want_config(self):
tenants = []
# attribute is comma-separated list
for ta in TenantAttribute.objects.filter(name="autogenerate"):
if ta.value:
for config in ta.value.split(','):
if config == "vtn-network-cfg":
tenants.append(ta.tenant)
return tenants
def save_tenant_attribute(self, tenant, name, value):
tas = TenantAttribute.objects.filter(tenant=tenant, name=name)
if tas:
ta = tas[0]
if ta.value != value:
logger.info("updating %s with attribute" % name)
ta.value = value
ta.save()
else:
logger.info("saving autogenerated config %s" % name)
ta = TenantAttribute(tenant=tenant, name=name, value=value)
ta.save()
# This function currently assumes a single Deployment and Site
def get_onos_netcfg(self, vtn):
privateGatewayMac = vtn.privateGatewayMac
localManagementIp = vtn.localManagementIp
ovsdbPort = vtn.ovsdbPort
sshPort = vtn.sshPort
sshUser = vtn.sshUser
sshKeyFile = vtn.sshKeyFile
mgmtSubnetBits = vtn.mgmtSubnetBits
xosEndpoint = vtn.xosEndpoint
xosUser = vtn.xosUser
xosPassword = vtn.xosPassword
data = {
"apps" : {
"org.opencord.vtn" : {
"cordvtn" : {
"privateGatewayMac" : privateGatewayMac,
"localManagementIp": localManagementIp,
"ovsdbPort": ovsdbPort,
"ssh": {
"sshPort": sshPort,
"sshUser": sshUser,
"sshKeyFile": sshKeyFile
},
"xos": {
"endpoint": xosEndpoint,
"user": xosUser,
"password": xosPassword
},
"publicGateways": [],
"nodes" : []
}
}
}
}
# Generate apps->org.opencord.vtn->cordvtn->openstack
controllers = Controller.objects.all()
if controllers:
controller = controllers[0]
keystone_server = controller.auth_url
user_name = controller.admin_user
tenant_name = controller.admin_tenant
password = controller.admin_password
openstack = {
"endpoint": keystone_server,
"tenant": tenant_name,
"user": user_name,
"password": password
}
data["apps"]["org.opencord.vtn"]["cordvtn"]["openstack"] = openstack
# Generate apps->org.opencord.vtn->cordvtn->nodes
nodes = Node.objects.all()
for node in nodes:
nodeip = socket.gethostbyname(node.name)
try:
bridgeId = self.get_node_tag(node, "bridgeId")
dataPlaneIntf = self.get_node_tag(node, "dataPlaneIntf")
dataPlaneIp = self.get_node_tag(node, "dataPlaneIp")
except:
logger.error("not adding node %s to the VTN configuration" % node.name)
continue
node_dict = {
"hostname": node.name,
"hostManagementIp": "%s/%s" % (nodeip, mgmtSubnetBits),
"bridgeId": bridgeId,
"dataPlaneIntf": dataPlaneIntf,
"dataPlaneIp": dataPlaneIp
}
data["apps"]["org.opencord.vtn"]["cordvtn"]["nodes"].append(node_dict)
# Generate apps->org.onosproject.cordvtn->cordvtn->publicGateways
# Pull the gateway information from vRouter
try:
from services.vrouter.models import VRouterService
vrouters = VRouterService.get_service_objects().all()
if vrouters:
for gateway in vrouters[0].get_gateways():
gatewayIp = gateway['gateway_ip'].split('/',1)[0]
gatewayMac = gateway['gateway_mac']
gateway_dict = {
"gatewayIp": gatewayIp,
"gatewayMac": gatewayMac
}
data["apps"]["org.opencord.vtn"]["cordvtn"]["publicGateways"].append(gateway_dict)
except:
logger.info("No VRouter service present, not adding publicGateways to config")
return json.dumps(data, indent=4, sort_keys=True)
def call(self, **args):
vtn_service = VTNService.get_service_objects().all()
if not vtn_service:
raise Exception("No VTN Service")
vtn_service = vtn_service[0]
# Check for autogenerate attribute
netcfg = self.get_onos_netcfg(vtn_service)
tenants = self.get_tenants_who_want_config()
for tenant in tenants:
self.save_tenant_attribute(tenant, "rest_onos/v1/network/configuration/", netcfg)
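
For reference, a toy rendering of the per-node fragment get_onos_netcfg builds (every value here is a placeholder, not taken from a real deployment):

import json

node_dict = {
    "hostname": "compute-1",
    "hostManagementIp": "10.1.0.14/24",
    "bridgeId": "of:0000000000000001",
    "dataPlaneIntf": "fabric",
    "dataPlaneIp": "10.6.1.2/24",
}
print(json.dumps({"nodes": [node_dict]}, indent=4, sort_keys=True))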
|
Python
| 0.000001
|
@@ -4331,32 +4331,236 @@
p%0A %7D%0A
+%0A # this one is optional%0A try:%0A node_dict%5B%22hostManagementIface%22%5D = self.get_node_tag(node, %22hostManagementIface%22)%0A except IndexError:%0A pass%0A%0A
data
|
13260904c6d34d7554eea8152ceaa2ee8601e3e9
|
Add missing import
|
pulldb/volumes.py
|
pulldb/volumes.py
|
# Copyright 2013 Russell Heilling
from datetime import datetime
import logging
from math import ceil
import re
from google.appengine.api import search
from google.appengine.ext import ndb
import pycomicvine
from pulldb import base
from pulldb import publishers
from pulldb import subscriptions
from pulldb import util
from pulldb.models.admin import Setting
from pulldb.models import comicvine
from pulldb.models.volumes import Volume, volume_key
class MainPage(base.BaseHandler):
def get(self):
template_values = self.base_template_values()
template = self.templates.get_template('volumes.html')
self.response.write(template.render(template_values))
class Search(base.BaseHandler):
def get(self):
def volume_detail(comicvine_volume):
try:
volume = volume_key(comicvine_volume).get()
subscription = False
subscription_key = subscriptions.subscription_key(volume.key)
if subscription_key:
subscription = subscription_key.urlsafe()
publisher_key = volume.publisher
publisher = None
if publisher_key:
publisher = publisher_key.get()
return {
'volume_key': volume.key.urlsafe(),
'volume': volume,
'publisher': publisher,
'subscribed': bool(subscription),
}
except AttributeError:
logging.warn('Could not look up volume %r', comicvine_volume)
cv = comicvine.load()
query = self.request.get('q')
volume_ids = self.request.get('volume_ids')
page = int(self.request.get('page', 0))
limit = int(self.request.get('limit', 20))
offset = page * limit
if volume_ids:
volumes = re.findall(r'(\d+)', volume_ids)
logging.debug('Found volume ids: %r', volumes)
results = []
for index in range(0, len(volumes), 100):
volume_page = volumes[index:min([index+100, len(volumes)])]
results.append(cv.fetch_volume_batch(volume_page))
results_count = len(results)
logging.debug('Found volumes: %r' % results)
elif query:
results_count, results = cv.search_volume(query, page=page, limit=limit)
logging.debug('Found volumes: %r' % results)
if offset + limit > results_count:
page_end = results_count
else:
page_end = offset + limit
logging.info('Retrieving results %d-%d / %d', offset, page_end,
results_count)
results_page = results[offset:page_end]
template_values = self.base_template_values()
template_values.update({
'query': query,
'volume_ids': volume_ids,
'page': page,
'limit': limit,
'results': (volume_detail(volume) for volume in results_page),
'results_count': results_count,
'page_url': util.StripParam(self.request.url, 'page'),
'page_count': int(ceil(1.0*results_count/limit)),
})
template = self.templates.get_template('volumes_search.html')
self.response.write(template.render(template_values))
class RefreshVolumes(base.BaseHandler):
def get(self):
# When run from cron cycle over all issues weekly
shard_count=7
shard=date.today().weekday()
cv = comicvine.load()
refresh_callback = partial(
refresh_volume_shard, int(shard), int(shard_count), comicvine=cv)
query = Subscription.query(projection=('volume',), distinct=True)
volume_keys = query.map(refresh_callback)
update_count = sum([1 for volume in volume_keys if volume])
status = 'Updated %d/%d volumes' % (
update_count, len(volume_keys))
logging.info(status)
app = base.create_app([
('/volumes', MainPage),
('/volumes/search', Search),
])
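
The page-count arithmetic used above, in isolation (the values are illustrative):

from math import ceil

results_count, limit = 45, 20
print(int(ceil(1.0 * results_count / limit)))  # 3 pages for 45 results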
|
Python
| 0.000466
|
@@ -57,16 +57,22 @@
datetime
+, date
%0Aimport
|
d3057d336332f8315580cc7fa7c6e3d3fb1cbcc8
|
Use a format-string to build the command
|
py3status/modules/taskwarrior.py
|
py3status/modules/taskwarrior.py
|
# -*- coding: utf-8 -*-
"""
Display tasks currently running in taskwarrior.
Configuration parameters:
cache_timeout: refresh interval for this module (default 5)
format: display format for this module (default '{task}')
filter: arguments passed to the command
(default 'start.before:today status:pending')
Format placeholders:
{task} active tasks
Requires
task: https://taskwarrior.org/download/
@author James Smith http://jazmit.github.io/
@license BSD
SAMPLE OUTPUT
{'full_text': '1 Prepare first draft, 2 Buy milk'}
"""
import json
STRING_NOT_INSTALLED = "not installed"
class Py3status:
"""
"""
# available configuration parameters
cache_timeout = 5
format = '{task}'
filter = 'start.before:today status:pending'
def post_config_hook(self):
if not self.py3.check_commands('task'):
raise Exception(STRING_NOT_INSTALLED)
def taskWarrior(self):
def describeTask(taskObj):
return str(taskObj['id']) + ' ' + taskObj['description']
task_command = 'task ' + self.filter + ' export'
task_json = json.loads(self.py3.command_output(task_command))
task_result = ', '.join(map(describeTask, task_json))
return {
'cached_until': self.py3.time_in(self.cache_timeout),
'full_text': self.py3.safe_format(self.format, {'task': task_result})
}
if __name__ == "__main__":
"""
Run module in test mode.
"""
from py3status.module_test import module_test
module_test(Py3status)
|
Python
| 0.00028
|
@@ -1063,19 +1063,28 @@
= 'task
+%25s export
'
-+
+%25
self.fi
@@ -1091,20 +1091,8 @@
lter
- + ' export'
%0A
|
9fc7ce4d0af6e1cb923c8f3a5ca5d693b5c52f4c
|
set SESSION_SAVE_EVERY_REQUEST = True
|
gem/settings/base.py
|
gem/settings/base.py
|
"""
Django settings for base gem.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
from os.path import abspath, dirname, join
from os import environ
from django.conf import global_settings
from django.utils.translation import ugettext_lazy as _
import dj_database_url
# Absolute filesystem path to the Django project directory:
PROJECT_ROOT = dirname(dirname(dirname(abspath(__file__))))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "dqji)!xte^trgai!3c)_4)ftaoevwvbog-i&nl$#ef9xb+y*ab"
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = ['*']
# Base URL to use when referring to full URLs within the Wagtail admin
# backend - e.g. in notification emails. Don't include '/admin' or
# a trailing slash
BASE_URL = 'http://example.com'
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'compressor',
'taggit',
'modelcluster',
'wagtail.wagtailcore',
'wagtail.wagtailadmin',
'wagtail.wagtaildocs',
'wagtail.wagtailsnippets',
'wagtail.wagtailusers',
'wagtail.wagtailsites',
'wagtail.wagtailimages',
'wagtail.wagtailembeds',
'wagtail.wagtailsearch',
'wagtail.wagtailredirects',
'wagtail.wagtailforms',
'molo.core',
'gem',
'molo.profiles',
'mptt',
'django_comments',
'django.contrib.sites',
'molo.commenting',
'molo.yourwords',
'molo.servicedirectory',
'molo.polls',
'raven.contrib.django.raven_compat',
)
COMMENTS_APP = 'molo.commenting'
COMMENTS_FLAG_THRESHHOLD = 3
COMMENTS_HIDE_REMOVED = False
SITE_ID = 1
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'gem.middleware.ForceDefaultLanguageMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'wagtail.wagtailcore.middleware.SiteMiddleware',
'wagtail.wagtailredirects.middleware.RedirectMiddleware',
)
ROOT_URLCONF = 'gem.urls'
WSGI_APPLICATION = 'gem.wsgi.application'
SESSION_COOKIE_AGE = 60 * 10 # 10 minutes
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
# SQLite (simplest install)
DATABASES = {'default': dj_database_url.config(
default='sqlite:///%s' % (join(PROJECT_ROOT, 'db.sqlite3'),))}
# PostgreSQL (Recommended, but requires the psycopg2 library and Postgresql
# development headers)
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.postgresql_psycopg2',
# 'NAME': 'base',
# 'USER': 'postgres',
# 'PASSWORD': '',
# 'HOST': '', # Set to empty string for localhost.
# 'PORT': '', # Set to empty string for default.
# # number of seconds database connections should persist for
# 'CONN_MAX_AGE': 600,
# }
# }
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-gb'
TIME_ZONE = 'Africa/Johannesburg'
USE_I18N = True
USE_L10N = True
USE_TZ = True
LOCALE_PATHS = (
join(PROJECT_ROOT, "locale"),
)
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_ROOT = join(PROJECT_ROOT, 'static')
STATIC_URL = '/static/'
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
MEDIA_ROOT = join(PROJECT_ROOT, 'media')
MEDIA_URL = '/media/'
# Django compressor settings
# http://django-compressor.readthedocs.org/en/latest/settings/
COMPRESS_PRECOMPILERS = (
('text/x-scss', 'django_libsass.SassCompiler'),
)
# Template configuration
TEMPLATE_CONTEXT_PROCESSORS = global_settings.TEMPLATE_CONTEXT_PROCESSORS + (
'django.core.context_processors.request',
'molo.core.context_processors.locale',
'gem.context_processors.default_forms',
)
# Wagtail settings
LOGIN_URL = 'wagtailadmin_login'
LOGIN_REDIRECT_URL = 'wagtailadmin_home'
WAGTAIL_SITE_NAME = "GEM"
# Use Elasticsearch as the search backend for extra performance and better
# search results:
# http://wagtail.readthedocs.org/en/latest/howto/performance.html#search
# http://wagtail.readthedocs.org/en/latest/core_components/
# search/backends.html#elasticsearch-backend
#
# WAGTAILSEARCH_BACKENDS = {
# 'default': {
# 'BACKEND': ('wagtail.wagtailsearch.backends.'
# 'elasticsearch.ElasticSearch'),
# 'INDEX': 'base',
# },
# }
# Whether to use face/feature detection to improve image
# cropping - requires OpenCV
WAGTAILIMAGES_FEATURE_DETECTION_ENABLED = False
IMAGE_COMPRESSION_QUALITY = 85
# Additional strings that need translations from other modules
# molo.polls
_("Log in to vote")
_("Username already exists.")
_("Vote")
_("Show Results")
# The `SITE_STATIC_PREFIX` is appended to certain static files in base.html,
# via a templatetag, so that we can use this for different regions:
# Indonesia vs. Rwanda.
# - the site logo
# - style.css
SITE_STATIC_PREFIX = environ.get('SITE_STATIC_PREFIX', '').lower()
|
Python
| 0.999814
|
@@ -2740,16 +2740,142 @@
ation'%0A%0A
+# GEM-195%0A# Automatically log users out after 10 mins of inactivity%0A# Closing the browser window/tab will NOT end the session%0A
SESSION_
@@ -2908,16 +2908,50 @@
minutes
+%0ASESSION_SAVE_EVERY_REQUEST = True
%0A%0A# Data
|
c4652d7772a0d9b374fc178502a71efd03d35d48
|
Remove hs.parse_eventid
|
synapse/server.py
|
synapse/server.py
|
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file provides some classes for setting up (partially-populated)
# homeservers; either as a full homeserver as a real application, or a small
# partial one for unit test mocking.
# Imports required for the default HomeServer() implementation
from synapse.federation import initialize_http_replication
from synapse.events.utils import serialize_event
from synapse.notifier import Notifier
from synapse.api.auth import Auth
from synapse.handlers import Handlers
from synapse.state import StateHandler
from synapse.storage import DataStore
from synapse.types import EventID
from synapse.util import Clock
from synapse.util.distributor import Distributor
from synapse.util.lockutils import LockManager
from synapse.streams.events import EventSources
from synapse.api.ratelimiting import Ratelimiter
from synapse.crypto.keyring import Keyring
from synapse.events.builder import EventBuilderFactory
class BaseHomeServer(object):
"""A basic homeserver object without lazy component builders.
This will need all of the components it requires to either be passed as
constructor arguments, or the relevant methods overriding to create them.
Typically this would only be used for unit tests.
For every dependency in the DEPENDENCIES list below, this class creates one
method,
def get_DEPENDENCY(self)
which returns the value of that dependency. If no value has yet been set
nor was provided to the constructor, it will attempt to call a lazy builder
method called
def build_DEPENDENCY(self)
which must be implemented by the subclass. This code may call any of the
required "get" methods on the instance to obtain the sub-dependencies that
one requires.
"""
DEPENDENCIES = [
'clock',
'http_client',
'db_name',
'db_pool',
'persistence_service',
'replication_layer',
'datastore',
'handlers',
'auth',
'rest_servlet_factory',
'state_handler',
'room_lock_manager',
'notifier',
'distributor',
'resource_for_client',
'resource_for_federation',
'resource_for_web_client',
'resource_for_content_repo',
'resource_for_server_key',
'resource_for_media_repository',
'event_sources',
'ratelimiter',
'keyring',
'event_builder_factory',
]
def __init__(self, hostname, **kwargs):
"""
Args:
hostname : The hostname for the server.
"""
self.hostname = hostname
self._building = {}
# Other kwargs are explicit dependencies
for depname in kwargs:
setattr(self, depname, kwargs[depname])
@classmethod
def _make_dependency_method(cls, depname):
def _get(self):
if hasattr(self, depname):
return getattr(self, depname)
if hasattr(self, "build_%s" % (depname)):
# Prevent cyclic dependencies from deadlocking
if depname in self._building:
raise ValueError("Cyclic dependency while building %s" % (
depname,
))
self._building[depname] = 1
builder = getattr(self, "build_%s" % (depname))
dep = builder()
setattr(self, depname, dep)
del self._building[depname]
return dep
raise NotImplementedError(
"%s has no %s nor a builder for it" % (
type(self).__name__, depname,
)
)
setattr(BaseHomeServer, "get_%s" % (depname), _get)
# TODO: Why are these parse_ methods so high up along with other globals?
# Surely these should be in a util package or in the api package?
def parse_eventid(self, s):
"""Parse the string given by 's' as a Event ID and return a EventID
object."""
return EventID.from_string(s)
def serialize_event(self, e, as_client_event=True):
return serialize_event(self, e, as_client_event)
def get_ip_from_request(self, request):
        # May be an X-Forwarded-For header depending on config
ip_addr = request.getClientIP()
if self.config.captcha_ip_origin_is_x_forwarded:
# use the header
if request.requestHeaders.hasHeader("X-Forwarded-For"):
ip_addr = request.requestHeaders.getRawHeaders(
"X-Forwarded-For"
)[0]
return ip_addr
def is_mine(self, domain_specific_string):
return domain_specific_string.domain == self.hostname
# Build magic accessors for every dependency
for depname in BaseHomeServer.DEPENDENCIES:
BaseHomeServer._make_dependency_method(depname)
class HomeServer(BaseHomeServer):
"""A homeserver object that will construct most of its dependencies as
required.
It still requires the following to be specified by the caller:
resource_for_client
resource_for_web_client
resource_for_federation
resource_for_content_repo
http_client
db_pool
"""
def build_clock(self):
return Clock()
def build_replication_layer(self):
return initialize_http_replication(self)
def build_datastore(self):
return DataStore(self)
def build_handlers(self):
return Handlers(self)
def build_notifier(self):
return Notifier(self)
def build_auth(self):
return Auth(self)
def build_state_handler(self):
return StateHandler(self)
def build_room_lock_manager(self):
return LockManager()
def build_distributor(self):
return Distributor()
def build_event_sources(self):
return EventSources(self)
def build_ratelimiter(self):
return Ratelimiter()
def build_keyring(self):
return Keyring(self)
def build_event_builder_factory(self):
return EventBuilderFactory(
clock=self.get_clock(),
hostname=self.hostname,
)
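# A usage sketch of the accessor pattern above (server name and dependency
# values invented):
#
#   hs = HomeServer("example.com", db_name="synapse.db")
#   hs.get_db_name()  # -> "synapse.db", supplied via kwargs
#   hs.get_clock()    # lazily calls build_clock() once and caches the Clock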
|
Python
| 0.000104
|
@@ -1151,42 +1151,8 @@
ore%0A
-from synapse.types import EventID%0A
from
@@ -4264,323 +4264,8 @@
t)%0A%0A
- # TODO: Why are these parse_ methods so high up along with other globals?%0A # Surely these should be in a util package or in the api package?%0A%0A def parse_eventid(self, s):%0A %22%22%22Parse the string given by 's' as a Event ID and return a EventID%0A object.%22%22%22%0A return EventID.from_string(s)%0A%0A
|
ae9b7bf45832b69f6a3ca94e24ed6d2e7d3a384d
|
Update __init__.py
|
gemstone/__init__.py
|
gemstone/__init__.py
|
"""
Build microservices with Python
"""
from gemstone.core.microservice import MicroService
from gemstone.core.decorators import private_api_method, public_method, event_handler
from gemstone.core.handlers import TornadoJsonRpcHandler
from gemstone.client.remote_service import RemoteService
from gemstone.util import as_completed, first_completed, make_callbacks
__author__ = "Vlad Calin"
__email__ = "vlad.s.calin@gmail.com"
__version__ = "0.4.0"
__all__ = [
# core classes
'MicroService',
'RemoteService',
# decorators
'public_method',
'private_api_method',
'event_handler',
# tornado handler
'TornadoJsonRpcHandler',
# async utilities
'as_completed',
'first_completed',
'make_callbacks'
]
|
Python
| 0.000072
|
@@ -445,9 +445,9 @@
%220.
-4
+5
.0%22%0A
|
322d10c8932f160a305fab126401e3f172453e7e
|
Refactor prototxt generation.
|
generate_prototxt.py
|
generate_prototxt.py
|
#!/usr/bin/python
import os
import sys
caffe_root = os.getenv('CAFFE_ROOT', './')
sys.path.insert(0, caffe_root + '/python')
import caffe
from caffe import layers as L, params as P
def lenet():
n = caffe.NetSpec()
# empty layers as placeholders
# the resulting prototxt must be edited manually
n.data = L.Input()
n.label = L.Input()
n.conv1 = L.Convolution(n.data, kernel_size=5, num_output=20, weight_filler=dict(type='xavier'))
n.pool1 = L.Pooling(n.conv1, kernel_size=2, stride=2, pool=P.Pooling.MAX)
n.conv2 = L.Convolution(n.pool1, kernel_size=5, num_output=50, weight_filler=dict(type='xavier'))
n.pool2 = L.Pooling(n.conv2, kernel_size=2, stride=2, pool=P.Pooling.MAX)
n.fc1 = L.InnerProduct(n.pool2, num_output=500, weight_filler=dict(type='xavier'))
n.relu1 = L.ReLU(n.fc1, in_place=True)
n.score = L.InnerProduct(n.relu1, num_output=2, weight_filler=dict(type='xavier'))
n.loss = L.SoftmaxWithLoss(n.score, n.label)
return n.to_proto()
with open('lenet_auto_train.prototxt', 'w') as f:
f.write(str(lenet()))
with open('lenet_auto_test.prototxt', 'w') as f:
f.write(str(lenet()))
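# Note on the refactor in the diff below: Python spells the chained
# conditional `elif`, not `else if`, so a runnable version of the
# phase-dependent tail would read:
#
#   if phase == 'TRAIN':
#       n.loss = L.SoftmaxWithLoss(n.score, n.label)
#   elif phase == 'TEST':
#       n.prob = L.Softmax(n.score)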
|
Python
| 0
|
@@ -188,16 +188,33 @@
f lenet(
+batch_size, phase
):%0A n
@@ -943,24 +943,56 @@
='xavier'))%0A
+%0A if (phase == 'TRAIN'):%0A
n.loss
@@ -1029,16 +1029,83 @@
.label)%0A
+ else if (phase == 'TEST'):%0A n.prob = L.Softmax(n.score)%0A
%0A
@@ -1194,24 +1194,35 @@
e(str(lenet(
+50, 'TRAIN'
)))%0A%0Awith op
@@ -1255,32 +1255,32 @@
xt', 'w') as f:%0A
-
f.write(str(
@@ -1285,13 +1285,23 @@
r(lenet(
+50, 'TEST'
)))%0A%0A
|
ef80c3374b41b9ab1b390adda5a2e4c16252c137
|
fix pep8
|
pyedflib/tests/test_edfreader.py
|
pyedflib/tests/test_edfreader.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Holger Nahrstaedt
from __future__ import division, print_function, absolute_import
import os
import numpy as np
from datetime import datetime
# from numpy.testing import (assert_raises, run_module_suite,
# assert_equal, assert_allclose, assert_almost_equal)
import unittest
import pyedflib
class TestEdfReader(unittest.TestCase):
def setUp(self):
data_dir = os.path.join(os.getcwd(), 'data')
self.edf_data_file = os.path.join(data_dir, 'test_generator.edf')
def test_EdfReader(self):
try:
f = pyedflib.EdfReader(self.edf_data_file)
except IOError:
print('cannot open', self.edf_data_file)
return
ann_index, ann_duration, ann_text = f.readAnnotations()
np.testing.assert_almost_equal(ann_index[0], 0)
np.testing.assert_almost_equal(ann_index[1], 600)
np.testing.assert_equal(f.signals_in_file, 11)
np.testing.assert_equal(f.datarecords_in_file, 600)
for i in np.arange(11):
np.testing.assert_almost_equal(f.getSampleFrequencies()[i], 200)
np.testing.assert_equal(f.getNSamples()[i], 120000)
f._close()
del f
def test_EdfReader_headerInfos(self):
try:
f = pyedflib.EdfReader(self.edf_data_file)
except IOError:
print('cannot open', self.edf_data_file)
return
        datetimeSoll = datetime(2011, 4, 4, 12, 57, 2)
        np.testing.assert_equal(f.getStartdatetime(), datetimeSoll)
np.testing.assert_equal(f.getPatientCode(), b'abcxyz99')
np.testing.assert_equal(f.getPatientName(), b'Hans Muller')
np.testing.assert_equal(f.getGender(), b'Male')
np.testing.assert_equal(f.getBirthdate(), b'30 jun 1969')
np.testing.assert_equal(f.getPatientAdditional(), b'patient')
np.testing.assert_equal(f.getAdmincode(), b'Dr. X')
np.testing.assert_equal(f.getTechnician(), b'Mr. Spotty')
np.testing.assert_equal(f.getRecordingAdditional(), b'unit test file')
np.testing.assert_equal(f.getFileDuration(), 600)
fileHeader = f.getHeader()
np.testing.assert_equal(fileHeader["patientname"], b'Hans Muller')
f._close()
del f
def test_EdfReader_signalInfos(self):
try:
f = pyedflib.EdfReader(self.edf_data_file)
except IOError:
print('cannot open', self.edf_data_file)
return
np.testing.assert_equal(f.getSignalLabels()[0], b'squarewave')
np.testing.assert_equal(f.getLabel(0), b'squarewave')
np.testing.assert_equal(f.getPhysicalDimension(0), b'uV')
np.testing.assert_equal(f.getPrefilter(0), b'pre1')
np.testing.assert_equal(f.getTransducer(0), b'trans1')
np.testing.assert_equal(f.getSampleFrequency(0), 200)
np.testing.assert_equal(f.getSampleFrequencies()[0], 200)
np.testing.assert_equal(f.getSignalLabels()[1], b'ramp')
np.testing.assert_equal(f.getSignalLabels()[2], b'pulse')
np.testing.assert_equal(f.getSignalLabels()[3], b'noise')
np.testing.assert_equal(f.getSignalLabels()[4], b'sine 1 Hz')
np.testing.assert_equal(f.getSignalLabels()[5], b'sine 8 Hz')
f._close()
del f
if __name__ == '__main__':
# run_module_suite(argv=sys.argv)
unittest.main()
|
Python
| 0.000001
|
@@ -741,28 +741,16 @@
return%0A
-
%0A
@@ -1450,24 +1450,16 @@
return
-
%0A
@@ -2505,32 +2505,16 @@
return
-
%0A
|
739df1a5be70f5044d7c6be357776e36ae330ce3
|
Fix a bug in the OpenMP backend.
|
pyfr/backends/openmp/compiler.py
|
pyfr/backends/openmp/compiler.py
|
# -*- coding: utf-8 -*-
import os
import shutil
import subprocess
import tempfile
import itertools as it
from abc import ABCMeta, abstractmethod
from ctypes import CDLL
import numpy as np
from pyfr.ctypesutil import platform_libname
from pyfr.nputil import npdtype_to_ctypestype
from pyfr.util import memoize, chdir
class SourceModule(object):
__metaclass__ = ABCMeta
_dir_seq = it.count()
def __init__(self, src, cfg):
self._src = src
self._cfg = cfg
# Create a scratch directory
tmpdir = tempfile.mkdtemp(prefix='pyfr-%d-' % next(self._dir_seq))
try:
with chdir(tmpdir):
# Compile and link the source
lname = self._build()
# Load
self._mod = CDLL(os.path.abspath(lname))
finally:
shutil.rmtree(tmpdir)
def function(self, name, restype, argtypes):
# Get the function
fn = getattr(self._mod, name)
fn.restype = npdtype_to_ctypestype(restype)
fn.argtypes = [npdtype_to_ctypestype(a) for a in argtypes]
return fn
@abstractmethod
def _build(self):
pass
class GccSourceModule(SourceModule):
def __init__(self, src, cfg):
# Find GCC (or a compatible alternative)
self._cc = cfg.getpath('backend-c', 'cc', 'cc', abs=False)
# Delegate
super(GccSourceModule, self).__init__(src, cfg)
def _build(self):
# File names
cn, on, ln = 'tmp.c', 'tmp.o', platform_libname('tmp')
# Write the source code out
with open(cn, 'w') as f:
f.write(self._src)
# Compile
cmd = [self._cc,
'-std=c99', # Enable C99 support
'-Ofast', # Optimise, incl. -ffast-math
'-march=native', # Use CPU-specific instructions
'-fopenmp', # Enable OpenMP support
'-fPIC', # Position-independent code for shared lib
'-c', '-o', on, cn]
out = subprocess.check_call(cmd, stderr=subprocess.STDOUT)
# Link
cmd = [self._cc,
'-shared', # Create a shared library
'-fopenmp', # Required for OpenMP
'-o', ln, on,]
out = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
return ln
|
Python
| 0
|
@@ -1327,17 +1327,22 @@
backend-
-c
+openmp
', 'cc',
|
81d41ceaf89848851d8353021f01f41c724aaae8
|
Add docstring to PySplunk
|
pygraphc/misc/splunk/pysplunk.py
|
pygraphc/misc/splunk/pysplunk.py
|
from os import system, remove
class PySplunk(object):
def __init__(self, username, source, host, output_mode, tmp_file='/tmp/pysplunk_cluster.csv'):
self.username = username
self.source = source.replace(' ', '\ ')
self.host = host
self.output_mode = output_mode
self.tmp_file = tmp_file
def get_splunk_cluster(self):
# run Python Splunk API command
command = 'python search.py --username=' + self.username + ' "search source=' + self.source + \
' host=' + self.host + ' sourcetype=linux_secure | cluster labelfield=cluster_id labelonly=t |' \
' table cluster_id _raw | sort _time | reverse" ' + '--output_mode=' + \
self.output_mode + " > " + self.tmp_file
system(command)
# get clusters
with open(self.tmp_file, 'r') as f:
logs = f.readlines()
clusters = {}
for index, log in enumerate(logs):
cluster_id = log.split(',')[0]
clusters[cluster_id] = clusters.get(cluster_id, []) + [index]
# remove tmp_file
remove(self.tmp_file)
return clusters
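# Illustrative use (username, source, and host values invented):
#
#   splunk = PySplunk('admin', '/var/log/secure', 'web01', 'csv')
#   clusters = splunk.get_splunk_cluster()  # {cluster_id: [line indexes]}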
|
Python
| 0.000001
|
@@ -57,102 +57,933 @@
-def __init__(self, username, source, host, output_mode, tmp_file='/tmp/pysplunk_cluster.csv'):
+%22%22%22Get log clustering using Python Splunk API %5BSplunkDev2016%5D_.%0A%0A References%0A ----------%0A .. %5BSplunkDev2016%5D Command line examples in the Splunk SDK for Python.%0A http://dev.splunk.com/view/python-sdk/SP-CAAAEFK%0A %22%22%22%0A def __init__(self, username, source, host, output_mode, tmp_file='/tmp/pysplunk_cluster.csv'):%0A %22%22%22The constructor of class PySplunk.%0A%0A Parameters%0A ----------%0A username : str%0A Username to access Splunk daemon. No password required since we use Splunk free version.%0A source : str%0A Identifier for log source. It is usually filename of log.%0A host : str%0A Hostname for the source log.%0A output_mode : str%0A Output for clustering result. Recommended output is csv%0A tmp_file : str%0A Path for temporary clustering result.%0A %22%22%22
%0A
@@ -1192,16 +1192,209 @@
(self):%0A
+ %22%22%22Get log clusters.%0A%0A Returns%0A -------%0A clusters : dict%0A Dictionary of log cluster. Key: cluster_id, value: list of log line identifier.%0A %22%22%22%0A
@@ -2145,32 +2145,32 @@
remove tmp_file%0A
+
remove(s
@@ -2183,17 +2183,16 @@
p_file)%0A
-%0A
|
15c7851fda49caa4a0e13e4a128fed739e384e69
|
fix discrete unif problem when using int64: no idea what it's about, but we should fix it at some point
|
pymc/tests/test_distributions.py
|
pymc/tests/test_distributions.py
|
import itertools as its
from checks import *
from pymc import *
from numpy import array, inf
from scipy import integrate
from numdifftools import Gradient
R = array([-inf, -2.1, -1, -.01, .0, .01, 1, 2.1, inf])
Rplus = array([0, .01, .1, .9, .99, 1, 1.5, 2, 100, inf])
Rplusbig = array([0, .5, .9, .99, 1, 1.5, 2, 20, inf])
Unit = array([0, .001, .1, .5, .75, .99, 1])
Runif = array([-1, -.4, 0, .4, 1])
Rdunif = array([-10, 0, 10])
Rplusunif = array([0, .5, inf])
Rplusdunif = array([2, 10, 100])
I = array([-1000, -3, -2, -1, 0, 1, 2, 3, 1000], 'int64')
NatSmall = array([0, 3, 4, 5, 1000], 'int64')
Nat = array([0, 1, 2, 3, 2000], 'int64')
NatBig = array([0, 1, 2, 3, 5000, 50000], 'int64')
Bool = array([0, 0, 1, 1], 'int64')
def test_unif():
checkd(Uniform, Runif, {'lower': -Rplusunif, 'upper': Rplusunif})
def test_discrete_unif():
checkd(DiscreteUniform, Rdunif,
{'lower': -Rplusdunif, 'upper': Rplusdunif})
def test_flat():
checkd(Flat, Runif, {}, False)
def test_normal():
checkd(Normal, R, {'mu': R, 'tau': Rplus})
def test_beta():
checkd(Beta, Unit, {'alpha': Rplus * 5, 'beta': Rplus * 5})
def test_exponential():
checkd(Exponential, Rplus, {'lam': Rplus})
def test_geometric():
checkd(Geometric, NatBig, {'p': Unit})
def test_negative_binomial():
checkd(NegativeBinomial, Nat, {'mu': Rplusbig, 'alpha': Rplusbig})
def test_laplace():
checkd(Laplace, R, {'mu': R, 'b': Rplus})
def test_t():
checkd(T, R, {'nu': Rplus, 'mu': R, 'lam': Rplus})
def test_cauchy():
checkd(Cauchy, R, {'alpha': R, 'beta': Rplusbig})
def test_gamma():
checkd(Gamma, Rplus, {'alpha': Rplusbig, 'beta': Rplusbig})
def test_tpos():
checkd(Tpos, Rplus, {'nu': Rplus, 'mu': R, 'lam': Rplus}, False)
def test_binomial():
checkd(Binomial, Nat, {'n': NatSmall, 'p': Unit})
def test_betabin():
checkd(BetaBin, Nat, {'alpha': Rplus, 'beta': Rplus, 'n': NatSmall})
def test_bernoulli():
checkd(Bernoulli, Bool, {'p': Unit})
def test_poisson():
checkd(Poisson, Nat, {'mu': Rplus})
def test_constantdist():
checkd(ConstantDist, I, {'c': I})
def test_zeroinflatedpoisson():
checkd(ZeroInflatedPoisson, I, {'theta': Rplus, 'z': Bool})
def test_densitydist():
def logp(x):
return -log(2 * .5) - abs(x - .5) / .5
checkd(DensityDist, R, {}, extra_args={'logp': logp})
def test_addpotential():
with Model() as model:
x = Normal('x', 1, 1)
model.AddPotential(-x ** 2)
check_dlogp(model, x, [R])
def checkd(distfam, valuedomain, vardomains,
check_int=True, check_der=True, extra_args={}):
with Model() as m:
vars = dict((v, Flat(
v, dtype=dom.dtype)) for v, dom in vardomains.iteritems())
vars.update(extra_args)
# print vars
value = distfam(
'value', **vars)
vardomains['value'] = np.array(valuedomain)
domains = [np.array(vardomains[str(v)]) for v in m.vars]
if check_int:
check_int_to_1(m, value, domains)
if check_der:
check_dlogp(m, value, domains)
def check_int_to_1(model, value, domains):
pdf = compilef(exp(model.logp))
lower, upper = np.min(domains[-1]), np.max(domains[-1])
domains = [d[1:-1] for d in domains[:-1]]
for a in its.product(*domains):
a = a + (value.tag.test_value,)
pt = Point(dict((
str(var), val) for var, val in zip(model.vars, a)), model=model)
bij = DictToVarBijection(value, (), pt)
pdfx = bij.mapf(pdf)
if value.dtype in continuous_types:
area = integrate.quad(pdfx, lower, upper, epsabs=1e-8)[0]
else:
area = np.sum(map(pdfx, np.arange(lower, upper + 1)))
assert_almost_equal(area, 1, err_msg=str(pt))
def check_dlogp(model, value, domains):
domains = [d[1:-1] for d in domains]
bij = DictToArrayBijection(
ArrayOrdering(model.cont_vars), model.test_point)
if not model.cont_vars:
return
dlp = model.dlogpc()
dlogp = bij.mapf(model.dlogpc())
lp = model.logpc
logp = bij.mapf(model.logpc)
ndlogp = Gradient(logp)
for a in its.product(*domains):
pt = Point(dict((
str(var), val) for var, val in zip(model.vars, a)), model=model)
pt = bij.map(pt)
|
Python
| 0.000021
|
@@ -428,16 +428,25 @@
, 0, 10%5D
+, 'int64'
)%0ARplusu
@@ -502,16 +502,25 @@
10, 100%5D
+, 'int64'
)%0A%0AI = a
|
c3d856561887c61839a3781251f36929af0e8718
|
relax bool
|
pycaffe2/utils.py
|
pycaffe2/utils.py
|
from caffe2.proto import caffe2_pb2
from caffe.proto import caffe_pb2
from google.protobuf.message import DecodeError, Message
from google.protobuf import text_format
import numpy as np
def CaffeBlobToNumpyArray(blob):
return np.asarray(blob.data, dtype=np.float32).reshape(
blob.num, blob.channels, blob.height, blob.width)
def Caffe2TensorToNumpyArray(tensor):
return np.asarray(tensor.float_data, dtype=np.float32).reshape(tensor.dims)
def NumpyArrayToCaffe2Tensor(arr, name):
tensor = caffe2_pb2.TensorProto()
tensor.data_type = caffe2_pb2.TensorProto.FLOAT
tensor.name = name
tensor.dims.extend(arr.shape)
tensor.float_data.extend(list(arr.flatten().astype(float)))
return tensor
def MakeArgument(key, value):
"""Makes an argument based on the value type."""
argument = caffe2_pb2.Argument()
argument.name = key
if type(value) is float:
argument.f = value
elif type(value) is int:
argument.i = value
elif type(value) is str:
argument.s = value
elif isinstance(value, Message):
argument.s = value.SerializeToString()
elif all(type(v) is float for v in value):
argument.floats.extend(value)
elif all(type(v) is int for v in value):
argument.ints.extend(value)
elif all(type(v) is str for v in value):
argument.strings.extend(value)
elif all(isinstance(v, Message) for v in value):
        argument.strings.extend([v.SerializeToString() for v in value])
else:
raise ValueError("Unknown argument type: key=%s value=%s, value type=%s" %
(key, str(value), str(type(value))))
return argument
def TryReadProtoWithClass(cls, s):
"""Reads a protobuffer with the given proto class.
Inputs:
cls: a protobuffer class.
s: a string of either binary or text protobuffer content.
Outputs:
proto: the protobuffer of cls
Throws:
google.protobuf.message.DecodeError: if we cannot decode the message.
"""
obj = cls()
try:
text_format.Parse(s, obj)
return obj
except text_format.ParseError as e:
obj.ParseFromString(s)
return obj
def GetContentFromProto(obj, function_map):
"""Gets a specific field from a protocol buffer that matches the given class.
"""
for cls, func in function_map.iteritems():
if type(obj) is cls:
return func(obj)
def GetContentFromProtoString(s, function_map):
for cls, func in function_map.iteritems():
try:
obj = TryReadProtoWithClass(cls, s)
return func(obj)
except DecodeError:
continue
else:
raise DecodeError("Cannot find a fit protobuffer class.")
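# Illustrative calls (argument names and values invented) showing the type
# dispatch in MakeArgument above:
#
#   MakeArgument('kernel', 5)        # int   -> argument.i
#   MakeArgument('scale', 1.5)       # float -> argument.f
#   MakeArgument('order', 'NCHW')    # str   -> argument.s
#   MakeArgument('pads', [1, 1, 1])  # ints  -> argument.ints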
|
Python
| 0.999777
|
@@ -917,17 +917,119 @@
) is int
-:
+ or type(value) is bool:%0A # We make a relaxation that a boolean variable will also be stored as int.
%0A arg
|
bfdc6fe5d0a8f6200f80c071fe4d3150020b4cdd
|
version 20200404.1
|
pydal/__init__.py
|
pydal/__init__.py
|
__version__ = "20200321.1"
from .base import DAL
from .objects import Field
from .helpers.classes import SQLCustomType
from .helpers.methods import geoPoint, geoLine, geoPolygon
|
Python
| 0
|
@@ -17,11 +17,11 @@
0200
-321
+404
.1%22%0A
|
d8c280170cd8c9be6af9c913360ffce28e1bda7a
|
remove debug code
|
src/cineca.py
|
src/cineca.py
|
#!/usr/bin/env python
from collections import namedtuple
from six.moves import input
from operator import itemgetter
from tabulate import tabulate
from scopus import ScopusClient
from utils import *
import traceback
import os.path
import sqlite3
import json
import sys
from colorama import init, Fore, Style
init(autoreset=True)
_RA= Style.RESET_ALL
Entry = namedtuple('Entry', ['ID', 'affil_country', 'affil_city', 'affil_name',
'name', 'surname', 'documents', 'freqs'])
def get_frequency_for_area(entry):
areas = entry.get('subject-area', [])
if isinstance(areas, dict): areas = [areas]
return ', '.join('%s: %s' % (a['@abbrev'], a['@frequency']) for a in areas)
def make_entry(entry):
affiliation = entry.get('affiliation-current', {})
return Entry(
ID=entry['dc:identifier'].partition(':')[2],
affil_country=affiliation.get('affiliation-country'),
affil_city=affiliation.get('affiliation-city'),
affil_name=affiliation.get('affiliation-name'),
name=entry['preferred-name']['given-name'],
surname=entry['preferred-name']['surname'],
documents=int(entry['document-count']),
freqs=get_frequency_for_area(entry))
def entry_score(query, entry):
return max(fuzzy_score(query, entry.affil_name),
fuzzy_score(query, entry.affil_city))
def sorted_entries(entries, ateneo):
entries = [(make_entry(e), e) for e in entries]
entries = [(e[0], e[1], entry_score(ateneo, e[0])) for e in entries]
key = lambda entry: (entry[2], entry[0].documents)
return sorted(entries, key=key, reverse=True)
def get_entries(sc, namefield, **kwargs):
for name, surname in iterate_names(*split_name(namefield)):
entries = sc.get_authors(authfirst=name, authlast=surname, **kwargs)
if entries: return entries
return []
def show_entries(entries):
print(tabulate([[i, e[2]*100.] + list(e[0]) for i,e in enumerate(entries)],
headers=('idx', '% match') + Entry._fields, tablefmt="grid"))
def user_select_entries(entries):
while True:
inp = input(Fore.YELLOW + '\ncomma separated indexes: ' + _RA)
try:
if not inp: return []
chosen = list(map(int, inp.strip().split(',')))
if min(chosen)<0 or max(chosen)>=len(entries): raise ValueError
print(chosen)
return [entries[c] for c in chosen]
except ValueError:
print(Fore.RED + 'Indexes must be 0 <= idx < %d' % len(entries) + _RA)
def init_db(dbfile):
if os.path.isfile(dbfile):
inp = input(Fore.RED + "Database file already exists. " +
"Type 'yes' to append, anything else to quit: " + _RA)
if inp.strip() != 'yes': sys.exit(0)
with sqlite3.connect(dbfile) as connection:
connection.execute('CREATE TABLE IF NOT EXISTS'
' authors(author, ateneo, id UNIQUE, entry)')
def main(apikey, filename, dbfile, extra_params=None, olddbfile=None, exclude=None):
sc = ScopusClient(apikey)
init_db(dbfile)
default_ateneo = None
extra_params = extra_params or dict()
for row in read_cineca_file(filename):
if 'Ateneo' not in row and not default_ateneo:
default_ateneo = input(Fore.YELLOW +
"No 'Ateneo' field, insert default value (e.g. Bergamo): " + _RA)
namefield, ateneo = row['Cognome e Nome'], row.get('Ateneo', default_ateneo)
print('\n%s\n\n%s\n' % ('='*80, row))
fascia = row['Fascia']
if exclude and (fascia in exclude):
print(Fore.MAGENTA + 'Skipped (fascia: ' + fascia + ')\n' + _RA)
continue
else:
continue
previous_entries = []
previous_ids = []
if olddbfile:
with sqlite3.connect(olddbfile) as connection:
cursor = connection.cursor()
cursor.execute('SELECT entry FROM authors WHERE author=? AND ateneo=?',
(namefield, ateneo))
data = [json.loads(row[0]) for row in cursor.fetchall()]
previous_entries = sorted_entries(data, ateneo)
previous_ids = [e[0].ID for e in previous_entries]
try:
entries = sorted_entries(get_entries(sc, namefield, **extra_params), ateneo)
if len(entries) == 0:
print(Fore.RED + '\nNo entries for this author\n' + _RA)
if previous_entries:
print(Fore.YELLOW + '\nBut previous entries exist:\n' + _RA)
show_entries(previous_entries)
inp = input(Fore.YELLOW + "\nUse previous entries? (Y/n) " + _RA)
if inp.strip().lower() == 'n': continue
else:
entries = previous_entries
else:
print(Fore.GREEN + 'New entries:\n' + _RA)
show_entries(entries)
if len(entries) == 1 and entries[0][2] >= 0.6:
print(Fore.GREEN + '\nSingle good entry for this author\n' + _RA)
if previous_entries:
if [entries[0][0].ID] == previous_ids:
print(Fore.GREEN + '\nWhich is the same as the old one.\n' + _RA)
else:
print(Fore.YELLOW + '\nBut differs from the old ones, which are:\n' + _RA)
show_entries(previous_entries)
inp = input(Fore.YELLOW + '\nKeep (o)ld, (n)ew, or (b)oth? (default=n) '
+ _RA).strip().lower()
if inp == 'o':
entries = previous_entries
elif inp == 'b':
entries.extend(previous_entries)
else:
print(Fore.CYAN + '\nOLD DB IDs: ' + ' '.join(previous_ids) + _RA + '\n')
entries = user_select_entries(entries)
with sqlite3.connect(dbfile) as connection:
connection.executemany('INSERT OR IGNORE INTO authors VALUES (?,?,?,?)',
((namefield, ateneo, e[0].ID, json.dumps(e[1]))
for e in entries))
except KeyboardInterrupt: break
except Exception:
traceback.print_exc()
input(Fore.RED + '\nERROR processing ' + namefield +
'. Press any key to continue..' + _RA)
if __name__ == '__main__':
from config import APIKEY, FILENAME, DBFILE, EXTRA_PARAMS, OLDDBFILE, EXCLUDE
main(APIKEY, FILENAME, DBFILE, EXTRA_PARAMS, OLDDBFILE, EXCLUDE)
|
Python
| 0.02323
|
@@ -3710,43 +3710,8 @@
inue
-%0A else:%0A continue
%0A%0A
|
931c016e43402f847c6e58b4679f7f5cf132776f
|
add DESI_LOGLEVEL environment variable, add doc, define WARNING... in module
|
py/desispec/log.py
|
py/desispec/log.py
|
"""
Utility functions to dump log messages
We can have something specific for DESI in the future, but for now we use the standard Python logging module.
"""
import sys
import logging
desi_logger = None
def get_logger(level=logging.DEBUG) :
"""
returns a default desi logger
"""
global desi_logger
if desi_logger is not None :
return desi_logger
desi_logger = logging.getLogger("DESI")
desi_logger.setLevel(level)
while len(desi_logger.handlers) > 0:
h = desi_logger.handlers[0]
desi_logger.removeHandler(h)
ch = logging.StreamHandler(sys.stdout)
#formatter = logging.Formatter('%(asctime)s:%(name)s:%(levelname)s:%(message)s')
formatter = logging.Formatter('%(levelname)s:%(filename)s:%(lineno)s:%(funcName)s: %(message)s')
ch.setFormatter(formatter)
desi_logger.addHandler(ch)
return desi_logger
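# Minimal usage sketch (message text invented):
#
#   log = get_logger(logging.INFO)
#   log.info("starting extraction")
#   # -> INFO:<file>:<lineno>:<func>: starting extraction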
|
Python
| 0
|
@@ -160,16 +160,25 @@
logging%0A
+import os
%0A%0Adesi_l
@@ -195,159 +195,1923 @@
ne%0A%0A
-def get_logger(level=logging.DEBUG) :%0A %22%22%22 %0A returns a default desi logger %0A %22%22%22%0A%0A global desi_logger%0A %0A if desi_logger is not None :
+# just for convenience to avoid importing logging%0A# we duplicate the logging levels%0ADEBUG=logging.DEBUG # Detailed information, typically of interest only when diagnosing problems.%0AINFO=logging.INFO # Confirmation that things are working as expected.%0AWARNING=logging.WARNING # An indication that something unexpected happened, or indicative of some problem %0A # in the near future (e.g. %22disk space low%22). The software is still working as expected.%0AERROR=logging.ERROR # Due to a more serious problem, the software has not been able to perform some function.%0ACRITICAL=logging.CRITICAL # A serious error, indicating that the program itself may be unable to continue running.%0A%0A# see example of usage in test/test_log.py%0A%0A%0Adef get_logger(level=None) :%0A %22%22%22 %0A returns a default desi logger%0A%0A Args:%0A level: debugging level.%0A %0A If level=None, will look for environment variable DESI_LOGLEVEL, accepting only values DEBUG,INFO,WARNING,ERROR.%0A If DESI_LOGLEVEL is not set, default level is INFO. %0A %22%22%22%0A%0A if level is None :%0A desi_level=os.getenv(%22DESI_LOGLEVEL%22)%0A if desi_level is None : %0A level=INFO%0A else :%0A dico=%7B%22DEBUG%22:DEBUG,%22INFO%22:INFO,%22WARNING%22:WARNING,%22ERROR%22:ERROR%7D%0A if dico.has_key(desi_level) :%0A level=dico%5Bdesi_level%5D%0A else :%0A # amusingly I need the logger to dump a warning here%0A logger=get_logger(level=WARNING)%0A message=%22ignore DESI_LOGLEVEL=%25s (only recognize%22%25desi_level%0A for k in dico :%0A message+=%22 %25s%22%25k%0A message+=%22)%22%0A logger.warning(message)%0A level=INFO%0A %0A %0A global desi_logger%0A %0A if desi_logger is not None :%0A if level is not None :%0A desi_logger.setLevel(level)
%0A
@@ -2610,16 +2610,26 @@
matter)%0A
+ %0A %0A
desi
@@ -2651,16 +2651,17 @@
ler(ch)%0A
+%0A
retu
|
16fd5a92ec48f9f35f07d51160aa2cc59a3b339b
|
add args to update_progress signal
|
src/client.py
|
src/client.py
|
import libxml2, sys, time, os, weakref, cPickle
import logging
from kaa import ipc, db
from kaa.notifier import Signal, OneShotTimer, execute_in_timer
from server import *
from channel import *
from program import *
__all__ = ['Client']
log = logging.getLogger()
class Client(object):
def __init__(self, server_or_socket, auth_secret = None):
self.connected = True
self._ipc = ipc.IPCClient(server_or_socket, auth_secret = auth_secret)
self._server = self._ipc.get_object("guide")
self.signals = {
"updated": Signal(),
"update_progress": Signal(),
"disconnected": Signal()
}
self._load()
self._ipc.signals["closed"].connect(self._disconnected)
        # Connect to server signals. The callbacks themselves are called with
        # a OneShotTimer to avoid some strange problems caused by the ipc
        # code (the server will wait for the return).
# FIXME: this whole signals over ipc stuff is ugly
self._server.signals["updated"].connect(self._updated)
self._server.signals["update_progress"].connect(self._update_progress)
def _disconnected(self):
self.connected = False
self.signals["disconnected"].emit()
execute_in_timer(OneShotTimer, 0)
def _updated(self):
self._load()
self.signals["updated"].emit()
execute_in_timer(OneShotTimer, 0)
def _update_progress(self):
self.signals["update_progress"].emit()
def _load(self):
self._channels_by_name = {}
self._channels_by_db_id = {}
self._channels_by_tuner_id = {}
self._channels_list = []
data = self._server.query(type="channel", __ipc_noproxy_result = True)
for row in db.iter_raw_data(data, ("id", "tuner_id", "name", "long_name")):
db_id, tuner_id, name, long_name = row
chan = Channel(tuner_id, name, long_name, self)
chan.db_id = db_id
self._channels_by_name[name] = chan
self._channels_by_db_id[db_id] = chan
for t in tuner_id:
if self._channels_by_tuner_id.has_key(t):
log.warning('loading channel %s with tuner_id %s '+\
                                'already claimed by channel %s',
chan.name, t,
self._channels_by_tuner_id[t].name)
else:
self._channels_by_tuner_id[t] = chan
self._channels_list.append(chan)
# get attributes from server and store local
self._max_program_length = self._server.get_max_program_length()
self._num_programs = self._server.get_num_programs()
def _program_rows_to_objects(self, query_data):
cols = "parent_id", "id", "start", "stop", "title", "desc", \
"subtitle", "episode", "genre", "rating"
results = []
for row in db.iter_raw_data(query_data, cols):
if row[0] not in self._channels_by_db_id:
continue
channel = self._channels_by_db_id[row[0]]
program = Program(channel, *row[2:])
results.append(program)
return results
def search(self, **kwargs):
if not self.connected:
return []
if "channel" in kwargs:
ch = kwargs["channel"]
if type(ch) == Channel:
kwargs["channel"] = ch.db_id
elif type(ch) == tuple and len(ch) == 2:
kwargs["channel"] = db.QExpr("range", (ch[0].db_id, ch[1].db_id))
else:
raise ValueError, "channel must be Channel object or tuple of 2 Channel objects"
if "time" in kwargs:
if type(kwargs["time"]) in (int, float, long):
# Find all programs currently playing at the given time. We
# add 1 second as a heuristic to prevent duplicates if the
# given time occurs on a boundary between 2 programs.
start, stop = kwargs["time"] + 1, kwargs["time"] + 1
else:
start, stop = kwargs["time"]
max = self.get_max_program_length()
kwargs["start"] = db.QExpr("range", (int(start) - max, int(stop)))
kwargs["stop"] = db.QExpr(">=", int(start))
del kwargs["time"]
kwargs["type"] = "program"
data = self._server.query(__ipc_noproxy_result = True, **kwargs)
if not data[1]:
return []
return self._program_rows_to_objects(data)
def new_channel(self, tuner_id=None, name=None, long_name=None):
"""
Returns a channel object that is not associated with the EPG.
This is useful for clients that have channels that do not appear
in the EPG but wish to handle them anyway.
"""
# require at least one field
if not tuner_id and not name and not long_name:
log.error('need at least one field to create a channel')
return None
if not name:
# then there must be one of the others
if tuner_id:
name = tuner_id[0]
else:
name = long_name
if not long_name:
# then there must be one of the others
if name:
long_name = name
elif tuner_id:
long_name = tuner_id[0]
return Channel(tuner_id, name, long_name, epg=None)
def get_channel(self, name):
if name not in self._channels_by_name:
return None
return self._channels_by_name[name]
def get_channel_by_db_id(self, db_id):
if db_id not in self._channels_by_db_id:
return None
return self._channels_by_db_id[db_id]
def get_channel_by_tuner_id(self, tuner_id):
if tuner_id not in self._channels_by_tuner_id:
return None
return self._channels_by_tuner_id[tuner_id]
def get_max_program_length(self):
return self._max_program_length
def get_num_programs(self):
return self._num_programs
def get_channels(self):
return self._channels_list
def update(self, *args, **kwargs):
if not self.connected:
return False
# updated signal will fire when this call completes.
kwargs["__ipc_oneway"] = True
kwargs["__ipc_noproxy_args"] = True
self._server.update(*args, **kwargs)
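# Hypothetical client-side sketch (socket path and callback invented) of the
# signal this commit extends to forward arguments:
#
#   def on_progress(*args, **kwargs):
#       print args, kwargs  # arguments emitted by the server arrive here
#
#   client = Client('/tmp/epg.socket')
#   client.signals['update_progress'].connect(on_progress)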
|
Python
| 0.000001
|
@@ -1439,24 +1439,41 @@
rogress(self
+, *args, **kwargs
):%0A s
@@ -1508,16 +1508,31 @@
%22%5D.emit(
+*args, **kwargs
)%0A%0A
|
c8c2785b156523204e530cd78268686886ce2a37
|
Fix incorrect module publics
|
py/oldfart/make.py
|
py/oldfart/make.py
|
import os
import re
import subprocess
__all__ = ['NOOP', 'SUCCESS', 'FAIL', 'Maker']
NOTHING_DONE = 1
SUCCESS = 2
NO_RULE = 3
FAILURE = 4
class Maker(object):
def __init__(self, project_dir='.', makefile='Makefile'):
self.project_dir = os.path.abspath(project_dir)
self.makefile = os.path.abspath(os.path.join(project_dir, makefile))
def make(self, target):
"""Runs `make(1)` on `target` and returning a tuple `(status, output)`
where `status` is one of:
- `make.SUCCESS`: the target was successfully generated
- `make.NOTHING_DONE`: the target was already up-to-date
- `make.NO_RULE`: there is no rule to build the requested target
- `make.FAILURE`: `make(1)` exited otherwise with a non-zero error code
Returned `output` contains always the mixed output from `stdout` and
`stderr`.
"""
try:
capture = subprocess.check_output(
['make', '--makefile=' + self.makefile, target],
cwd=self.project_dir, stderr=subprocess.STDOUT,
universal_newlines=True)
if re.match(r"make: `[^']*' is up to date.", capture):
return (NOTHING_DONE, capture)
else:
return (SUCCESS, capture)
except subprocess.CalledProcessError as e:
if re.match(r"make: \*\*\* No rule to make target `{:s}'. Stop."
.format(target), e.output):
return (NO_RULE, e.output)
else:
return (FAILURE, e.output)
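# Illustrative use (the target name 'all' is an assumption):
#
#   maker = Maker(project_dir='.')
#   status, output = maker.make('all')
#   if status == SUCCESS:
#       print(output)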
|
Python
| 0.000025
|
@@ -51,10 +51,18 @@
%5B'NO
-OP
+THING_DONE
', '
@@ -74,14 +74,28 @@
SS',
+ 'NO_RULE',
'FAIL
+URE
', '
|
89c2e367e4d691e83ccf92055c1dc8be59e05497
|
use list comprehensions for legibility
|
pynder/session.py
|
pynder/session.py
|
from . import api
from . import models
class Session(object):
def __init__(self, facebook_id, facebook_token):
self._api = api.TinderAPI()
# perform authentication
self._api.auth(facebook_id, facebook_token)
self.profile = models.Profile(self._api.profile(), self)
def nearby_users(self):
return map(lambda user: models.Hopeful(user, self),
self._api.recs()['results'])
def update_location(self, latitude, longitude):
return self._api.ping(latitude, longitude)
def matches(self):
return map(lambda match: models.Match(match, self),
self._api.matches())
|
Python
| 0.000001
|
@@ -345,25 +345,9 @@
urn
-map(lambda user:
+%5B
mode
@@ -362,38 +362,24 @@
ul(u
-ser
, self)
-,%0A
+ for u in
sel
@@ -402,17 +402,17 @@
esults'%5D
-)
+%5D
%0A%0A de
@@ -551,26 +551,9 @@
urn
-map(lambda match:
+%5B
mode
@@ -566,39 +566,24 @@
ch(m
-atch
, self)
-,%0A
+ for m in
sel
@@ -598,10 +598,10 @@
atches()
-)
+%5D
%0A
|
042c32cbeca30da82239e7f6b9d83e88a2391dce
|
Fix name mangling to also replace : with _
|
pybindgen/utils.py
|
pybindgen/utils.py
|
import sys
from typehandlers.codesink import CodeSink
from typehandlers.base import TypeConfigurationError, CodeGenerationError
import version
import settings
def write_preamble(code_sink, min_python_version=(2, 3)):
"""
Write a preamble, containing includes, #define's and typedef's
necessary to correctly compile the code with the given minimum python
version.
"""
assert isinstance(code_sink, CodeSink)
assert isinstance(min_python_version, tuple)
code_sink.writeln('''/* This file was generated by PyBindGen %s */
#define PY_SSIZE_T_CLEAN
#include <Python.h>
''' % '.'.join([str(x) for x in version.__version__]))
if min_python_version < (2, 4):
code_sink.writeln(r'''
#if PY_VERSION_HEX < 0x02040000
#define Py_CLEAR(op) \
do { \
if (op) { \
PyObject *tmp = (PyObject *)(op); \
(op) = NULL; \
Py_DECREF(tmp); \
} \
} while (0)
#endif
''')
if min_python_version < (2, 5):
code_sink.writeln(r'''
#if PY_VERSION_HEX < 0x02050000
typedef int Py_ssize_t;
# define PY_SSIZE_T_MAX INT_MAX
# define PY_SSIZE_T_MIN INT_MIN
typedef inquiry lenfunc;
typedef intargfunc ssizeargfunc;
typedef intobjargproc ssizeobjargproc;
#define Py_VISIT(op) \
do { \
if (op) { \
int vret = visit((PyObject *)(op), arg); \
if (vret) \
return vret; \
} \
} while (0)
#endif
''')
code_sink.writeln(r'''
#if __GNUC__ > 2
# define PYBINDGEN_UNUSED(param) param __attribute__((__unused__))
#elif __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ > 4)
# define PYBINDGEN_UNUSED(param) __attribute__((__unused__)) param
#else
# define PYBINDGEN_UNUSED(param)
#endif /* !__GNUC__ */
''')
def get_mangled_name(base_name, template_args):
"""for internal pybindgen use"""
assert isinstance(base_name, str)
assert isinstance(template_args, (tuple, list))
if template_args:
return '%s__lt__%s__gt__' % (base_name, '_'.join(
[arg.replace(' ', '_') for arg in template_args]))
else:
return base_name
class SkipWrapper(Exception):
"""Exception that is raised to signal a wrapper failed to generate but
must simply be skipped.
for internal pybindgen use"""
def call_with_error_handling(callable, args, kwargs, wrapper,
exceptions_to_handle=(TypeConfigurationError,CodeGenerationError)):
"""for internal pybindgen use"""
if settings.error_handler is None:
return callable(*args, **kwargs)
else:
try:
return callable(*args, **kwargs)
except Exception, ex:
if isinstance(ex, exceptions_to_handle):
dummy1, dummy2, traceback = sys.exc_info()
if settings.error_handler.handle_error(wrapper, ex, traceback):
raise SkipWrapper
else:
raise
else:
raise
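# Worked example of get_mangled_name above (template arguments invented);
# note that before the fix in the diff below, ':' passes through unchanged:
#
#   get_mangled_name('Container', ('unsigned int', 'std::string'))
#   # -> 'Container__lt__unsigned_int_std::string__gt__'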
|
Python
| 0.000022
|
@@ -2249,16 +2249,34 @@
ace(' ',
+ '_').replace(':',
'_') fo
|
f2a83197d9e0eaf04a3c062aa9a16c197ffd3c4d
|
add error logging
|
pyroute2/proxy.py
|
pyroute2/proxy.py
|
'''
Netlink proxy engine
'''
import errno
import struct
import threading
class NetlinkProxy(object):
'''
Proxy schemes::
User -> NetlinkProxy -> Kernel
|
<---------+
User <- NetlinkProxy <- Kernel
'''
def __init__(self, policy='forward', nl=None, lock=None):
self.nl = nl
self.lock = lock or threading.Lock()
self.pmap = {}
self.policy = policy
def handle(self, data):
#
# match the packet
#
ptype = struct.unpack('H', data[4:6])[0]
plugin = self.pmap.get(ptype, None)
if plugin is not None:
with self.lock:
try:
ret = plugin(data, self.nl)
if ret is None:
msg = struct.pack('IHH', 40, 2, 0)
msg += data[8:16]
msg += struct.pack('I', 0)
# nlmsgerr struct alignment
msg += b'\0' * 20
return {'verdict': self.policy,
'data': msg}
else:
return ret
except Exception as e:
# errmsg
if isinstance(e, (OSError, IOError)):
code = e.errno
else:
code = errno.ECOMM
msg = struct.pack('HH', 2, 0)
msg += data[8:16]
msg += struct.pack('I', code)
msg += data
msg = struct.pack('I', len(msg) + 4) + msg
return {'verdict': 'error',
'data': msg}
return None
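# Hypothetical plugin registration (message type and handler invented):
#
#   def ack_all(data, nl):
#       # returning None makes handle() synthesize an nlmsgerr with code 0
#       return None
#
#   proxy = NetlinkProxy(policy='return')
#   proxy.pmap[16] = ack_all  # 16 == RTM_NEWLINK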
|
Python
| 0.000004
|
@@ -49,16 +49,48 @@
struct%0A
+import logging%0Aimport traceback%0A
import t
@@ -1257,16 +1257,74 @@
n as e:%0A
+ logging.error(traceback.format_exc())%0A
|
cf0c31f553ea5a8265bd3213a4c5d384d0e72127
|
Unify double quotes.
|
mail_forward/wizard/mail_forward.py
|
mail_forward/wizard/mail_forward.py
|
# -*- encoding: utf-8 -*-
# Odoo, Open Source Management Solution
# Copyright (C) 2014-2015 Grupo ESOC <www.grupoesoc.es>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from openerp.osv import fields, orm
class MailComposeForward(orm.TransientModel):
"""Allow forwarding a message.
It duplicates the message and optionally attaches it to another object
of the database and sends it to another recipients than the original one.
"""
_name = "mail.compose.forward"
_inherit = "mail.compose.message"
_models = [
"crm.lead",
"crm.meeting",
"crm.phonecall",
"mail.group",
"note.note",
"product.product",
"project.project",
"project.task",
"res.partner",
"sale.order",
]
def models(self, cr, uid, context=None):
"""Get allowed models and their names.
It searches for the models on the database, so if modules are not
installed, models will not be shown.
"""
context = dict(context) if context else dict()
model_pool = self.pool.get('ir.model')
model_ids = model_pool.search(
cr, uid,
[('model', 'in', context.get("model_list", self._models))],
order="name", context=context)
model_objs = model_pool.browse(cr, uid, model_ids, context=context)
return [(m.model, m.name) for m in model_objs]
_columns = {
"destination_object_id": fields.reference(
"Destination object",
selection=models,
size=128,
help="Object where the forwarded message will be attached"),
"move_attachments": fields.boolean(
"Move attachments",
help="Attachments will be assigned to the chosen destination "
"object and you will be able to pick them from its "
"'Attachments' button, but they will not be there for "
"the current object if any. In any case you can always "
"open it from the message itself."),
# Override static relation table names in mail.compose.message
"partner_ids": fields.many2many(
"res.partner",
"mail_compose_forward_res_partner_rel",
"wizard_id",
"partner_id",
"Additional Contacts"),
"attachment_ids": fields.many2many(
"ir.attachment",
"mail_compose_forward_ir_attachments_rel",
"wizard_id",
"attachment_id",
"Attachments"),
}
def default_get(self, cr, uid, fields, context=None):
"""Fix default values.
Sometimes :meth:`openerp.addons.mail.mail_compose_message
.mail_compose_message.default_get` overwrites the default value
for the ``subject`` field, even when it gets the right default value
from the context.
This method fixes that by getting it from the context if available.
"""
context = dict(context) if context else dict()
result = super(MailComposeForward, self).default_get(
cr, uid, fields, context)
if "subject" in result and "default_subject" in context:
result["subject"] = context["default_subject"]
return result
def onchange_destination_object_id(self, cr, uid, ids,
destination_object_id, context=None):
"""Update some fields for the new message."""
context = dict(context) if context else dict()
model = res_id = res_name = False
if destination_object_id:
model, res_id = destination_object_id.split(",")
res_id = int(res_id)
context["model_list"] = context.get("model_list", [model])
model_name = dict(self.models(cr, uid, context=context)).get(model)
res_name = (self.pool.get(model)
.name_get(cr, uid, res_id, context=context)[0][1])
if model_name:
res_name = "%s %s" % (model_name, res_name)
return {"value": {"model": model,
"res_id": res_id,
"record_name": res_name}}
def send_mail(self, cr, uid, ids, context=None):
"""Send mail and execute the attachment relocation if needed."""
        # Let the parent do the hard work
result = super(MailComposeForward, self).send_mail(
cr, uid, ids, context=context)
# Relocate attachments if needed
att_pool = self.pool.get("ir.attachment")
for wz in self.browse(cr, uid, ids, context=context):
if (wz.move_attachments and
wz.model and
wz.res_id and
wz.attachment_ids):
att_pool.write(
cr,
uid,
[att.id for att in wz.attachment_ids],
{"res_model": wz.model, "res_id": wz.res_id},
context=context)
return result
|
Python
| 0.000134
|
@@ -1707,17 +1707,17 @@
get(
-'
+%22
ir.model
')%0A
@@ -1712,17 +1712,17 @@
ir.model
-'
+%22
)%0A
@@ -1793,21 +1793,21 @@
%5B(
-'
+%22
model
-', 'in'
+%22, %22in%22
, co
|
82379ddc145673cca008127099e4bdc2e0aa503b
|
make sure we won't traverse 'None' object
|
pyswagger/scan.py
|
pyswagger/scan.py
|
from __future__ import absolute_import
from .base import BaseObj
from .utils import scope_compose
import six
def default_tree_traversal(root):
""" default tree traversal """
objs = [(None, None, root)]
while len(objs) > 0:
scope, name, obj = objs.pop()
# get children
new_scope = scope_compose(scope, name)
objs.extend(map(lambda c: (new_scope,) + c, obj._children_))
yield scope, name, obj
class DispatcherMeta(type):
""" metaclass for Dispatcher
"""
def __new__(metacls, name, bases, spc):
if 'obj_route' not in spc.keys():
            # forcibly create a new obj_route
# but not share the same one with parents.
spc['obj_route'] = {}
spc['result_fn'] = [None]
return type.__new__(metacls, name, bases, spc)
class Dispatcher(six.with_metaclass(DispatcherMeta, object)):
""" Dispatcher
"""
obj_route = {}
result_fn = [None]
@classmethod
def __add_route(cls, t, f):
"""
"""
if not issubclass(t, BaseObj):
raise ValueError('target_cls should be a subclass of BaseObj, but got:' + str(t))
# allow register multiple handler function
# against one object
if t in cls.obj_route.keys():
cls.obj_route[t].append(f)
else:
cls.obj_route[t] = [f]
@classmethod
def register(cls, target):
"""
"""
def outer_fn(f):
# what we did is simple,
# register target_cls as key, and f as callback
# then keep this record in cls.
for t in target:
cls.__add_route(t, f)
# nothing is decorated. Just return original one.
return f
return outer_fn
@classmethod
def result(cls, f):
"""
"""
# avoid bound error
cls.result_fn = [f]
return f
class Scanner(object):
""" Scanner
"""
def __init__(self, app):
super(Scanner, self).__init__()
self.__app = app
@property
def app(self):
return self.__app
def __build_route(self, route):
"""
"""
ret = []
for r in route:
for attr in r.__class__.__dict__:
o = getattr(r, attr)
if type(o) == DispatcherMeta:
ret.append((r, o.obj_route, o.result_fn[0]))
return ret
def scan(self, route, root, nexter=default_tree_traversal):
"""
"""
merged_r = self.__build_route(route)
for scope, name, obj in nexter(root):
for the_self, r, res in merged_r:
def handle_cls(cls):
f = r.get(cls, None)
if f:
for ff in f:
ret = ff(the_self, scope, name, obj, self.app)
if res:
res(the_self, ret)
for cls in obj.__class__.__mro__[:-1]:
if cls is BaseObj:
break
handle_cls(cls)
|
Python
| 0.99859
|
@@ -2527,32 +2527,121 @@
%22%22%22%0A %22%22%22%0A
+ if root == None:%0A raise ValueError('Can%5C't scan because root==None')%0A%0A
merged_r
|
d10ff922cdd9eab4ff78bd94b7170f74148f97f3
|
Version bump to 1.10
|
pyglui/__init__.py
|
pyglui/__init__.py
|
__version__ = '1.9'
|
Python
| 0
|
@@ -14,7 +14,8 @@
'1.
-9
+10
'%0A
|
4aad9edfbcb657f2c1f97dc73b245f8fa92341db
|
define a run method that does nothing
|
pyipmi/__init__.py
|
pyipmi/__init__.py
|
"""pyipmi provides IPMI client functionality"""
from __future__ import print_function
__all__ = ['Handle', 'Tool', 'Command', 'make_bmc']
class Handle:
"""A handle to speak with a BMC
Handles use a Tool to speak with a BMC. It's basically a session handle
from its user's perspective, although handles may or may not use a single
ipmi session for their duration, depending on their implementation.
The Handle class itself is concrete, but may become abstract in the future.
"""
def __init__(self, bmc, tool_class, command_list):
"""
Arguments:
bmc -- A BMC object
tool_class -- the class of the tool to be used for this handle,
for example, IpmiTool.
command_list -- a list of Commands to be made available to this handle.
"""
self.bmc = bmc
self._tool = tool_class(self, command_list)
self._add_command_stubs(command_list)
self._log_file = None
def _add_command_stubs(self, command_list):
"""Adds command methods to an instance of Handle
Each command in the command_list supplied to init will add a method to
this handle instance. Calling that method causes the command to be issued.
"""
for command in command_list:
self._add_command_stub(command)
def _add_command_stub(self, command):
"""Add a a method for a command"""
def _cmd(*args, **kwargs):
"""Call the method of the same name on the tool"""
tool_method = getattr(self._tool, command)
return tool_method(*args, **kwargs)
setattr(self, command, _cmd)
def set_log(self, log_file):
"""Setup a logger for the handle
Arguments:
log_file -- a file like object
"""
self._log_file = log_file
def log(self, string):
"""Write a string to a log
The log is flushed after log is written
Arguments:
string -- the string to log."""
if (self._log_file):
print(string, file = self._log_file)
self._log_file.flush()
class Tool:
"""A tool implements communications with a BMC
Tool is an abstract class - it needs a 'run' method defined to be useful.
Tool implementations vary in the way they implement IPMI communications.
The IpmiTool implementation uses high level ipmitool commands executed via
subprocesses. A freeipmi implementation could do the same using freeipmi
commands, or there could be a RawIpmiTool implementation that used IpmiTool
with raw commands. Another possibility is implementing IPMI natively in
python, and having a NativeIpmi Tool implementation for that.
Tool instances are bound to handle instances - each tool has exactly one
handle.
Tool instances are created with a list of commands - each command in the
list causes a method (named after the command) to be added to the tool
    for executing the command. Commands in the list must implement support
    for the tool - each tool has its own way of executing them.
    Concrete implementations should go in the tools directory. An example
    concrete implementation is the IpmiTool class.
"""
def __init__(self, handle, command_list):
"""
Arguments:
handle -- the handle to which this command is bound
command_list -- the list of commands the tool can execute
"""
self._handle = handle
self._add_command_stubs(command_list)
self._command_list = command_list
def _add_command_stubs(self, command_list):
"""Add command methods to this Tool instance
Just like handles, tools get a method per command in command_list
"""
for command in command_list:
self._add_command_stub(command)
def _add_command_stub(self, command):
"""Add an individual command method"""
def _cmd(*args, **kwargs):
"""An individual command method.
Uses this tool's run method to execute a command in this
tool's special way."""
inst = self._command_list[command](self, *args, **kwargs)
return self.run(inst)
setattr(self, command, _cmd)
def _log(self, string):
"""Log a message via this tool's handle"""
self._handle.log(string)
class Command:
"""A Command describes a specific IPMI command"""
def __init__(self, tool, **params):
self._tool = tool
self._params = params
def make_bmc(bmc_class, logfile = None, **kwargs):
"""Returns a bmc object with 'default' settings
This uses IpmiTool for the tool,the base Handle class, and
the default "ipmi_commands" list of IPMI commands.
kwargs is combined with those default settings into a single
dict with its contents passed as keyword args when calling
bmc_class.
Arguments:
bmc_class -- called w/ kwargs as its parameter to get the
object to return.
Keyword arguments:
logfile -- an optional file object for logging (default none)
"""
from commands import ipmi_commands
from tools import IpmiTool
bmc_kwargs = {
'tool_class' : IpmiTool,
'handle_class' : Handle,
'command_list' : ipmi_commands
}
bmc_kwargs.update(kwargs)
bmc_obj = bmc_class(**bmc_kwargs)
bmc_obj.handle.set_log(logfile)
return bmc_obj
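# For illustration (class name and behaviour invented), a concrete Tool only
# has to provide run():
#
#   class PrintTool(Tool):
#       def run(self, command):
#           # log the command's parameters instead of talking to a BMC
#           self._log(str(command._params))
#           return command._params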
|
Python
| 0.000003
|
@@ -4376,16 +4376,117 @@
tring)%0A%0A
+ def run(self, command):%0A %22%22%22This should be defined in a subclass of Tool%22%22%22%0A pass%0A%0A
class Co
|
060f7400f27f0452eb4ed11ffed07aab16230126
|
Update cts link
|
pynayzr/streams.py
|
pynayzr/streams.py
|
# -*- coding: utf-8 -*-
import os
import asyncio
import subprocess
import tempfile
from PIL import Image
support_news = {
'ttv': 'https://www.youtube.com/watch?v=yk2CUjbyyQY',
'ctv': 'https://www.youtube.com/watch?v=XBne4oJGEhE',
'cts': 'https://www.youtube.com/watch?v=TL8mmew3jb8',
'pts': 'https://www.youtube.com/watch?v=_isseGKrquc',
'ebc': 'https://www.youtube.com/watch?v=dxpWqjvEKaM',
'cti': 'https://www.youtube.com/watch?v=wUPPkSANpyo',
'ftv': 'https://www.youtube.com/watch?v=XxJKnDLYZz4',
'set': 'https://www.youtube.com/watch?v=4ZVUmEUFwaY',
'tvbs': 'https://www.youtube.com/watch?v=Hu1FkdAOws0'
}
def get(news):
"""Get Livestream frame by news media.
Args:
news (str): news media list in support_news
Returns:
Image.Image: PIL Image instance
"""
if news not in support_news:
raise KeyError
# Other news using youtube
with tempfile.TemporaryDirectory() as temp_dir:
streamlink = [
'streamlink',
'-O',
support_news[news],
'720p'
]
ffmpeg = [
'ffmpeg',
'-i',
'-',
'-f',
'image2',
'-vframes',
'1',
'%s/out.jpg' % (temp_dir)
]
p1 = subprocess.Popen(streamlink, stderr=subprocess.DEVNULL, stdout=subprocess.PIPE)
p2 = subprocess.Popen(ffmpeg, stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL, stdin=p1.stdout)
p1.stdout.close()
p2.communicate()
return Image.open('%s/out.jpg' % (temp_dir))
async def aget(news):
"""Async get livestream frame by news media.
Args:
news (str): news media list in support_news
Returns:
Image.Image: PIL Image instance
"""
if news not in support_news:
raise KeyError
# Other news using youtube
with tempfile.TemporaryDirectory() as temp_dir:
streamlink = ' '.join([
'streamlink',
'-O',
support_news[news],
'720p'
])
ffmpeg = ' '.join([
'ffmpeg',
'-i',
'-',
'-f',
'image2',
'-vframes',
'1',
'%s/out.jpg' % (temp_dir)
])
read, write = os.pipe()
p1 = await asyncio.create_subprocess_shell(
streamlink,
stdout=write,
stderr=asyncio.subprocess.DEVNULL)
os.close(write)
p2 = await asyncio.create_subprocess_shell(
ffmpeg,
stdout=asyncio.subprocess.DEVNULL,
stderr=asyncio.subprocess.DEVNULL,
stdin=read)
os.close(read)
await p1.communicate()
await p2.communicate()
return Image.open('%s/out.jpg' % (temp_dir))
async def aget_all():
async def mark(key, coro):
return key, await coro
d = {news: aget(news) for news in support_news}
return {
key: result
for key, result in await asyncio.gather(
*(mark(key, coro) for key, coro in d.items()))
}
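# Illustrative synchronous use (requires streamlink and ffmpeg on PATH; the
# key is one of support_news):
#
#   frame = get('pts')   # PIL.Image.Image
#   frame.save('pts.jpg')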
|
Python
| 0
|
@@ -223,19 +223,19 @@
h?v=
-XBne4oJGEhE
+hVNbIZYi1nI
',%0A
|
1aa687b70aea9074ae28d1154ae0db4364add26e
|
Rewrite example.py.
|
pyoommf/example.py
|
pyoommf/example.py
|
import sim, mesh
# Mesh specification.
lx = ly = lz = 50e-9 # x, y, and z dimensions (m)
dx = dy = dz = 5e-9 # x, y, and z cell dimensions (m)
Ms = 8e5 # saturation magnetisation (A/m)
A = 1e-11 # exchange energy constant (J/m)
H = (1e6, 0, 0) # external magnetic field (A/m)
m_init = (0, 0, 1) # initial magnetisation
t_sim = 1e-9 # simulation time (s)
# Create a mesh.
mesh = mesh.Mesh(lx, ly, lz, dx, dy, dz)
# Create a simulation object.
sim = sim.Sim(mesh, Ms)
# Add energies.
sim.add_exchange(A)
sim.add_demag()
sim.add_zeeman(H)
# Set initial magnetisation.
sim.set_m(m_init)
# Run simulation.
sim.run_until(t_sim)
# Get the results.
results = sim.result()
|
Python
| 0.000003
|
@@ -1,20 +1,125 @@
-import sim, mesh
+from sim import Sim%0Afrom mesh import Mesh%0Afrom exchange import Exchange%0Afrom demag import Demag%0Afrom zeeman import Zeeman
%0A%0A#
@@ -486,21 +486,16 @@
%0Amesh =
-mesh.
Mesh(lx,
@@ -552,20 +552,16 @@
.%0Asim =
-sim.
Sim(mesh
@@ -594,10 +594,10 @@
.add
-_e
+(E
xcha
@@ -602,24 +602,25 @@
hange(A)
+)
%0Asim.add
_demag()
@@ -615,24 +615,25 @@
.add
-_d
+(D
emag()
+)
%0Asim.add
_zee
@@ -632,18 +632,18 @@
.add
-_z
+(Z
eeman(H)
%0A%0A#
@@ -638,17 +638,39 @@
eeman(H)
-%0A
+)%0A%0Asim.create_mif()%0A%22%22%22
%0A# Set i
@@ -792,8 +792,12 @@
esult()%0A
+%22%22%22%0A
|
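Applying the decoded hunks to the snapshot gives the rewritten example below: module-qualified calls become direct class imports, the energy terms are passed as objects to a generic add(), and the run/inspect steps are commented out behind a triple-quoted string. This is a reconstruction from the diff, with the unchanged constant definitions elided:

from sim import Sim
from mesh import Mesh
from exchange import Exchange
from demag import Demag
from zeeman import Zeeman

# ... mesh and material constants unchanged ...

# Create a mesh.
mesh = Mesh(lx, ly, lz, dx, dy, dz)

# Create a simulation object.
sim = Sim(mesh, Ms)

# Add energies.
sim.add(Exchange(A))
sim.add(Demag())
sim.add(Zeeman(H))

sim.create_mif()
"""
# Set initial magnetisation.
sim.set_m(m_init)

# Run simulation.
sim.run_until(t_sim)

# Get the results.
results = sim.result()
"""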
50ae1d3fb7e14fec94831a4c6667c7b1ba2e073b
|
add python script
|
python/python2.py
|
python/python2.py
|
#!/usr/bin/python2
# -*- coding: UTF-8 -*-
# install
# sudo pip install pymongo
# sudo pip install MySQL-python
# sudo install_name_tool -change libmysqlclient.18.dylib /usr/local/mysql/lib/libmysqlclient.18.dylib /Library/Python/2.7/site-packages/_mysql.so
# sudo pip install requests
# sudo pip install threadpool
# sudo pip install apscheduler
# command
python -V
import sys, pprint
pprint.pprint(sys.path)
dir(copy)
help(copy.copy)
print copy.__doc__
print copy.__file__
import webbrowser
webbrowser.open("http://www.baidu.com")
# script
for letter in 'Python':
print 'current letter:', letter
fruits = ['banana', 'apple', 'mango']
for index in range(len(fruits)):
print 'current fruit:', fruits[index]
with open("/tmp/file.txt") as file:
do(file)
f = open(filename)
for line in f.readlines():
process(line)
f.close()
import fileinput
for line in fileinput.input(line):
process(line)
f = open(filename)
for line in f:
process(line)
f.close()
|
Python
| 0.000019
|
@@ -532,16 +532,152 @@
.com%22)%0A%0A
+import urllib%0Ahtml = urllib.urlopen(%22http://www.baidu.com%22)%0Atemp_file = urllib.urlretrieve(%22http://www.baidu.com%22)%0Aurllib.urlcleanup()%0A%0A
# script
|
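Decoded, the diff appends a urllib snippet after the webbrowser example. Note the snapshot is a scratch file of notes that mixes shell commands (python -V) with Python, so it is not runnable as a whole; the added lines themselves are valid Python 2:

import urllib
html = urllib.urlopen("http://www.baidu.com")           # file-like response object
temp_file = urllib.urlretrieve("http://www.baidu.com")  # (local_path, headers) tuple
urllib.urlcleanup()                                     # clear urlretrieve's temp-file cache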
c0cbc2458c42bfb116c0d631c837f042f66d33a8
|
Add explanatory comments to Python varargs script
|
python/varargs.py
|
python/varargs.py
|
def f(x, y=1000, *z):
print('x={} y={} z={}'.format(x,y,z))
f(0)
f(0,1)
f(0,1,2)
f(0,1,2,3)
f(0,1,2,3,4)
f(*[i for i in range(6)])
f(*range(7))
|
Python
| 0
|
@@ -64,84 +64,344 @@
f(0)
-%0Af(0,1)%0Af(0,1,2)%0Af(0,1,2,3)%0Af(0,1,2,3,4)%0Af(*%5Bi for i in range(6)%5D)%0Af(*range(7)
+ # x=0, y=1000, z=()%0Af(0,1) # x=0, y=1, z=()%0Af(0,1,2) # x=0, y=1, z=(2,)%0Af(0,1,2,3) # x=0, y=1, z=(2,3)%0Af(0,1,2,3,4) # x=0, y=1, z=(2,3,4)%0Af(*%5Bi for i in range(6)%5D) # x=0, y=1, z=(2,3,4,5)%0Af(*range(7)) # x=0, y=1, z=(2,3,4,5,6
)%0A
|
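The diff adds only inline comments; decoded, the annotated calls read as follows and show how positional arguments beyond x and y are collected into the tuple z:

f(0)                       # x=0, y=1000, z=()
f(0,1)                     # x=0, y=1, z=()
f(0,1,2)                   # x=0, y=1, z=(2,)
f(0,1,2,3)                 # x=0, y=1, z=(2,3)
f(0,1,2,3,4)               # x=0, y=1, z=(2,3,4)
f(*[i for i in range(6)])  # x=0, y=1, z=(2,3,4,5)
f(*range(7))               # x=0, y=1, z=(2,3,4,5,6)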
04c3cac3054626773bc0434453378cb295f7e38c
|
Add handling of invalid values
|
pytus2000/read.py
|
pytus2000/read.py
|
import pandas as pd
from .datadicts import diary
def read_diary_file(path_to_file):
return pd.read_csv(
path_to_file,
delimiter='\t',
nrows=50,
converters=_column_name_to_type_mapping(diary),
low_memory=False # some columns seem to have mixed types
)
def _column_name_to_type_mapping(module):
mapping = {}
for member in module.Variable:
try:
module.__dict__[member.name]
mapping[member.name] = _enum_converter(module.__dict__[member.name])
except KeyError:
pass # nothing to do; there is no enum
return mapping
def _enum_converter(enumcls):
def enum_converter(value):
if value == ' ':
return None
else:
return enumcls(value)
return enum_converter
|
Python
| 0.000007
|
@@ -154,26 +154,8 @@
t',%0A
- nrows=50,%0A
@@ -745,29 +745,189 @@
-return enumcls(value)
+try:%0A value = enumcls(value)%0A except ValueError as ve:%0A print(ve)%0A return None%0A else:%0A return value
%0A
|
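Decoded, the commit removes the nrows=50 debugging limit from read_csv and makes the converter swallow invalid enum values instead of propagating ValueError. The rewritten inner function:

def _enum_converter(enumcls):
    def enum_converter(value):
        if value == ' ':
            return None
        else:
            try:
                value = enumcls(value)
            except ValueError as ve:
                print(ve)
                return None
            else:
                return value
    return enum_converter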
fd421a4c5f7cdacdc98aa049b4650c9d1d62267a
|
Fix some issues with open.
|
grit/command/Open.py
|
grit/command/Open.py
|
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import platform
import random
from grit import Call
from grit import Git
from grit import GitRoot
from grit import Settings
from grit.String import startswith
HELP = """
grit open [filename]
Open the filename as a Github URL in the browser.
Selects the first file that starts with filename. If filename is missing,
opens the current directory in the browser.
"""
"""
What should we be able to open?
* The current directory.
* A file.
* A found file.
in
* our repo
* the upstream repo
* some other repo.
And:
* A pull request.
* the pull request for this branch, if any.
"""
SAFE = True
_OPEN_COMMANDS = {
'Darwin': 'open',
'Linux': 'xdg-open',
}
_URL = 'https://github.com/{user}/{project}/tree/{branch}/{path}'
def open_url(url):
Call.call('%s %s' % (_OPEN_COMMANDS[platform.system()], url))
def open(filename=''):
if not platform.system() in _OPEN_COMMANDS:
raise ValueError("Can't open a URL for platform.system() = " + plat)
branch = Git.branch()
full_path = os.getcwd()
if filename:
path, f = os.path.split(filename)
full_path = os.path.join(full_path, path)
if not os.path.exists(full_path):
raise ValueError("Path %s doesn't exist." % full_path)
if f:
for p in os.listdir(full_path):
if startswith(p, f):
full_path = os.path.join(full_path, p)
break
else:
raise ValueError("Can't find file matching " + filename)
url = _URL.format(
branch=Git.branch(),
path=os.path.relpath(full_path, GitRoot.ROOT),
project=Settings.PROJECT,
user=Settings.USER)
open_url(url)
|
Python
| 0
|
@@ -851,13 +851,215 @@
url(
-url):
+branch, path,%0A project=Settings.PROJECT,%0A user=Settings.USER):%0A path = os.path.relpath(path, GitRoot.ROOT)%0A u = _URL.format(branch=branch, path=path, project=project, user=user)
%0A
@@ -1116,18 +1116,16 @@
em()%5D, u
-rl
))%0A%0Adef
@@ -1820,187 +1820,50 @@
-url = _URL.format(%0A branch=Git.branch(),%0A path=os.path.relpath(full_path, GitRoot.ROOT),%0A project=Settings.PROJECT,%0A user=Settings.USER)%0A open_url(url
+open_url(branch=Git.branch(), path=full_path
)%0A
|
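Decoded, the commit moves URL construction out of open() and into open_url(), which now takes branch and path and derives the repo-relative path itself. A reconstruction of the two affected spots:

def open_url(branch, path,
             project=Settings.PROJECT,
             user=Settings.USER):
    path = os.path.relpath(path, GitRoot.ROOT)
    u = _URL.format(branch=branch, path=path, project=project, user=user)
    Call.call('%s %s' % (_OPEN_COMMANDS[platform.system()], u))

# ... and the tail of open() shrinks to:
    open_url(branch=Git.branch(), path=full_path)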
ace38408875e31e1dfc8e6b1f2e2bf956fffc761
|
- message type 'groupchat' is valid
|
pyxmpp/message.py
|
pyxmpp/message.py
|
#
# (C) Copyright 2003 Jacek Konieczny <jajcus@bnet.pl>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License Version
# 2.1 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
import libxml2
from stanza import Stanza,StanzaError
from utils import to_utf8,from_utf8
message_types=("normal","chat","headline","error")
class Message(Stanza):
stanza_type="message"
def __init__(self,node=None,**kw):
self.node=None
if isinstance(node,Message):
pass
elif isinstance(node,Stanza):
raise TypeError,"Couldn't make Message from other Stanza"
elif isinstance(node,libxml2.xmlNode):
pass
elif node is not None:
raise TypeError,"Couldn't make Message from %r" % (type(node),)
if kw.has_key("type") and kw["type"] and kw["type"] not in message_types:
raise StanzaError,"Invalid message type: %r" % (kw["type"],)
if kw.has_key("body"):
body=kw["body"]
del kw["body"]
else:
body=None
if kw.has_key("subject"):
subject=kw["subject"]
del kw["subject"]
else:
subject=None
if kw.has_key("thread"):
thread=kw["thread"]
del kw["thread"]
else:
thread=None
if node is None:
node="message"
apply(Stanza.__init__,[self,node],kw)
if subject is not None:
self.node.newTextChild(None,"subject",to_utf8(subject))
if body is not None:
self.node.newTextChild(None,"body",to_utf8(body))
if thread is not None:
self.node.newTextChild(None,"thread",to_utf8(thread))
def get_subject(self):
n=self.xpath_eval("subject")
if n:
return from_utf8(n[0].getContent())
else:
return None
def get_thread(self):
n=self.xpath_eval("thread")
if n:
return from_utf8(n[0].getContent())
else:
return None
def copy(self):
return Message(self)
def get_body(self):
n=self.xpath_eval("body")
if n:
return from_utf8(n[0].getContent())
else:
return None
def make_error_response(self,cond):
if self.get_type() == "error":
raise StanzaError,"Errors may not be generated in response to errors"
m=Message(type="error",fr=self.get_to(),to=self.get_from(),
id=self.get_id(),error_cond=cond)
if self.node.children:
for n in list(self.node.children):
n=n.copyNode(1)
m.node.children.addPrevSibling(n)
return m
|
Python
| 0.998913
|
@@ -836,16 +836,28 @@
,%22error%22
+,%22groupchat%22
)%0A%0Aclass
|
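Decoded, the change is a one-token addition to the accepted type tuple, so Message(type="groupchat") now passes the StanzaError check in __init__:

message_types=("normal","chat","headline","error","groupchat")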
dbde102d14632bbaef7d6319d0742ac2819d6e38
|
Implement the given spec.
|
mlab-ns-simulator/mlabsim/update.py
|
mlab-ns-simulator/mlabsim/update.py
|
"""
This approximates the mlab-ns slice information gathering. The actual
system uses nagios and we're not certain about the details. This much
simplified version is just a web URL anyone may PUT data into.
Warning: This doesn't have any security properties! We need a way to
prevent the addition of malicious entries.
"""
import logging
import json
from twisted.web import resource
from twisted.web.server import NOT_DONE_YET
DBEntryNames = [
'city',
'country',
'fqdn',
'ip',
'port',
'site',
'tool_extra',
]
class UpdateResource (resource.Resource):
def __init__(self, db):
"""db is a dict which will be modified to map { fqdn -> other_details }"""
resource.Resource.__init__(self)
self._db = db
self._log = logging.getLogger(type(self).__name__)
def render_PUT(self, request):
body = request.content.read()
self._log.debug('Request body: %r', body)
try:
dbentry = json.loads(body)
except ValueError:
request.setResponseCode(400, 'invalid')
request.finish()
return NOT_DONE_YET
fqdn = dbentry['fqdn']
self._db[fqdn] = dbentry
request.setResponseCode(200, 'ok')
request.finish()
return NOT_DONE_YET
|
Python
| 0.000495
|
@@ -1038,75 +1038,263 @@
-request.setResponseCode(400, 'invalid')%0A request.finish(
+self._send_response(request, 400, 'invalid', 'Malformed JSON body.')%0A return NOT_DONE_YET%0A%0A try:%0A fqdn = dbentry%5B'fqdn'%5D%0A except KeyError:%0A self._send_response(request, 400, 'invalid', %22Missing 'fqdn' field.%22
)%0A
@@ -1324,36 +1324,46 @@
NE_YET%0A%0A
+self._db%5B
fqdn
+%5D
= dbentry%5B'fqdn
@@ -1356,24 +1356,16 @@
dbentry
-%5B'fqdn'%5D
%0A%0A
@@ -1376,121 +1376,295 @@
lf._
-db%5Bfqdn%5D = dbentry%0A%0A request.setResponseCode(200, 'ok')%0A request.finish()%0A%0A return NOT_DONE_YET
+send_response(request, 200, 'ok', 'Ok.')%0A return NOT_DONE_YET%0A%0A def _send_response(self, request, code, status, message):%0A request.setResponseCode(code, status)%0A request.setHeader('content-type', 'text/plain')%0A request.write(message)%0A request.finish()%0A
%0A
|
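Decoded, the commit centralises response writing in a _send_response() helper and adds a guard for a missing 'fqdn' key, matching the spec named in the subject. render_PUT after the change, reconstructed from the hunks:

    def render_PUT(self, request):
        body = request.content.read()
        self._log.debug('Request body: %r', body)

        try:
            dbentry = json.loads(body)
        except ValueError:
            self._send_response(request, 400, 'invalid', 'Malformed JSON body.')
            return NOT_DONE_YET

        try:
            fqdn = dbentry['fqdn']
        except KeyError:
            self._send_response(request, 400, 'invalid', "Missing 'fqdn' field.")
            return NOT_DONE_YET

        self._db[fqdn] = dbentry

        self._send_response(request, 200, 'ok', 'Ok.')
        return NOT_DONE_YET

    def _send_response(self, request, code, status, message):
        request.setResponseCode(code, status)
        request.setHeader('content-type', 'text/plain')
        request.write(message)
        request.finish()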
35a8be144c4924ae787da9c05087aee8d25bb3f5
|
refactor __get and __post
|
morfeu/tsuru/client.py
|
morfeu/tsuru/client.py
|
import requests
import logging
from .exceptions import TsuruClientBadResponse
from morfeu.settings import TSURU_TOKEN, TIMEOUT, TSURU_HOST, POOL_WHITELIST
from morfeu.settings import PLATFORM_BLACKLIST, TSURU_APP_PROXY_URL
LOG = logging.getLogger(__name__)
class TsuruClientUrls(object):
@classmethod
def list_apps_url(cls, pool=""):
return "{}/apps?pool={}".format(TSURU_HOST, pool)
@classmethod
def get_app_url(cls, app_name):
return "{0}/apps/{1}".format(TSURU_HOST, app_name)
@classmethod
def get_stop_url(cls, app_name=None, process_name=None):
return "{0}/apps/{1}/stop?process={2}".format(TSURU_HOST, app_name, process_name)
@classmethod
def get_sleep_url(cls, app_name=None, process_name=None, proxy_url=None):
return "{0}/apps/{1}/sleep?proxy={3}&process={2}".format(TSURU_HOST,
app_name,
process_name,
proxy_url)
class TsuruClient(object):
def __init__(self):
self.timeout = TIMEOUT
self.headers = {'Authorization': "bearer {0}".format(TSURU_TOKEN)}
def __get(self, url=None, params={}):
r = requests.get(url, params=params, headers=self.headers, timeout=self.timeout)
if r.status_code == requests.codes.ok:
return r.json()
else:
raise TsuruClientBadResponse("Bad Request {}".format(r.status_code))
def __post(self, url=None, payload={}):
r = requests.post(url, data=payload, headers=self.headers, timeout=self.timeout)
if r.status_code != requests.codes.ok:
raise TsuruClientBadResponse("Bad Request {}".format(r.status_code))
return r
def list_apps(self, type=None, domain=None):
"""
:returns [{"units": [{"ProcessName" : "web"}]}]
"""
LOG.info("Getting apps of type \"{}\" and domain \"{}\"".format(type, domain))
url = TsuruClientUrls.list_apps_url(pool=POOL_WHITELIST)
app_list = []
try:
apps = self.__get(url=url)
except (TsuruClientBadResponse, requests.exceptions.Timeout) as e:
LOG.error(e)
return app_list
for app in apps:
if domain:
if domain not in app.get("ip", ""):
continue
platform = app.get("platform", "")
units = app.get('units', [])
units_list = []
for unit in units:
if unit.get("ProcessName", "") == "web" or platform in PLATFORM_BLACKLIST:
units_list.append(unit["ID"])
if units_list:
app_list.append({app["name"]: units_list})
return app_list
def get_app(self, app_name=None):
if app_name:
url = TsuruClientUrls.get_app_url(app_name)
try:
return self.__get(url=url)
except (TsuruClientBadResponse, requests.exceptions.Timeout) as e:
LOG.error(e)
return {}
else:
return {}
def stop_app(self, app_name=None, process_name="web"):
if not app_name:
return False
url = TsuruClientUrls.get_stop_url(app_name=app_name, process_name=process_name)
try:
req = self.__post(url=url)
LOG.info("App {0} stopped... {1}".format(app_name, req.content))
return True
except (TsuruClientBadResponse, requests.exceptions.Timeout) as e:
LOG.error(e)
return False
def sleep_app(self, app_name=None, process_name="web", proxy_url=None):
if not app_name:
return False
url = TsuruClientUrls.get_sleep_url(app_name=app_name,
process_name=process_name,
proxy_url=TSURU_APP_PROXY_URL)
try:
req = self.__post(url=url)
LOG.info("App {0} asleep... {1}".format(app_name, req.content))
return True
except (TsuruClientBadResponse, requests.exceptions.Timeout) as e:
LOG.error(e)
return False
|
Python
| 0.000147
|
@@ -1262,29 +1262,24 @@
et(self, url
-=None
, params=%7B%7D)
@@ -1275,18 +1275,20 @@
params=
-%7B%7D
+None
):%0A
@@ -1566,21 +1566,16 @@
elf, url
-=None
, payloa
@@ -1576,18 +1576,20 @@
payload=
-%7B%7D
+None
):%0A
|
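Decoded, the refactor makes url a required positional argument and replaces the mutable {} defaults with None, the standard fix for Python's shared-default-argument pitfall. Only the signatures change:

    def __get(self, url, params=None):
        ...

    def __post(self, url, payload=None):
        ...

requests treats params=None and data=None like an empty mapping, so behaviour is otherwise unchanged.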
b8fc002fbc8a83486567c232d62678c3b4bb39b8
|
Update new path
|
hassio/addons/git.py
|
hassio/addons/git.py
|
"""Init file for HassIO addons git."""
import asyncio
import logging
from pathlib import Path
import shutil
import git
from .util import get_hash_from_repository
from ..const import URL_HASSIO_ADDONS
_LOGGER = logging.getLogger(__name__)
class AddonsRepo(object):
"""Manage addons git repo."""
def __init__(self, config, loop, path, url):
"""Initialize git base wrapper."""
self.config = config
self.loop = loop
self.repo = None
self.path = path
self.url = url
self._lock = asyncio.Lock(loop=loop)
async def load(self):
"""Init git addon repo."""
if not self.path.is_dir():
return await self.clone()
async with self._lock:
try:
_LOGGER.info("Load addon %s repository", self.path)
self.repo = await self.loop.run_in_executor(
None, git.Repo, str(self.path))
except (git.InvalidGitRepositoryError, git.NoSuchPathError) as err:
_LOGGER.error("Can't load %s repo: %s.", self.path, err)
return False
return True
async def clone(self):
"""Clone git addon repo."""
async with self._lock:
try:
_LOGGER.info("Clone addon %s repository", self.url)
self.repo = await self.loop.run_in_executor(
None, git.Repo.clone_from, self.url, str(self.path))
except (git.InvalidGitRepositoryError, git.NoSuchPathError) as err:
_LOGGER.error("Can't clone %s repo: %s.", self.url, err)
return False
return True
async def pull(self):
"""Pull git addon repo."""
if self._lock.locked():
_LOGGER.warning("It is already a task in progress.")
return False
async with self._lock:
try:
_LOGGER.info("Pull addon %s repository", self.url)
await self.loop.run_in_executor(
None, self.repo.remotes.origin.pull)
except (git.InvalidGitRepositoryError, git.NoSuchPathError) as err:
_LOGGER.error("Can't pull %s repo: %s.", self.url, err)
return False
return True
class AddonsRepoHassIO(AddonsRepo):
"""HassIO addons repository."""
def __init__(self, config, loop):
"""Initialize git hassio addon repository."""
super().__init__(
config, loop, config.path_addons_repo, URL_HASSIO_ADDONS)
class AddonsRepoCustom(AddonsRepo):
"""Custom addons repository."""
def __init__(self, config, loop, url):
"""Initialize git hassio addon repository."""
path = Path(config.path_addons_git, get_hash_from_repository(url))
super().__init__(config, loop, path, url)
def remove(self):
"""Remove a custom addon."""
if self.path.is_dir():
_LOGGER.info("Remove custom addon repository %s", self.url)
def log_err(funct, path, _):
"""Log error."""
_LOGGER.warning("Can't remove %s", path)
shutil.rmtree(str(self.path), onerror=log_err)
|
Python
| 0.000001
|
@@ -2507,20 +2507,20 @@
_addons_
+co
re
-po
, URL_HA
|
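Decoded, the change renames the config attribute used for the built-in repository path, so the super().__init__ call in AddonsRepoHassIO now reads:

        super().__init__(
            config, loop, config.path_addons_core, URL_HASSIO_ADDONS)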
9ae12dbc66827c74ef8202b1cbd26bc896f5081c
|
Handle importing local modules that don't exist
|
hesitate/rewriter.py
|
hesitate/rewriter.py
|
import ast
import imp
import itertools
import os.path
import sys
class RewriterHook(object):
def __init__(self):
self.loaded_modules = {}
def find_module(self, full_name, path=None):
if path and not isinstance(path, list):
path = list(path)
if path and len(path) == 1:
path = path[0]
modpath = os.path.join(path, full_name.rpartition('.')[2] + '.py')
desc = ('.py', 'r', imp.PY_SOURCE)
fobj = open(modpath)
else:
try:
fobj, modpath, desc = imp.find_module(full_name, path)
except ImportError:
return None
suffix, mode, modtype = desc
try:
if modtype == imp.PY_SOURCE:
code = rewrite_source(fobj.read(), modpath)
self.loaded_modules[full_name] = code
return self
finally:
if fobj:
fobj.close()
def load_module(self, name):
code = self.loaded_modules[name]
mod = imp.new_module(name)
exec(code, mod.__dict__)
sys.modules[name] = mod
return mod
def attach_hook():
sys.meta_path.insert(0, RewriterHook())
def rewrite_source(source, modpath):
try:
parsed = ast.parse(source)
except SyntaxError:
return None
rewritten = AssertionTransformer(modpath).visit(parsed)
return compile(rewritten, modpath, 'exec')
class AssertionTransformer(ast.NodeTransformer):
ASSERTION_TEST_IMPORTED_NAME = '@hesitate_should_assert'
ASSERTION_TIMER_IMPORTED_NAME = '@hesitate_timed'
HAS_WITHITEM = hasattr(ast, 'withitem')
def __init__(self, modpath):
self.modpath = modpath
def _is_docstring(self, node):
return isinstance(node, ast.Expr) \
and isinstance(node.value, ast.Str)
def _is_future_import(self, node):
return isinstance(node, ast.ImportFrom) \
and node.level == 0 \
and node.module == '__future__'
def visit_Module(self, node):
importnode = ast.ImportFrom(
module='hesitate.driver',
names=[
ast.alias(
name='should_assert',
asname=self.ASSERTION_TEST_IMPORTED_NAME),
ast.alias(
name='timed',
asname=self.ASSERTION_TIMER_IMPORTED_NAME)],
lineno=0,
col_offset=0)
pre_nodes = list(itertools.takewhile(
lambda node: (self._is_docstring(node)
or self._is_future_import(node)),
node.body))
rest_nodes = [self.visit(n) for n in node.body[len(pre_nodes):]]
new_mod = ast.Module(
body=pre_nodes + [importnode] + rest_nodes,
lineno=0,
col_offset=0)
return new_mod
def visit_Assert(self, node):
srcname_node = ast.copy_location(ast.Str(self.modpath), node)
lineno_node = ast.copy_location(ast.Num(node.lineno), node)
col_offset_node = ast.copy_location(ast.Num(node.col_offset), node)
assertion_test_name = ast.copy_location(
ast.Name(self.ASSERTION_TEST_IMPORTED_NAME, ast.Load()),
node)
func_call = ast.copy_location(
ast.Call(
func=assertion_test_name,
args=[srcname_node, lineno_node, col_offset_node],
keywords=[]),
node)
timer_name = ast.copy_location(
ast.Name(self.ASSERTION_TIMER_IMPORTED_NAME, ast.Load()),
node)
timer_call = ast.copy_location(
ast.Call(
func=timer_name,
args=[srcname_node, lineno_node, col_offset_node],
keywords=[]),
node)
with_node = ast.copy_location(
self._make_with_node(timer_call, [node]),
node)
new_node = ast.copy_location(
ast.If(
test=func_call,
body=[with_node],
orelse=[]),
node)
return new_node
def _make_with_node(self, with_expr, body):
if self.HAS_WITHITEM:
return ast.With(
items=[ast.withitem(
context_expr=with_expr,
optional_vars=None)],
body=body)
else:
return ast.With(
context_expr=with_expr,
optional_vars=None,
body=body)
|
Python
| 0
|
@@ -479,28 +479,105 @@
-fobj = open(modpath)
+try:%0A fobj = open(modpath)%0A except IOError:%0A return None
%0A
|
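Decoded, the commit wraps the local-module open() in a try/except so that a nonexistent file makes find_module() return None, which tells the import machinery to fall through to the next finder instead of crashing with IOError. The affected lines inside find_module():

            desc = ('.py', 'r', imp.PY_SOURCE)
            try:
                fobj = open(modpath)
            except IOError:
                return None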
6715615d553f57ecbf9db674611f4d56bebf1a5e
|
Add missing EXC variable and combine tokenizer exceptions
|
spacy/sv/tokenizer_exceptions.py
|
spacy/sv/tokenizer_exceptions.py
|
# encoding: utf8
from __future__ import unicode_literals
from ..symbols import *
from ..language_data import PRON_LEMMA
# Verbs
for verb_data in [
{ORTH: "driver"},
{ORTH: "kör"},
{ORTH: "hörr", LEMMA: "hör"},
{ORTH: "fattar"},
{ORTH: "hajar", LEMMA: "förstår"},
{ORTH: "lever"},
{ORTH: "serr", LEMMA: "ser"},
{ORTH: "fixar"}
]:
verb_data_tc = dict(verb_data)
verb_data_tc[ORTH] = verb_data_tc[ORTH].title()
for data in [verb_data, verb_data_tc]:
EXC[data[ORTH] + "u"] = [
dict(data),
{ORTH: "u", LEMMA: PRON_LEMMA, NORM: "du"}
]
TOKENIZER_EXCEPTIONS = {
"jan.": [
{ORTH: "jan.", LEMMA: "januari"}
],
"febr.": [
{ORTH: "febr.", LEMMA: "februari"}
],
"feb.": [
{ORTH: "feb.", LEMMA: "februari"}
],
"apr.": [
{ORTH: "apr.", LEMMA: "april"}
],
"jun.": [
{ORTH: "jun.", LEMMA: "juni"}
],
"jul.": [
{ORTH: "jul.", LEMMA: "juli"}
],
"aug.": [
{ORTH: "aug.", LEMMA: "augusti"}
],
"sept.": [
{ORTH: "sept.", LEMMA: "september"}
],
"sep.": [
{ORTH: "sep.", LEMMA: "september"}
],
"okt.": [
{ORTH: "okt.", LEMMA: "oktober"}
],
"nov.": [
{ORTH: "nov.", LEMMA: "november"}
],
"dec.": [
{ORTH: "dec.", LEMMA: "december"}
],
"mån.": [
{ORTH: "mån.", LEMMA: "måndag"}
],
"tis.": [
{ORTH: "tis.", LEMMA: "tisdag"}
],
"ons.": [
{ORTH: "ons.", LEMMA: "onsdag"}
],
"tors.": [
{ORTH: "tors.", LEMMA: "torsdag"}
],
"fre.": [
{ORTH: "fre.", LEMMA: "fredag"}
],
"lör.": [
{ORTH: "lör.", LEMMA: "lördag"}
],
"sön.": [
{ORTH: "sön.", LEMMA: "söndag"}
],
"Jan.": [
{ORTH: "Jan.", LEMMA: "Januari"}
],
"Febr.": [
{ORTH: "Febr.", LEMMA: "Februari"}
],
"Feb.": [
{ORTH: "Feb.", LEMMA: "Februari"}
],
"Apr.": [
{ORTH: "Apr.", LEMMA: "April"}
],
"Jun.": [
{ORTH: "Jun.", LEMMA: "Juni"}
],
"Jul.": [
{ORTH: "Jul.", LEMMA: "Juli"}
],
"Aug.": [
{ORTH: "Aug.", LEMMA: "Augusti"}
],
"Sept.": [
{ORTH: "Sept.", LEMMA: "September"}
],
"Sep.": [
{ORTH: "Sep.", LEMMA: "September"}
],
"Okt.": [
{ORTH: "Okt.", LEMMA: "Oktober"}
],
"Nov.": [
{ORTH: "Nov.", LEMMA: "November"}
],
"Dec.": [
{ORTH: "Dec.", LEMMA: "December"}
],
"Mån.": [
{ORTH: "Mån.", LEMMA: "Måndag"}
],
"Tis.": [
{ORTH: "Tis.", LEMMA: "Tisdag"}
],
"Ons.": [
{ORTH: "Ons.", LEMMA: "Onsdag"}
],
"Tors.": [
{ORTH: "Tors.", LEMMA: "Torsdag"}
],
"Fre.": [
{ORTH: "Fre.", LEMMA: "Fredag"}
],
"Lör.": [
{ORTH: "Lör.", LEMMA: "Lördag"}
],
"Sön.": [
{ORTH: "Sön.", LEMMA: "Söndag"}
],
"sthlm": [
{ORTH: "sthlm", LEMMA: "Stockholm"}
],
"gbg": [
{ORTH: "gbg", LEMMA: "Göteborg"}
]
}
ORTH_ONLY = [
"ang.",
"anm.",
"bil.",
"bl.a.",
"dvs.",
"e.Kr.",
"el.",
"e.d.",
"eng.",
"etc.",
"exkl.",
"f.d.",
"fid.",
"f.Kr.",
"forts.",
"fr.o.m.",
"f.ö.",
"förf.",
"inkl.",
"jur.",
"kl.",
"kr.",
"lat.",
"m.a.o.",
"max.",
"m.fl.",
"min.",
"m.m.",
"obs.",
"o.d.",
"osv.",
"p.g.a.",
"ref.",
"resp.",
"s.a.s.",
"s.k.",
"st.",
"s:t",
"t.ex.",
"t.o.m.",
"ung.",
"äv.",
"övers."
]
|
Python
| 0.000001
|
@@ -115,16 +115,27 @@
_LEMMA%0A%0A
+%0AEXC = %7B%7D%0A%0A
# Verbs%0A
@@ -627,23 +627,17 @@
%5D%0A%0A
-TOKENIZER_EXCEP
+%0AABBREVIA
TION
@@ -3128,16 +3128,94 @@
%5D%0A%7D%0A%0A%0A
+TOKENIZER_EXCEPTIONS = dict(EXC)%0ATOKENIZER_EXCEPTIONS.update(ABBREVIATIONS)%0A%0A%0A
ORTH_ONL
|
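Decoded, the commit defines the EXC dict that the verb loop populates (previously an import-time NameError), renames the hand-written dict to ABBREVIATIONS, and merges the two into TOKENIZER_EXCEPTIONS. A sketch of the new layout, with the unchanged entries elided:

EXC = {}

# ... the verb loop fills EXC as before ...

ABBREVIATIONS = {
    "jan.": [{ORTH: "jan.", LEMMA: "januari"}],
    # ... remaining month, weekday and place entries unchanged ...
}

TOKENIZER_EXCEPTIONS = dict(EXC)
TOKENIZER_EXCEPTIONS.update(ABBREVIATIONS)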