column        type           range / values
------        ----           --------------
commit        stringlengths  40 .. 40
subject       stringlengths  1 .. 3.25k
old_file      stringlengths  4 .. 311
new_file      stringlengths  4 .. 311
old_contents  stringlengths  0 .. 26.3k
lang          stringclasses  3 values
proba         float64        0 .. 1
diff          stringlengths  0 .. 7.82k
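Each record below lists its fields in exactly the order of this column summary. As a minimal sketch of how one such record can be consumed — the field values are copied from the first record below, "old_contents" is elided, and treating the diff field as percent-encoded is an inference from the %xx escapes visible in it, not something the dataset documents — the standard library is enough:

# Minimal sketch, not part of the dataset: handle one record with the eight
# columns summarized above.
from urllib.parse import unquote

row = {
    "commit": "45c86ade944d9afe7bc8e627e25fa861489cd4b6",   # 40-char hash
    "subject": "fix a typo so that email is sent to the correct host",
    "old_file": "crate_project/settings/production/gondor.py",
    "new_file": "crate_project/settings/production/gondor.py",
    "old_contents": "...",   # full pre-change file text, elided here
    "lang": "Python",        # one of the 3 string classes
    "proba": 0.999541,       # float64 in [0, 1]
    "diff": "@@ -1100,33 +1100,24 @@ %5B%22EMAIL_HOST -_PASSWORD %22%5D%0AEMAIL_POR",
}

assert len(row["commit"]) == 40        # matches the 40 .. 40 bound above
# unquote() restores the raw hunk text: %5B -> "[", %22 -> '"', %0A -> newline.
print(unquote(row["diff"]))

Decoded, that first diff reads `@@ -1100,33 +1100,24 @@ ["EMAIL_HOST -_PASSWORD "]` followed by a newline and `EMAIL_POR`: a character-level hunk deleting `_PASSWORD`, which matches the row's subject line.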
45c86ade944d9afe7bc8e627e25fa861489cd4b6
fix a typo so that email is sent to the correct host
crate_project/settings/production/gondor.py
crate_project/settings/production/gondor.py
import os

from .base import *
from local_settings import *  # Instance specific settings (in deploy.settings_[INSTANCE_NAME]))

# Fix Email Settings
SERVER_EMAIL = "server@crate.io"
DEFAULT_FROM_EMAIL = "support@crate.io"

CACHES = {
    "default": {
        "BACKEND": "redis_cache.RedisCache",
        "LOCATION": ":".join([GONDOR_REDIS_HOST, str(GONDOR_REDIS_PORT)]),
        "KEY_PREFIX": "cache",
        "OPTIONS": {
            "DB": 0,
            "PASSWORD": GONDOR_REDIS_PASSWORD,
        }
    }
}

PYPI_DATASTORE_CONFIG = {
    "host": GONDOR_REDIS_HOST,
    "port": GONDOR_REDIS_PORT,
    "password": GONDOR_REDIS_PASSWORD,
}

LOCK_DATASTORE_CONFIG = PYPI_DATASTORE_CONFIG

# Configure Celery
BROKER_TRANSPORT = "redis"
BROKER_HOST = GONDOR_REDIS_HOST
BROKER_PORT = GONDOR_REDIS_PORT
BROKER_VHOST = "0"
BROKER_PASSWORD = GONDOR_REDIS_PASSWORD
BROKER_POOL_LIMIT = 10

CELERY_RESULT_BACKEND = "redis"
CELERY_REDIS_HOST = GONDOR_REDIS_HOST
CELERY_REDIS_PORT = GONDOR_REDIS_PORT
CELERY_REDIS_PASSWORD = GONDOR_REDIS_PASSWORD

SECRET_KEY = os.environ["SECRET_KEY"]

EMAIL_HOST = os.environ["EMAIL_HOST_PASSWORD"]
EMAIL_PORT = int(os.environ["EMAIL_PORT"])
EMAIL_HOST_USER = os.environ["EMAIL_HOST_USER"]
EMAIL_HOST_PASSWORD = os.environ["EMAIL_HOST_PASSWORD"]
EMAIL_USE_TLS = True

AWS_ACCESS_KEY_ID = os.environ["AWS_ACCESS_KEY_ID"]
AWS_SECRET_ACCESS_KEY = os.environ["AWS_SECRET_ACCESS_KEY"]

HAYSTACK_CONNECTIONS = {
    "default": {
        "ENGINE": os.environ["HAYSTACK_DEFAULT_ENGINE"],
        "URL": os.environ["HAYSTACK_DEFAULT_URL"],
        "INDEX_NAME": os.environ["HAYSTACK_DEFAULT_INDEX_NAME"],
    },
}

INTERCOM_USER_HASH_KEY = os.environ["INTERCOM_USER_HASH_KEY"]
Python
0.999541
@@ -1100,33 +1100,24 @@ %5B%22EMAIL_HOST -_PASSWORD %22%5D%0AEMAIL_POR
490ff333d7410f284be36ec938146dc3f36aa7dc
Change ordering of subreddits
main/gen_features.py
main/gen_features.py
__author__ = 'sharvey'

import multiprocessing

from corpus.mysql.reddit import RedditMySQLCorpus

from feature import ngram
from feature import lexical

import cred
import pprint
import re


def gen_feature(atuple):
    text = re.sub(r'https?://([a-zA-Z0-9\.\-_]+)[\w\-\._~:/\?#@!\$&\'\*\+,;=%%]*', '\\1',
                  atuple['text'], flags=re.MULTILINE)
    aset = set()
    bngram = ngram.get_byte_ngrams(text)
    for n in bngram['ngram_byte']:
        for k in bngram['ngram_byte'][n]:
            aset.add(('nb%d' % n, k))
    for n in bngram['ngram_byte_cs']:
        for k in bngram['ngram_byte_cs'][n]:
            aset.add(('nbcs%d' % n, k))
    wngram = ngram.get_word_ngrams(text)
    for n in wngram['ngram_word']:
        for k in wngram['ngram_word'][n]:
            aset.add(('nw%d' % n, ' '.join(k)))
    for n in wngram['ngram_word_clean']:
        for k in wngram['ngram_word_clean'][n]:
            aset.add(('nwc%d' % n, ' '.join(k)))
    words, clean_words = ngram.get_words(text)
    for word in words:
        aset.add(('w', word))
    for word in clean_words:
        aset.add(('cw', word))
    lex = lexical.get_symbol_dist(text)
    for k in lex['lex']:
        aset.add(('l', k))
    #pprint.pprint(aset)
    return set(aset)


if __name__ == '__main__':
    corpus = RedditMySQLCorpus()
    corpus.setup(**(cred.kwargs))
    corpus.create()
    pool = multiprocessing.Pool(multiprocessing.cpu_count())
    print('set up pool')
    chunk = 100
    j = 0
    feature_set = set()
    for reddit in ['worldnews', 'news', 'quantum', 'netsec', 'uwaterloo', 'gaming']:
        while True:
            print(j)
            rows = corpus.run_sql('SELECT `body` AS `text` FROM `comment` '
                                  'LEFT JOIN `submission` ON (`comment`.`submission_id`=`submission`.`id`) '
                                  'LEFT JOIN `reddit` ON (`submission`.`reddit_id`=`reddit`.`id`) '
                                  'WHERE `reddit`.`name`= \'%s\''
                                  'LIMIT %d, %d' % (reddit, j, chunk), None)
            if len(rows) == 0:
                break
            it = pool.imap_unordered(gen_feature, rows, 100)
            new_feature_set = set()
            while True:
                try:
                    atuple = it.next()
                    new_feature_set = new_feature_set.union(atuple)
                except StopIteration:
                    break
            new_feature_set.difference_update(feature_set)
            pprint.pprint(len(new_feature_set))
            corpus.run_sqls('INSERT IGNORE INTO `feature_map` (`type`, `feature`) VALUES (%s, %s)',
                            list(new_feature_set))
            corpus.cnx.commit()
            feature_set = feature_set.union(new_feature_set)
            j += chunk
Python
0.000002
@@ -1537,16 +1537,8 @@ ws', - 'news', 'qu @@ -1576,16 +1576,37 @@ 'gaming' +, 'news', 'AskReddit' %5D:%0A
b3bfc6e3949fcca58cbf84232432c966f5f5d8c6
fix indentation
analyzer/darwin/lib/dtrace/apicalls.py
analyzer/darwin/lib/dtrace/apicalls.py
#!/usr/bin/env python
# Copyright (C) 2015 Dmitry Rodionov
# This file is part of my GSoC'15 project for Cuckoo Sandbox:
#	http://www.cuckoosandbox.org
# This software may be modified and distributed under the terms
# of the MIT license. See the LICENSE file for details.

import os
import json
from common import *
from getpass import getuser
from subprocess import Popen
from collections import namedtuple
from tempfile import NamedTemporaryFile

apicall = namedtuple("apicall", "api args retval timestamp pid ppid tid")


def apicalls(target, **kwargs):
    """
    """
    if not target:
        raise Exception("Invalid target for apicalls()")

    output_file = NamedTemporaryFile()
    cmd = ["sudo", "/usr/sbin/dtrace", "-C"]
    if "timeout" in kwargs:
        cmd += ["-DANALYSIS_TIMEOUT=%d" % kwargs["timeout"]]
    cmd += ["-s", path_for_script("apicalls.d")]
    cmd += ["-DROOT=1"]
    cmd += ["-o", output_file.name]
    cmd += ["-DOUTPUT_FILE=\"%s\"" % output_file.name]

    if "run_as_root" in kwargs:
        run_as_root = kwargs["run_as_root"]
    else:
        run_as_root = False

    if "args" in kwargs:
        target_cmd = "%s %s" % (sanitize_path(target), " ".join(kwargs["args"]))
    else:
        target_cmd = sanitize_path(target)
    # When we don't want to run the target as root, we have to drop privileges
    # with `sudo -u current_user` right before calling the target.
    if not run_as_root:
        target_cmd = "sudo -u %s %s" % (getuser(), target_cmd)
    cmd += ["-c", target_cmd]

    # The dtrace script will take care of timeout itself, so we just launch
    # it asynchronously
    with open(os.devnull, "w") as f:
        handler = Popen(cmd, stdout=f, stderr=f, cwd=current_directory())

        # If we use `sudo -u` for dropping root privileges, we also have to
        # exclude it's output from the results
        sudo_pid = None
    for entry in filelines(output_file):
        if "## apicalls.d done ##" in entry.strip():
            break
        if len(entry.strip()) == 0:
            continue
        call = _parse_entry(entry.strip())
        if not run_as_root and sudo_pid is None:
            sudo_pid = call.pid
        elif call.pid != sudo_pid:
            yield call
    output_file.close()


def _parse_entry(entry):
    entry = entry.replace("\\0", "")
    parsed = json.loads(entry)

    api = parsed['api']
    args = parsed['args']
    retval = parsed['retval']
    timestamp = parsed['timestamp']
    pid = parsed['pid']
    ppid = parsed['ppid']
    tid = parsed['tid']

    return apicall(api, args, retval, timestamp, pid, ppid, tid)
Python
0.000005
@@ -1740,23 +1740,26 @@ - # If w +# When we'r e us -e +ing %60su @@ -1815,20 +1815,16 @@ to%0A - - # exclud @@ -1829,12 +1829,18 @@ ude -it's +sudo's own out @@ -1860,20 +1860,16 @@ results%0A - sudo
2a1407b34187cfba6c968a7b95e58ec1c115a8f6
Print functions
datacommons/examples/population_analysis.py
datacommons/examples/population_analysis.py
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Example analysis with DataCommons Python API. """

import pandas as pd
import datacommons


def main():
  dc = datacommons.Client()

  # Build a table with a single US state
  state_table = dc.get_states('United States', 'state', max_rows=1)

  # Add the state name and the 5 counties contained in that state
  state_table = dc.expand(
      state_table, 'name', 'state', 'state_name', outgoing=True)
  state_table = dc.expand(
      state_table, 'containedInPlace', 'state', 'county', outgoing=False,
      max_rows=2)
  state_table = dc.expand(
      state_table, 'name', 'county', 'county_name', outgoing=True)

  state_table = dc.get_populations(
      state_table, seed_col_name='county', new_col_name='county_population',
      population_type='Person', max_rows=100)
  with pd.option_context('display.width', 400, 'display.max_rows', 100):
    print state_table

  state_table = dc.get_populations(
      state_table, seed_col_name='county',
      new_col_name='county_18_24_years_population', population_type='Person',
      max_rows=100, age='USC/18To24Years')
  with pd.option_context('display.width', 400, 'display.max_rows', 100):
    print state_table

  state_table = dc.get_populations(
      state_table, seed_col_name='county', new_col_name='county_male_population',
      population_type='Person', max_rows=100, gender='Male')
  with pd.option_context('display.width', 400, 'display.max_rows', 100):
    print state_table

  state_table = dc.get_observations(
      state_table, seed_col_name='county_population',
      new_col_name='county_person_count', start_date='2012-01-01',
      end_date='2016-01-01', measured_property='count', stats_type='count')
  with pd.option_context('display.width', 400, 'display.max_rows', 100):
    print state_table


if __name__ == '__main__':
  main()
Python
0.00001
@@ -1113,17 +1113,17 @@ ax_rows= -2 +3 )%0A stat @@ -1462,33 +1462,33 @@ 100):%0A print - +( state_table%0A%0A s @@ -1474,32 +1474,33 @@ rint(state_table +) %0A%0A state_table @@ -1777,33 +1777,33 @@ 100):%0A print - +( state_table%0A%0A s @@ -1789,32 +1789,33 @@ rint(state_table +) %0A%0A state_table @@ -2077,33 +2077,33 @@ 100):%0A print - +( state_table%0A%0A s @@ -2097,16 +2097,17 @@ te_table +) %0A%0A stat @@ -2441,17 +2441,17 @@ print - +( state_ta @@ -2453,16 +2453,17 @@ te_table +) %0A%0A%0Aif __
d74b15485a0756ac1702fafd640f616f022b3f58
bump versions
equals/__init__.py
equals/__init__.py
from __future__ import absolute_import

__version__ = '0.0.2'

import numbers
import collections

from equals.equals import Equals as instance_of
from equals.constraints.anything_true import AnythingTrue
from equals.constraints.anything_false import AnythingFalse

anything = instance_of()
try:
    any_string = instance_of(basestring)
except NameError:
    any_string = instance_of(str)
any_number = instance_of(numbers.Number)
any_int = instance_of(int)
any_float = instance_of(float)
any_iterable = instance_of(collections.Iterable)
any_dict = instance_of(dict)
any_list = instance_of(list)
any_tuple = instance_of(tuple)
anything_false = AnythingFalse(anything)
anything_true = AnythingTrue(anything)
Python
0.000001
@@ -53,16 +53,17 @@ = '0.0.2 +1 '%0A%0Aimpor
9c2d1e9e841014dbc986b6e509b19f7f881969c4
Fix silly typo
openspending/lib/csvexport.py
openspending/lib/csvexport.py
import csv
import sys

from datetime import datetime

from openspending import model
from openspending.mongo import DBRef, ObjectId


def write_csv(entries, response):
    response.content_type = 'text/csv'

    # NOTE: this should be a streaming service but currently
    # I see no way to know the full set of keys without going
    # through the data twice.
    keys = set()
    rows = []

    for entry in entries:
        d = {}
        for k, v in model.entry.to_query_dict(entry).items():
            if isinstance(v, (list, tuple, dict, DBRef)):
                continue
            elif isinstance(v, ObjectId):
                v = str(v)
            elif isinstance(v, datetime):
                v = v.isoformat()
            d[unicode(k).encode('utf8')] = unicode(v).encode('utf8')
        keys.update(d.keys())
        rows.append(d)

    fields = sorted(keys)
    writer = csv.DictWriter(response, fields)
    if sys.version_info < (2,7):
        header = dict(zip(fields, fields))
        self.writerow(header)
    else:
        writer.writeheader()
    writer.writerows(rows)
Python
0.999999
@@ -999,12 +999,14 @@ -self +writer .wri
ab35f508375c760770884882acaea79079a1a976
remove unnecessary print
erlang/__init__.py
erlang/__init__.py
from __future__ import division


def extended_b_lines(usage, blocking):
	'''
	Uses the Extended Erlang B formula to calcluate the ideal number of
	lines for the given usage in erlangs and the given blocking rate.

	Usage: extended_b_lines(usage, blocking)
	'''
	line_count = 1
	while extended_b(usage, line_count) > blocking:
		line_count += 1
	return line_count


def extended_b(usage, lines, recall=0.5):
	'''
	Usage: extended_b(usage, lines, recall=0.5)
	'''
	original_usage = usage
	while True:
		PB = b(usage, lines)
		magic_number_1 = (1 - PB) * usage + (1 - recall) * PB * usage
		magic_number_2 = 0.9999 * original_usage
		if magic_number_1 >= magic_number_2:
			return PB
		usage = original_usage + recall * PB * usage
	return -1


def b(usage, lines):
	'''
	Usage: b(usage, lines)
	'''
	if usage > 0:
		PBR = (1 + usage) / usage
		for index in range(2, lines + 1):
			print(PBR)
			PBR = index / usage * PBR + 1
			if PBR > 10000:
				return 0
		return 1 / PBR
	return 0
Python
0.999922
@@ -879,22 +879,8 @@ 1):%0A -%09%09%09print(PBR)%0A %09%09%09P
3a79a0f16635eab998debdb9be66154ab4db84f5
add a newline after quoted messages
pybb/contrib/quotes/views.py
pybb/contrib/quotes/views.py
from django.shortcuts import get_object_or_404
from django.http import Http404, HttpResponse
from django.contrib import messages
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from django.shortcuts import redirect
from django.core.exceptions import PermissionDenied

from pybb.views.base import (PostCreateView as BasePostCreateView,
                             TopicDetailView as BaseTopicDetailView,
                             PostsCreateView as BasePostsCreateView)
from pybb.contrib.quotes.exceptions import QuoteException
from pybb.contrib.quotes.models import quote
from pybb.models import Post, Topic
from pybb import defaults
from pybb.util import generic, load_class


def make_session_key(topic):
    return 'topic:%d:quote_ids' % topic.pk


def get_posts_quoted(request, topic):
    session_key = make_session_key(topic)

    return (topic.posts.filter_by_user(topic, request.user)
            .filter(id__in=request.session.get(session_key, [])))


class TopicDetailView(BaseTopicDetailView):
    def get_context_data(self, **kwargs):
        ctx = super(TopicDetailView, self).get_context_data(**kwargs)

        return dict(ctx, **{
            'posts_quoted': get_posts_quoted(self.request, self.topic)
        })


class PostsCreateView(BasePostsCreateView):
    http_method_names = ['post']

    def get_redirect_url(self, **kwargs):
        url = super(PostsCreateView, self).get_redirect_url(**kwargs)

        if not 'quote_id' in self.request.POST:
            return url

        try:
            quote_id = int(self.request.POST.get('quote_id', None))
        except ValueError:
            raise Http404

        return url + u'?quote_id=%s' % quote_id


class PostCreateView(BasePostCreateView):
    def get_form_kwargs(self):
        form_kwargs = super(PostCreateView, self).get_form_kwargs()

        if self.request.method == 'GET':
            post_quoted = {}

            if 'quote_id' in self.request.GET:
                try:
                    quote_id = int(self.request.GET.get('quote_id'))
                except TypeError:
                    raise Http404
                else:
                    post = get_object_or_404(Post, pk=quote_id)

                    post_quoted[post.pk] = post

            if self.topic:
                for post in get_posts_quoted(self.request, self.topic):
                    post_quoted[post.pk] = post

            if len(post_quoted.keys()):
                form_kwargs['initial']['body'] = '\n'.join([quote(post, post.user.username)
                                                            for post_id, post in post_quoted.iteritems()])

        return form_kwargs

    def form_valid(self, form):
        try:
            response = super(PostCreateView, self).form_valid(form)

            if self.topic:
                session_key = make_session_key(self.topic)

                if session_key in self.request.session:
                    del self.request.session[session_key]

            return response
        except QuoteException, e:
            messages.error(self.request, e.message)

            return self.form_invalid(form)


class QuoteView(generic.View):
    http_method_names = ['post']

    @method_decorator(login_required)
    def dispatch(self, request, *args, **kwargs):
        return super(QuoteView, self).dispatch(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        try:
            topic = Topic.objects.get(pk=request.POST['topic_id'])
            post = topic.posts.filter_by_user(topic, request.user).get(pk=request.POST['post_id'])
        except (Topic.DoesNotExist, Post.DoesNotExist, ValueError):
            raise Http404
        else:
            result = all([load_class(pre_post_create_filter)(
                topic=topic,
                request=request,
                forum=topic.forum,
            ).is_allowed(request.user) for pre_post_create_filter in defaults.PYBB_PRE_POST_CREATE_FILTERS])

            if not result:
                raise PermissionDenied

            session_key = make_session_key(topic)

            post_ids = request.session.get(session_key, [])

            if post.pk in post_ids:
                post_ids.remove(post.pk)
            else:
                post_ids.append(post.pk)

            request.session[session_key] = post_ids

            if request.is_ajax():
                return HttpResponse('Ok')

            return redirect(post.get_anchor_url(request.user))
Python
0
@@ -2502,38 +2502,12 @@ -form_kwargs%5B'initial'%5D%5B' body -'%5D = ' @@ -2560,36 +2560,79 @@ - +for post_id, post in post_quoted.iteritems()%5D)%0A @@ -2623,36 +2623,45 @@ - +body += '%5Cn'%0A @@ -2667,51 +2667,42 @@ for - post_id, post in post_quoted.iteritems()%5D) +m_kwargs%5B'initial'%5D%5B'body'%5D = body %0A%0A
c40a07e4ba1bfefd977bc9eea71abe5fcaf97370
Use custom exception in place of NotImplemented
manifestos/twitter.py
manifestos/twitter.py
import re

from django.conf import settings

import tweepy

TWITTER_CONSUMER_KEY = settings.TWITTER_CONSUMER_KEY
TWITTER_CONSUMER_SECRET = settings.TWITTER_CONSUMER_SECRET
TWITTER_ACCESS_KEY = settings.TWITTER_ACCESS_KEY
TWITTER_ACCESS_SECRET = settings.TWITTER_ACCESS_SECRET


class TwitterBot(object):
    """
    Creates tweets for the Digital Manifest Twitter Bot.
    """
    def get_auth(self):
        auth = tweepy.OAuthHandler(TWITTER_CONSUMER_KEY, TWITTER_CONSUMER_SECRET)
        auth.set_access_token(TWITTER_ACCESS_KEY, TWITTER_ACCESS_SECRET)
        return auth

    def get_api(self):
        auth = self.get_auth()
        return tweepy.API(auth)

    def tweet(self, text):
        if not isinstance(text, str):
            raise NotImplemented('Can only tweet strings.')

        # Escape SMS commands
        pattern = re.compile(
            r'^(ON|OFF|FOLLOW|F|UNFOLLOW|LEAVE|L|STOP|QUIT|END|CANCEL|'
            r'UNSBSCRIBE|ARRET|D|M|RETWEET|RT|SET|WHOIS|W|GET|G|FAV|FAVE|'
            r'FAVORITE|FAVORITE|\*|STATS|SUGGEST|SUG|S|WTF|HELP|INFO|AIDE|'
            r'BLOCK|BLK|REPORT|REP)(\W)(.*)', re.I)
        text = re.sub(pattern, '\\1\u200B\\2\\3', text)

        text = text[:140]

        api = self.get_api()
        api.update_status(status=text)
Python
0.000001
@@ -270,16 +270,65 @@ ECRET%0A%0A%0A +class TwitterBotException(Exception):%0A pass%0A%0A%0A class Tw @@ -728,24 +728,77 @@ elf, text):%0A + # Make sure we have legitimate text to tweet%0A if n @@ -845,22 +845,27 @@ ise -NotImplemented +TwitterBotException ('Ca @@ -888,16 +888,128 @@ ings.')%0A + text = text.strip()%0A if not text:%0A raise TwitterBotException('Text has no content.')%0A%0A @@ -1404,25 +1404,79 @@ -text = text%5B:140%5D +# Truncate to 140 characters%0A text = text%5B:140%5D%0A%0A # Tweet %0A
4b4b689463c0e6d0db783a10fcf74b21fea60a68
Fix double repr.
pygments/formatters/other.py
pygments/formatters/other.py
# -*- coding: utf-8 -*-
"""
    pygments.formatters.other
    ~~~~~~~~~~~~~~~~~~~~~~~~~

    Other formatters: NullFormatter, RawTokenFormatter.

    :copyright: 2006 by Georg Brandl, Armin Ronacher.
    :license: BSD, see LICENSE for more details.
"""

from pygments.formatter import Formatter


__all__ = ['NullFormatter', 'RawTokenFormatter']


class NullFormatter(Formatter):
    """
    Output the text unchanged without any formatting.
    """
    def format(self, tokensource, outfile):
        for ttype, value in tokensource:
            outfile.write(value.encode(self.encoding))


class RawTokenFormatter(Formatter):
    """
    Output a raw token representation for storing token streams.

    The format is ``tokentype<TAB>repr(tokenstring)``

    Additional options accepted:

    ``compress``
        If set to "gz" or "bz2", compress the token stream with the given
        compression algorithm (default: '').
    """
    def __init__(self, **options):
        Formatter.__init__(self, **options)
        self.compress = options.get('compress', '')

    def format(self, tokensource, outfile):
        if self.compress == 'gz':
            import gzip
            outfile = gzip.GzipFile('', 'wb', 9, outfile)
            write = outfile.write
            flush = outfile.flush
        elif self.compress == 'bz2':
            import bz2
            compressor = bz2.BZ2Compressor(9)
            def write(text):
                outfile.write(compressor.compress(text))
            def flush():
                outfile.write(compressor.flush())
                outfile.flush()
        else:
            write = outfile.write
            flush = outfile.flush

        lasttype = None
        lastval = u''
        for ttype, value in tokensource:
            value = repr(value)
            if ttype is lasttype:
                lastval += value
            else:
                if lasttype:
                    write("%s\t%r\n" % (lasttype, lastval))
                lastval = value
                lasttype = ttype
        write("%s\t%r\n" % (lasttype, lastval))
        flush()
Python
0.000001
@@ -1928,33 +1928,33 @@ write(%22%25s%5Ct%25 -r +s %5Cn%22 %25 (lasttype, @@ -2049,17 +2049,17 @@ e(%22%25s%5Ct%25 -r +s %5Cn%22 %25 (l
a3a1d478a3f68209cab144b8c15f78327f0fbbdd
Indent the text wrap lines hanging
dox/runner.py
dox/runner.py
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

__all__ = [
    'Runner',
]

import logging
import os
import shlex
import shutil
import subprocess
import sys
import tempfile
import textwrap

logger = logging.getLogger(__name__)


class Runner(object):

    def __init__(self, args):
        self.args = args
        self.project = os.path.basename(os.path.abspath('.'))
        self.base_image_name = 'dox/%s/base' % self.project
        self.test_image_name = 'dox/%s/test' % self.project

    def _docker_build(self, image, image_dir='.'):
        logger.info('Building image %s' % image)
        self._docker_cmd('build', '-t', image, image_dir)

    def _docker_run(self, *args):
        logger.info('Running docker')
        self._docker_cmd('run', *args)

    def _docker_cmd(self, *args):
        base_docker = ['docker']
        if self.args.debug:
            base_docker.append('-D')
        try:
            self._run_shell_command(base_docker + list(args))
        except Exception as e:
            logger.error("docker failed")
            logger.info(e.stderr)
            raise

    def _run_shell_command(self, cmd):
        logger.debug('shell: ' + ' '.join(cmd))
        if self.args.noop:
            return
        process = subprocess.Popen(
            cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        while True:
            output = process.stdout.read(1)
            if output == '' and process.poll() is not None:
                break
            if output != '' and self.args.verbose or self.args.debug:
                sys.stdout.write(output)
                sys.stdout.flush()
        if process.returncode:
            raise Exception(
                "%s returned %d" % (cmd, process.returncode))

    def _indent(self, text):
        wrapper = textwrap.TextWrapper(
            initial_indent='    ', subsequent_indent='    ')
        return '\n'.join([wrapper.fill(line) for line in text.split('\n')])

    def _get_image_list(self):
        process = subprocess.Popen(
            shlex.split('docker images'),
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        out = process.communicate()
        if len(out) == 0 or not out[0] or not out[0].strip():
            out_text = ''
        out_text = out[0].strip().decode('utf-8')
        return dict([f.split()[:2] for f in out_text.split('\n')])

    def have_test_image(self):
        if self.args.rebuild or self.args.rebuild_all:
            return False
        if self.test_image_name in self._get_image_list():
            return True
        return False

    def build_test_image(self, image, commands):
        logger.debug(
            "Want test image %(image)s with %(prep_commands)s" % dict(
                image=self.test_image_name,
                prep_commands=commands.prep_commands()))
        if self.have_test_image():
            return

        dockerfile = []
        dockerfile.append("FROM %s" % image)
        try:
            tempd = tempfile.mkdtemp()
            dockerfile.append(
                "RUN groupadd -g %(gid)s %(user)s"
                " && useradd -d /src -g %(gid)s -u %(uid)s %(user)s" % dict(
                    uid=os.getuid(), gid=os.getgid(), user=os.getlogin()))
            for add_file in commands.get_add_files():
                shutil.copy(add_file, os.path.join(tempd, add_file))
                dockerfile.append("ADD %s /dox/" % add_file)
            dockerfile.append("WORKDIR /dox")
            for command in commands.prep_commands():
                dockerfile.append("RUN %s\n" % command)
            dockerfile = '\n'.join(dockerfile)
            open(os.path.join(tempd, 'Dockerfile'), 'w').write(dockerfile)
            logger.debug("Dockerfile:\n" + self._indent(dockerfile))
            self._docker_build(self.test_image_name, tempd)
        finally:
            shutil.rmtree(tempd)

    def run_commands(self, command):
        self._docker_run(
            '--rm', '--user=%s' % os.getlogin(),
            '-v', "%s:/src" % os.path.abspath('.'),
            '-w', '/src', self.test_image_name, *command)

    def have_base_image(self):
        if self.args.rebuild_all:
            return False
        if self.base_image_name in self._get_image_list():
            return True
        return False

    def build_base_image(self):
        logger.debug("Want base image")
        if self.have_base_image():
            return
        self._docker_build(self.base_image_name)

    def run(self, image, command):
        logger.debug(
            "Going to run %(command)s in %(image)s" % dict(
                command=command.test_command(), image=image))
        if self.args.rebuild:
            logger.debug("Need to rebuild")
        if image is None:
            self.build_base_image()

        self.build_test_image(image, command)
        self.run_commands(shlex.split(command.test_command()))
Python
0.000586
@@ -2427,24 +2427,28 @@ ent_indent=' + ')%0A
23d2284e477a9e85b7e7a0d4adc8e7e216995450
Add a test for xarray grid shading (#581)
pygmt/tests/test_grdimage.py
pygmt/tests/test_grdimage.py
""" Test Figure.grdimage """ import numpy as np import pytest import xarray as xr from .. import Figure from ..datasets import load_earth_relief from ..exceptions import GMTInvalidInput from ..helpers.testing import check_figures_equal @pytest.fixture(scope="module", name="grid") def fixture_grid(): "Load the grid data from the sample earth_relief file" return load_earth_relief(registration="gridline") @pytest.fixture(scope="module", name="xrgrid") def fixture_xrgrid(): """ Create a sample xarray.DataArray grid for testing """ longitude = np.arange(0, 360, 1) latitude = np.arange(-89, 90, 1) x = np.sin(np.deg2rad(longitude)) y = np.linspace(start=0, stop=1, num=179) data = y[:, np.newaxis] * x return xr.DataArray( data, coords=[ ("latitude", latitude, {"units": "degrees_north"}), ("longitude", longitude, {"units": "degrees_east"}), ], attrs={"actual_range": [-1, 1]}, ) @pytest.mark.mpl_image_compare def test_grdimage(grid): "Plot an image using an xarray grid" fig = Figure() fig.grdimage(grid, cmap="earth", projection="W0/6i") return fig @pytest.mark.mpl_image_compare def test_grdimage_slice(grid): "Plot an image using an xarray grid that has been sliced" grid_ = grid.sel(lat=slice(-30, 30)) fig = Figure() fig.grdimage(grid_, cmap="earth", projection="M6i") return fig @pytest.mark.mpl_image_compare def test_grdimage_file(): "Plot an image using file input" fig = Figure() fig.grdimage( "@earth_relief_01d_g", cmap="ocean", region=[-180, 180, -70, 70], projection="W0/10i", shading=True, ) return fig def test_grdimage_fails(): "Should fail for unrecognized input" fig = Figure() with pytest.raises(GMTInvalidInput): fig.grdimage(np.arange(20).reshape((4, 5))) @pytest.mark.mpl_image_compare def test_grdimage_over_dateline(xrgrid): """ Ensure no gaps are plotted over the 180 degree international dateline. Specifically checking that `xrgrid.gmt.gtype = 1` sets `GMT_GRID_IS_GEO`, and that `xrgrid.gmt.registration = 0` sets `GMT_GRID_NODE_REG`. Note that there would be a gap over the dateline if a pixel registered grid is used. See also https://github.com/GenericMappingTools/pygmt/issues/375. """ fig = Figure() assert xrgrid.gmt.registration == 0 # gridline registration xrgrid.gmt.gtype = 1 # geographic coordinate system fig.grdimage(grid=xrgrid, region="g", projection="A0/0/1c", V="i") return fig @check_figures_equal() def test_grdimage_central_longitude(grid): """ Test that plotting a grid centred at different longitudes/meridians work. """ fig_ref = Figure() fig_ref.grdimage("@earth_relief_01d_g", projection="W120/15c", cmap="geo") fig_test = Figure() fig_test.grdimage(grid, projection="W120/15c", cmap="geo") return fig_ref, fig_test
Python
0
@@ -1729,16 +1729,604 @@ n fig%0A%0A%0A +@pytest.mark.xfail(reason=%22Upstream bug in GMT 6.1.1%22)%0A@check_figures_equal()%0Adef test_grdimage_xarray_shading(grid, fig_ref, fig_test):%0A %22%22%22%0A Test that shading works well for xarray.%0A See https://github.com/GenericMappingTools/pygmt/issues/364%0A %22%22%22%0A fig_ref, fig_test = Figure(), Figure()%0A kwargs = dict(%0A region=%5B-180, 180, -90, 90%5D,%0A frame=True,%0A projection=%22Cyl_stere/6i%22,%0A cmap=%22geo%22,%0A shading=True,%0A )%0A%0A fig_ref.grdimage(%22@earth_relief_01d_g%22, **kwargs)%0A fig_test.grdimage(grid, **kwargs)%0A return fig_ref, fig_test%0A%0A%0A def test
3c9b50d028919beefc9611f85a6ced2fe2362e3e
revert partial change from commit 2397 that should not have gone through - need to find cleaner way of shutting down server
bin/openerp-server.py
bin/openerp-server.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

"""
OpenERP - Server
OpenERP is an ERP+CRM program for small and medium businesses.

The whole source code is distributed under the terms of the
GNU Public Licence.

(c) 2003-TODAY, Fabien Pinckaers - Tiny sprl
"""

#----------------------------------------------------------
# python imports
#----------------------------------------------------------
import sys
import os
import signal
import pwd

import release
__author__ = release.author
__version__ = release.version

# We DON't log this using the standard logger, because we might mess
# with the logfile's permissions. Just do a quick exit here.
if pwd.getpwuid(os.getuid())[0] == 'root' :
    sys.stderr.write("Attempted to run OpenERP server as root. This is not good, aborting.\n")
    sys.exit(1)

#----------------------------------------------------------
# get logger
#----------------------------------------------------------
import netsvc
logger = netsvc.Logger()

#-----------------------------------------------------------------------
# import the tools module so that the commandline parameters are parsed
#-----------------------------------------------------------------------
import tools

logger.notifyChannel("server", netsvc.LOG_INFO, "version - %s" % release.version )

for name, value in [('addons_path', tools.config['addons_path']),
                    ('database hostname', tools.config['db_host'] or 'localhost'),
                    ('database port', tools.config['db_port'] or '5432'),
                    ('database user', tools.config['db_user'])]:
    logger.notifyChannel("server", netsvc.LOG_INFO, "%s - %s" % ( name, value ))

# Don't allow if the connection to PostgreSQL done by postgres user
if tools.config['db_user'] == 'postgres':
    logger.notifyChannel("server", netsvc.LOG_ERROR, "%s" % ("Attempted to connect database with postgres user. This is a security flaws, aborting."))
    sys.exit(1)

import time

#----------------------------------------------------------
# init net service
#----------------------------------------------------------
logger.notifyChannel("objects", netsvc.LOG_INFO, 'initialising distributed objects services')

#---------------------------------------------------------------
# connect to the database and initialize it with base if needed
#---------------------------------------------------------------
import pooler

#----------------------------------------------------------
# import basic modules
#----------------------------------------------------------
import osv
import workflow
import report
import service

#----------------------------------------------------------
# import addons
#----------------------------------------------------------
import addons

#----------------------------------------------------------
# Load and update databases if requested
#----------------------------------------------------------
import service.http_server

if not ( tools.config["stop_after_init"] or \
    tools.config["translate_in"] or \
    tools.config["translate_out"] ):
    service.http_server.init_servers()
    service.http_server.init_xmlrpc()

    import service.netrpc_server
    service.netrpc_server.init_servers()

if tools.config['db_name']:
    for db in tools.config['db_name'].split(','):
        pooler.get_db_and_pool(db, update_module=tools.config['init'] or tools.config['update'])

#----------------------------------------------------------
# translation stuff
#----------------------------------------------------------
if tools.config["translate_out"]:
    import csv

    if tools.config["language"]:
        msg = "language %s" % (tools.config["language"],)
    else:
        msg = "new language"
    logger.notifyChannel("init", netsvc.LOG_INFO, 'writing translation file for %s to %s' % (msg, tools.config["translate_out"]))

    fileformat = os.path.splitext(tools.config["translate_out"])[-1][1:].lower()
    buf = file(tools.config["translate_out"], "w")
    tools.trans_export(tools.config["language"], tools.config["translate_modules"], buf, fileformat)
    buf.close()

    logger.notifyChannel("init", netsvc.LOG_INFO, 'translation file written successfully')
    sys.exit(0)

if tools.config["translate_in"]:
    tools.trans_load(tools.config["db_name"], tools.config["translate_in"], tools.config["language"])
    sys.exit(0)

#----------------------------------------------------------------------------------
# if we don't want the server to continue to run after initialization, we quit here
#----------------------------------------------------------------------------------
if tools.config["stop_after_init"]:
    sys.exit(0)

#----------------------------------------------------------
# Launch Servers
#----------------------------------------------------------

LST_SIGNALS = ['SIGINT', 'SIGTERM']
if os.name == 'posix':
    LST_SIGNALS.extend(['SIGUSR1','SIGQUIT'])

SIGNALS = dict( [(getattr(signal, sign), sign) for sign in LST_SIGNALS] )

def handler(signum, _):
    """
    :param signum: the signal number
    :param _:
    """
    netsvc.Agent.quit()
    netsvc.Server.quitAll()
    if tools.config['pidfile']:
        os.unlink(tools.config['pidfile'])
    logger.notifyChannel('shutdown', netsvc.LOG_INFO, "Shutdown Server! - %s" % ( SIGNALS[signum], ))
    logger.shutdown()
    #sys.exit(0)
    os._exit(0)

for signum in SIGNALS:
    signal.signal(signum, handler)

if tools.config['pidfile']:
    fd = open(tools.config['pidfile'], 'w')
    pidtext = "%d" % (os.getpid())
    fd.write(pidtext)
    fd.close()

netsvc.Server.startAll()

logger.notifyChannel("web-services", netsvc.LOG_INFO, 'the server is running, waiting for connections...')

while True:
    time.sleep(60)

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
Python
0
@@ -6450,17 +6450,16 @@ n()%0A -# sys.exit @@ -6465,24 +6465,8 @@ t(0) -%0A os._exit(0) %0A%0Afo
07b586ba0d2c8dfac6c344c8020be7e029fbdd11
fix `IPRouteRequest` to work with MPLS dst
pyroute2/netlink/rtnl/req.py
pyroute2/netlink/rtnl/req.py
from socket import AF_INET6
from pyroute2.common import basestring
from pyroute2.netlink.rtnl.ifinfmsg import ifinfmsg
from pyroute2.netlink.rtnl.rtmsg import rtmsg


class IPRequest(dict):

    def __init__(self, obj=None):
        dict.__init__(self)
        if obj is not None:
            self.update(obj)

    def update(self, obj):
        for key in obj:
            if obj[key] is not None:
                self[key] = obj[key]


class IPRouteRequest(IPRequest):
    '''
    Utility class, that converts human-readable dictionary
    into RTNL route request.
    '''

    def __setitem__(self, key, value):
        # skip virtual IPDB fields
        if key.startswith('ipdb_'):
            return
        # fix family
        if isinstance(value, basestring) and value.find(':') >= 0:
            self['family'] = AF_INET6
        # work on the rest
        if key == 'dst':
            if value != 'default':
                value = value.split('/')
                if len(value) == 1:
                    dst = value[0]
                    mask = 0
                elif len(value) == 2:
                    dst = value[0]
                    mask = int(value[1])
                else:
                    raise ValueError('wrong destination')
                dict.__setitem__(self, 'dst', dst)
                dict.__setitem__(self, 'dst_len', mask)
        elif key == 'metrics':
            ret = {'attrs': []}
            for name in value:
                rtax = rtmsg.metrics.name2nla(name)
                ret['attrs'].append([rtax, value[name]])
            if ret['attrs']:
                dict.__setitem__(self, 'metrics', ret)
        elif key == 'multipath':
            ret = []
            for v in value:
                nh = {'attrs': []}
                for name in ('flag', 'hops', 'ifindex'):
                    nh[name] = v.pop(name, 0)
                for name in v:
                    rta = rtmsg.name2nla(name)
                    nh['attrs'].append([rta, v[name]])
                ret.append(nh)
            if ret:
                dict.__setitem__(self, 'multipath', ret)
        else:
            dict.__setitem__(self, key, value)


class CBRequest(IPRequest):
    '''
    FIXME
    '''
    commands = None
    msg = None

    def __init__(self, *argv, **kwarg):
        self['commands'] = {'attrs': []}

    def __setitem__(self, key, value):
        if value is None:
            return
        if key in self.commands:
            self['commands']['attrs'].\
                append([self.msg.name2nla(key), value])
        else:
            dict.__setitem__(self, key, value)


class IPLinkRequest(IPRequest):
    '''
    Utility class, that converts human-readable dictionary
    into RTNL link request.
    '''
    blacklist = ['carrier', 'carrier_changes']

    def __init__(self, *argv, **kwarg):
        self.deferred = []
        IPRequest.__init__(self, *argv, **kwarg)
        if 'index' not in self:
            self['index'] = 0

    def __setitem__(self, key, value):
        # ignore blacklisted attributes
        if key in self.blacklist:
            return

        # there must be no "None" values in the request
        if value is None:
            return

        # all the values must be in ascii
        try:
            if isinstance(value, unicode):
                value = value.encode('ascii')
        except NameError:
            pass

        # set up specific keys
        if key == 'kind':
            self['IFLA_LINKINFO'] = {'attrs': []}
            linkinfo = self['IFLA_LINKINFO']['attrs']
            linkinfo.append(['IFLA_INFO_KIND', value])
            if value in ('vlan', 'bond', 'tuntap', 'veth',
                         'vxlan', 'macvlan', 'macvtap', 'gre'):
                linkinfo.append(['IFLA_INFO_DATA', {'attrs': []}])
        elif key == 'vlan_id':
            nla = ['IFLA_VLAN_ID', value]
            # FIXME: we need to replace, not add
            self.defer_nla(nla, ('IFLA_LINKINFO', 'IFLA_INFO_DATA'),
                           lambda x: x.get('kind', None) == 'vlan')
        elif key == 'gid':
            nla = ['IFTUN_UID', value]
            self.defer_nla(nla, ('IFLA_LINKINFO', 'IFLA_INFO_DATA'),
                           lambda x: x.get('kind', None) == 'tuntap')
        elif key == 'uid':
            nla = ['IFTUN_UID', value]
            self.defer_nla(nla, ('IFLA_LINKINFO', 'IFLA_INFO_DATA'),
                           lambda x: x.get('kind', None) == 'tuntap')
        elif key == 'mode':
            nla = ['IFTUN_MODE', value]
            self.defer_nla(nla, ('IFLA_LINKINFO', 'IFLA_INFO_DATA'),
                           lambda x: x.get('kind', None) == 'tuntap')
            nla = ['IFLA_BOND_MODE', value]
            self.defer_nla(nla, ('IFLA_LINKINFO', 'IFLA_INFO_DATA'),
                           lambda x: x.get('kind', None) == 'bond')
        elif key == 'ifr':
            nla = ['IFTUN_IFR', value]
            self.defer_nla(nla, ('IFLA_LINKINFO', 'IFLA_INFO_DATA'),
                           lambda x: x.get('kind', None) == 'tuntap')
        elif key.startswith('macvtap'):
            nla = [ifinfmsg.name2nla(key), value]
            self.defer_nla(nla, ('IFLA_LINKINFO', 'IFLA_INFO_DATA'),
                           lambda x: x.get('kind', None) == 'macvtap')
        elif key.startswith('macvlan'):
            nla = [ifinfmsg.name2nla(key), value]
            self.defer_nla(nla, ('IFLA_LINKINFO', 'IFLA_INFO_DATA'),
                           lambda x: x.get('kind', None) == 'macvlan')
        elif key.startswith('gre'):
            nla = [ifinfmsg.name2nla(key), value]
            self.defer_nla(nla, ('IFLA_LINKINFO', 'IFLA_INFO_DATA'),
                           lambda x: x.get('kind', None) == 'gre')
        elif key.startswith('vxlan'):
            nla = [ifinfmsg.name2nla(key), value]
            self.defer_nla(nla, ('IFLA_LINKINFO', 'IFLA_INFO_DATA'),
                           lambda x: x.get('kind', None) == 'vxlan')
        elif key == 'peer':
            nla = ['VETH_INFO_PEER', {'attrs': [['IFLA_IFNAME', value]]}]
            self.defer_nla(nla, ('IFLA_LINKINFO', 'IFLA_INFO_DATA'),
                           lambda x: x.get('kind', None) == 'veth')
        dict.__setitem__(self, key, value)
        if self.deferred:
            self.flush_deferred()

    def flush_deferred(self):
        deferred = []
        for nla, path, predicate in self.deferred:
            if predicate(self):
                self.append_nla(nla, path)
            else:
                deferred.append((nla, path, predicate))
        self.deferred = deferred

    def append_nla(self, nla, path):
        pwd = self
        for step in path:
            if step in pwd:
                pwd = pwd[step]
            else:
                pwd = [x[1] for x in pwd['attrs']
                       if x[0] == step][0]['attrs']
        pwd.append(nla)

    def defer_nla(self, nla, path, predicate):
        self.deferred.append((nla, path, predicate))
        self.flush_deferred()
Python
0
@@ -887,24 +887,119 @@ if +isinstance(value, dict):%0A dict.__setitem__(self, 'dst', value)%0A elif value != 'de
2a7aee189dff539fe3cf8049319a2b09c6a0fbb1
add new filter to dataset config
pysaliency/dataset_config.py
pysaliency/dataset_config.py
from .datasets import read_hdf5
from .filter_datasets import filter_fixations_by_number, filter_stimuli_by_number, train_split, validation_split, test_split

from schema import Schema, Optional


dataset_config_schema = Schema({
    'stimuli': str,
    'fixations': str,
    Optional('filters', default=[]): [{
        'type': str,
        Optional('parameters', default={}): dict,
    }],
})


def load_dataset_from_config(config):
    config = dataset_config_schema.validate(config)
    stimuli = read_hdf5(config['stimuli'])
    fixations = read_hdf5(config['fixations'])
    for filter_config in config['filters']:
        stimuli, fixations = apply_dataset_filter_config(stimuli, fixations, filter_config)
    return stimuli, fixations


def apply_dataset_filter_config(stimuli, fixations, filter_config):
    filter_dict = {
        'filter_fixations_by_number': add_stimuli_argument(filter_fixations_by_number),
        'filter_stimuli_by_number': filter_stimuli_by_number,
        'train_split': train_split,
        'validation_split': validation_split,
        'test_split': test_split,
    }
    if filter_config['type'] not in filter_dict:
        raise ValueError("Invalid filter name: {}".format(filter_config['type']))

    filter_fn = filter_dict[filter_config['type']]

    return filter_fn(stimuli, fixations, **filter_config['parameters'])


def add_stimuli_argument(fn):
    def wrapped(stimuli, fixations, **kwargs):
        new_fixations = fn(fixations, **kwargs)
        return stimuli, new_fixations
    return wrapped
Python
0
@@ -54,16 +54,22 @@ import +(%0A filter_f @@ -87,16 +87,20 @@ _number, +%0A filter_ @@ -117,16 +117,48 @@ _number, +%0A filter_stimuli_by_size,%0A train_s @@ -162,16 +162,20 @@ n_split, +%0A validat @@ -184,16 +184,20 @@ n_split, +%0A test_sp @@ -199,16 +199,18 @@ st_split +%0A) %0A%0Afrom s @@ -1024,24 +1024,82 @@ _by_number,%0A + 'filter_stimuli_by_size': filter_stimuli_by_size,%0A 'tra
46e21ff57d47f1860d639972dc4eed1994a6cd50
remove print statements
scholars/authentication/pipeline.py
scholars/authentication/pipeline.py
import hashlib

from social_core.exceptions import AuthAlreadyAssociated, AuthException


def auto_logout(*args, **kwargs):
    """Do not compare current user with new one"""
    return {'user': None}


def check_email_present(backend, uid, user=None, *args, **kwargs):
    if not kwargs['details'].get('email'):
        raise AuthException(backend, "Email wasn't provided by oauth provider")


def social_user(backend, uid, user=None, *args, **kwargs):
    provider = backend.name
    social = backend.strategy.storage.user.get_social_auth(provider, uid)

    if social:
        # can happen when user has multiple accounts with same email (apply email uniqueness strictly)
        print user
        print social

        if user and social.user != user:
            msg = 'This {0} account is already in use.'.format(provider)
            raise AuthAlreadyAssociated(backend, msg)
        elif not user:
            user = social.user

    return {'social': social,
            'user': user,
            'is_new': user is None,
            'new_association': social is None}


def save_avatar(strategy, details, user=None, *args, **kwargs):
    """Get user avatar from social provider."""
    if user:
        backend_name = kwargs['backend'].__class__.__name__.lower()
        response = kwargs.get('response', {})
        avatar = None
        if 'google-oauth2' in backend_name and response.get('image', {}).get('url'):
            avatar = response['image']['url'].split('?')[0]
        else:
            avatar = 'http://www.gravatar.com/avatar/'
            avatar += hashlib.md5(user.email.lower().encode('utf8')).hexdigest()
            avatar += '?size=100'
        if avatar and user.avatar != avatar:
            user.avatar = avatar
            strategy.storage.user.changed(user)
Python
0.999999
@@ -673,49 +673,8 @@ ly)%0A - print user%0A print social%0A%0A
28917935e5086ff6a03964babbb5c2e09957b582
Bump version
pytablewriter/__version__.py
pytablewriter/__version__.py
# encoding: utf-8

from datetime import datetime


__author__ = "Tsuyoshi Hombashi"
__copyright__ = "Copyright 2016-{}, {}".format(datetime.now().year, __author__)
__license__ = "MIT License"
__version__ = "0.46.3"
__maintainer__ = __author__
__email__ = "tsuyoshi.hombashi@gmail.com"
Python
0
@@ -207,11 +207,11 @@ %220.4 -6.3 +7.0 %22%0A__
a1d88ff0da34300f0417a9c4679d65b6d38f8bd6
Fix #64: bug where task could not be deleted
app/controller/backend/TasksController.py
app/controller/backend/TasksController.py
#!/usr/bin/env python2
# coding: utf-8
# file: TasksController.py

import datetime

from flask import redirect, render_template, request, jsonify

from . import ADMIN_URL
from app import web, db
from app.CommonClass.ValidateClass import ValidateClass
from app.models import CobraTaskInfo
from utils import config

__author__ = "lightless"
__email__ = "root@lightless.me"


# show all tasks
@web.route(ADMIN_URL + '/tasks/<int:page>', methods=['GET'])
def tasks(page):
    if not ValidateClass.check_login():
        return redirect(ADMIN_URL + '/index')

    per_page = 10
    tasks = CobraTaskInfo.query.order_by('id desc').limit(per_page).offset((page - 1) * per_page).all()

    # replace data
    for task in tasks:
        task.scan_way = "Full Scan" if task.scan_way == 1 else "Diff Scan"
        task.report = 'http://' + config.Config('cobra', 'domain').value + '/report/' + str(task.id)

    data = {
        'tasks': tasks,
    }

    return render_template('backend/task/tasks.html', data=data)


# del the special task
@web.route(ADMIN_URL + '/del_task', methods=['POST'])
def del_task():
    if not ValidateClass.check_login():
        return redirect(ADMIN_URL + '/index')

    vc = ValidateClass(request, "id")
    ret, msg = vc.check_args()
    if not ret:
        return jsonify(tag="danger", msg=msg)

    task = CobraTaskInfo.query.filter_by(id=vc.vars.task_id).first()
    try:
        db.session.delete(task)
        db.session.commit()
        return jsonify(tag='success', msg='delete success.')
    except:
        return jsonify(tag='danger', msg='unknown error.')


# edit the special task
@web.route(ADMIN_URL + '/edit_task/<int:task_id>', methods=['GET', 'POST'])
def edit_task(task_id):
    if not ValidateClass.check_login():
        return redirect(ADMIN_URL + '/index')

    if request.method == 'POST':
        # vc = ValidateClass(request, "branch", "scan_way", "new_version", "old_version", "target")
        # ret, msg = vc.check_args()
        # if not ret:
        #     return jsonify(tag="danger", msg=msg)
        # TODO: check new_version and old_version when scan_way == 2

        branch = request.form.get('branch')
        scan_way = request.form.get('scan_way')
        new_version = request.form.get('new_version')
        old_version = request.form.get('old_version')
        target = request.form.get('target')

        if not branch or branch == "":
            return jsonify(tag='danger', msg='branch can not be empty')
        if not scan_way or scan_way == "":
            return jsonify(tag='danger', msg='scan way can not be empty')
        if (scan_way == 2) and ((not new_version or new_version == "") or (not old_version or old_version == "")):
            return jsonify(tag='danger', msg='In diff scan mode, new version and old version can not be empty')
        if not target or target == "":
            return jsonify(tag='danger', msg='Target can not be empty.')

        task = CobraTaskInfo.query.filter_by(id=task_id).first()
        task.branch = branch
        task.scan_way = scan_way
        task.new_version = new_version
        task.old_version = old_version
        task.target = target
        task.updated_time = datetime.datetime.now()
        try:
            db.session.add(task)
            db.session.commit()
            return jsonify(tag='success', msg='save success.')
        except:
            return jsonify(tag='danger', msg='save failed. Try again later?')
    else:
        task = CobraTaskInfo.query.filter_by(id=task_id).first()
        return render_template('backend/task/edit_task.html', data={
            'task': task,
        })
Python
0
@@ -138,16 +138,59 @@ jsonify +%0Afrom sqlalchemy.exc import SQLAlchemyError %0A%0Afrom . @@ -286,16 +286,32 @@ ateClass +, login_required %0Afrom ap @@ -499,24 +499,40 @@ ds=%5B'GET'%5D)%0A +@login_required%0A def tasks(pa @@ -539,94 +539,8 @@ ge): -%0A if not ValidateClass.check_login():%0A return redirect(ADMIN_URL + '/index') %0A%0A @@ -1069,109 +1069,39 @@ '%5D)%0A -def del_task():%0A if not ValidateClass.check_login():%0A return redirect(ADMIN_URL + '/index') +@login_required%0Adef del_task(): %0A%0A @@ -1282,21 +1282,16 @@ vc.vars. -task_ id).firs @@ -1426,33 +1426,70 @@ ss.')%0A except -: + SQLAlchemyError as e:%0A print e %0A return @@ -1634,16 +1634,32 @@ POST'%5D)%0A +@login_required%0A def edit @@ -1677,94 +1677,8 @@ id): -%0A if not ValidateClass.check_login():%0A return redirect(ADMIN_URL + '/index') %0A%0A @@ -3244,17 +3244,58 @@ except -: + SQLAlchemyError as e:%0A print e %0A
f5b8b4bafabc06504e2ee2e0571f2d8571db17bb
Update for v1.5.4
maxminddb/__init__.py
maxminddb/__init__.py
# pylint:disable=C0111

import os

import maxminddb.reader

try:
    import maxminddb.extension
except ImportError:
    maxminddb.extension = None

from maxminddb.const import (
    MODE_AUTO,
    MODE_MMAP,
    MODE_MMAP_EXT,
    MODE_FILE,
    MODE_MEMORY,
    MODE_FD,
)
from maxminddb.decoder import InvalidDatabaseError


def open_database(database, mode=MODE_AUTO):
    """Open a Maxmind DB database

    Arguments:
        database -- A path to a valid MaxMind DB file such as a GeoIP2 database
                    file, or a file descriptor in the case of MODE_FD.
        mode -- mode to open the database with. Valid mode are:
            * MODE_MMAP_EXT - use the C extension with memory map.
            * MODE_MMAP - read from memory map. Pure Python.
            * MODE_FILE - read database as standard file. Pure Python.
            * MODE_MEMORY - load database into memory. Pure Python.
            * MODE_FD - the param passed via database is a file descriptor, not
                        a path. This mode implies MODE_MEMORY.
            * MODE_AUTO - tries MODE_MMAP_EXT, MODE_MMAP, MODE_FILE in that
                          order. Default mode.
    """
    has_extension = maxminddb.extension and hasattr(maxminddb.extension, "Reader")
    if (mode == MODE_AUTO and has_extension) or mode == MODE_MMAP_EXT:
        if not has_extension:
            raise ValueError(
                "MODE_MMAP_EXT requires the maxminddb.extension module to be available"
            )
        return maxminddb.extension.Reader(database)
    if mode in (MODE_AUTO, MODE_MMAP, MODE_FILE, MODE_MEMORY, MODE_FD):
        return maxminddb.reader.Reader(database, mode)
    raise ValueError("Unsupported open mode: {0}".format(mode))


def Reader(database):  # pylint: disable=invalid-name
    """This exists for backwards compatibility. Use open_database instead"""
    return open_database(database)


__title__ = "maxminddb"
__version__ = "1.5.3"
__author__ = "Gregory Oschwald"
__license__ = "Apache License, Version 2.0"
__copyright__ = "Copyright 2013-2019 Maxmind, Inc."
Python
0
@@ -1942,17 +1942,17 @@ = %221.5. -3 +4 %22%0A__auth
5f9da62f28e61636f33495058f3ea4a98a9d3c19
add invalid separators to test
tests/inside_worker_test/cast_to_float_or_null_test.py
tests/inside_worker_test/cast_to_float_or_null_test.py
import pytest
import sqlalchemy

from tests.inside_worker_test.conftest import slow


@pytest.fixture(params=[2, 2.2, 3.898986, "3.898986", "6", "0.2", 0.6])
def valid_float_representation(request):
    return request.param


@pytest.fixture(params=["a2", "10b", "3.898986k", "3k.898986", "l6.9"])
def invalid_floats(request):
    return request.param


@slow
def test_cast_to_float_null_if_failed_returns_floats_with_valid_floats(osmaxx_functions, valid_float_representation):
    engine = osmaxx_functions

    result = engine.execute(
        sqlalchemy.text(
            "select cast_to_float_null_if_failed($${}$$) as float_value;".format(valid_float_representation)
        ).execution_options(autocommit=True)
    )
    assert result.rowcount == 1
    results = result.fetchall()
    assert len(results) == 1
    assert results[0]['float_value'] == float(valid_float_representation)


@slow
def test_cast_to_float_null_if_failed_returns_null_with_invalid_floats(osmaxx_functions, invalid_floats):
    engine = osmaxx_functions

    result = engine.execute(
        sqlalchemy.text(
            "select cast_to_float_null_if_failed($${}$$) as float_value;".format(invalid_floats)
        ).execution_options(autocommit=True)
    )
    assert result.rowcount == 1
    results = result.fetchall()
    assert len(results) == 1
    assert results[0]['float_value'] is None
Python
0.000002
@@ -125,35 +125,14 @@ 86, -%223.898986%22, %226%22, %220.2%22, 0.6 +, 0 %5D)%0Ad @@ -248,15 +248,15 @@ 8986 -k +c %22, %223 -k +d .898 @@ -266,13 +266,68 @@ %22, %22 -l +e 6.9%22 +, %22f0,9%22 %220,g9%22 %220,9h%22, %220,6%22, %22123'456%22, %221 290%22, None %5D)%0Ad
d13d7f0e950554f443e75d27d049a0fee683b822
Make a call to "wait" on Popen
test/bluebird_test.py
test/bluebird_test.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import unittest
from bluebird import *
import re
import os
from time import sleep
from subprocess import Popen

from elftools.elf import elffile as elf


def parse_proc_status(pid, field):
    proc_pid_path = '/proc/{}/status'.format(pid)
    with open(proc_pid_path) as f:
        status_raw = f.read()
    status_field = re.findall('{}:\t(.+)\n'.format(field), status_raw)
    return int(status_field[0])


def find_string_address(string):
    alt_print_fh = open('alt_print', 'rb')
    elf_handle = elf.ELFFile(alt_print_fh)
    data_section = elf_handle.get_section_by_name('.rodata')
    data = data_section.data()
    start_address = data_section.header['sh_addr']
    section_data = ''.join(map(chr, data))
    return start_address + section_data.find(string)


class BlueBirdTest(unittest.TestCase):

    def setUp(self):
        self.test_proc_filename = 'alt_print.txt'
        if os.path.exists(self.test_proc_filename):
            os.unlink(self.test_proc_filename)
        self.stdout = open(self.test_proc_filename, 'x')
        self.test_proc = Popen('./alt_print', stdout=self.stdout)
        self.test_proc_pid = self.test_proc.pid
        self.bluebird = Bluebird(self.test_proc_pid)
        self.bluebird.start()
        sleep(1)

    def tearDown(self):
        if self.test_proc.stdout is not None:
            self.test_proc.stdout.close()
        else:
            self.stdout.close()
        self.test_proc.kill()
        os.unlink(self.test_proc_filename)

    def test_attach(self):
        tracer_pid = parse_proc_status(self.test_proc_pid, 'TracerPid')
        self.assertEqual(test_pid, tracer_pid)

    def test_writestring(self):
        test_proc_word = 'Potatoe'
        test_proc_output = 'Process <{}> is running!'.format(self.test_proc_pid)
        test_proc_newoutput = '{} <{}> is running!'.format(test_proc_word,
                                                           self.test_proc_pid)
        self.bluebird.write(test_proc_addr, test_proc_word)
        sleep(2)
        with open(self.test_proc_filename) as test_file:
            proc_output = test_file.read()
        proc_output_lines = list(filter(None, proc_output.split('\n')))
        before_write = proc_output_lines[0]
        after_write = proc_output_lines[-1]
        self.assertEqual(after_write, test_proc_newoutput)
        self.assertNotEqual(after_write, test_proc_output)

    def test_readstring(self):
        test_proc_word = 'Process'
        word = self.bluebird.read(test_proc_addr, 1).strip('\n')
        self.assertEqual(test_proc_word, word)

    def test_get_syscall(self):
        syscall = self.bluebird.get_current_call()
        self.assertIn(syscall, test_proc_syscalls)

    def test_get_syscalls(self):
        test_syscalls = self.bluebird.get_ranged_syscalls(4)
        calls = test_proc_syscalls * 2
        self.assertCountEqual(test_syscalls, calls)

    def test_find_syscall(self):
        getsid = syscalls['NR_getsid']
        test_find = self.bluebird.find_call(getsid, non_blocking=True)
        while self.bluebird.tracing:
            sleep(1)
        self.assertIsNone(test_find)

    def test_find_syscall_timeout(self):
        foo_syscall = 404
        test_find = self.bluebird.find_call(foo_syscall, timeout=5)
        self.assertIsNone(test_find)

    def test_bbrk(self):
        self.bluebird.get_heap()
        limit_before = self.bluebird.heap_bounds[1]
        brk_inc_size = 0xffff
        self.bluebird.expand_heap(brk_inc_size)
        sleep(1)
        limit_after = self.bluebird.heap_bounds[1]
        self.assertEqual(limit_before + brk_inc_size + 1, limit_after)

    def test_bmmap_anon(self):
        self.bluebird.get_heap()
        self.bluebird.create_mmap(0, PAGESIZE, PROT_EXEC | PROT_WRITE,
                                  MAP_ANONYMOUS | MAP_SHARED, 0)
        self.bluebird.get_maps()
        self.assertIsNotNone(self.bluebird.maps.get('(deleted)'))

    def test_bmmap_file(self):
        self.bluebird.get_heap()
        self.bluebird.create_mmap(0, PAGESIZE, PROT_EXEC | PROT_WRITE,
                                  MAP_ANONYMOUS | MAP_SHARED, 0,
                                  path='/tmp/bluebird')
        self.bluebird.get_maps()
        sleep(1)
        self.assertIsNotNone(self.bluebird.maps.get('(deleted)'))

    def test_get_trace_dir(self):
        curr_dir = os.getcwd()
        self.bluebird.get_heap()
        self.assertEqual(curr_dir, self.bluebird.get_trace_dir())

    def test_iotrace_write(self):
        process_str = 'Process <{}> is running!\n'.format(self.test_proc_pid)
        self.bluebird.rw_trace(write, ncalls=4)
        for fd in self.bluebird.wdata:
            for wstr in self.bluebird.wdata[fd]:
                self.assertEqual(process_str, wstr)

    def test_detach(self):
        tracer_pid = parse_proc_status(self.test_proc_pid, 'TracerPid')
        self.assertEqual(test_pid, tracer_pid)
        self.bluebird.stop()
        sleep(1)
        tracer_pid = parse_proc_status(self.test_proc_pid, 'TracerPid')
        self.assertEqual(0, tracer_pid)


if __name__ == '__main__':
    test_pid = os.getpid()
    write, nanosleep = syscalls['NR_write'], syscalls['NR_nanosleep']
    test_proc_addr = find_string_address('Process')
    test_proc_syscalls = (write, nanosleep)
    unittest.main(verbosity=3)
Python
0.000014
@@ -1475,24 +1475,54 @@ proc.kill()%0A + self.test_proc.wait()%0A os.u
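Annotation: the one-hunk diff above adds self.test_proc.wait() right after kill() in tearDown. The point is process reaping: kill() only delivers SIGKILL, and until the parent waits, the child lingers as a zombie, presumably polluting later test cases. A throwaway, Linux-only sketch (hypothetical script, not part of the dataset row) makes this visible:

import subprocess
import time

proc = subprocess.Popen(['sleep', '60'])
proc.kill()       # delivers SIGKILL but does not reap the child
time.sleep(0.2)
with open('/proc/%d/status' % proc.pid) as f:
    for line in f:
        if line.startswith('State:'):
            print(line.strip())  # State: Z (zombie)
proc.wait()       # reaps the child; the /proc entry then disappears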
b3e1b6bd9f79427142ebfe4b57892d1cf3a89e86
Implement the latest test spec for update which requires most of the parameters found in an example usage of mlab-ns against npad.
mlab-ns-simulator/mlabsim/update.py
mlab-ns-simulator/mlabsim/update.py
""" This approximates the mlab-ns slice information gathering. The actual system uses nagios and we're not certain about the details. This much simplified version is just a web URL anyone may PUT data into. Warning: This doesn't have any security properties! We need a way to prevent the addition of malicious entries. """ import json from twisted.web import resource from twisted.web.server import NOT_DONE_YET class UpdateResource (resource.Resource): def __init__(self, db): """db is a dict which will be modified to map { fqdn -> other_details }""" resource.Resource.__init__(self) self._db = db def render_PUT(self, request): [fqdn, tool_extra_json] = self._parse_args(request.args) try: tool_extra = json.loads(tool_extra_json) except ValueError: request.setResponseCode(400, 'invalid') request.finish() else: self._db[fqdn] = {'tool_extra': tool_extra} request.setResponseCode(200, 'ok') request.finish() return NOT_DONE_YET def _parse_args(self, args): for name in ['fqdn', 'tool_extra']: [val] = args[name] yield val
Python
0
@@ -413,16 +413,131 @@ E_YET%0A%0A%0A +DBEntryNames = %5B%0A 'city',%0A 'country',%0A 'fqdn',%0A 'ip',%0A 'port',%0A 'site',%0A 'tool_extra',%0A %5D%0A%0A class Up @@ -792,74 +792,206 @@ -%5Bfqdn, tool_extra_json%5D = self._parse_args(request.args)%0A%0A +dbentry = %7B%7D%0A%0A for name in DBEntryNames:%0A # BUG: Multiple values not handled nor tested:%0A %5Bvalue%5D = request.args%5Bname%5D%0A if name == 'tool_extra':%0A try: @@ -986,16 +986,19 @@ + try:%0A @@ -1006,26 +1006,29 @@ -tool_extra + value = json. @@ -1037,25 +1037,23 @@ ads( -tool_extra_json)%0A +value)%0A @@ -1075,16 +1075,24 @@ eError:%0A + @@ -1139,32 +1139,40 @@ d')%0A + + request.finish() @@ -1184,81 +1184,129 @@ -else:%0A self._db%5B + return NOT_DONE_YET%0A%0A dbentry%5Bname%5D = value%0A%0A self._db%5Bdbentry%5B' fqdn +'%5D %5D = -%7B'tool_extra': tool_extra%7D%0A%0A +dbentry%0A%0A re @@ -1293,34 +1293,32 @@ bentry%0A%0A - request.setRespo @@ -1336,20 +1336,16 @@ , 'ok')%0A - @@ -1394,135 +1394,4 @@ YET%0A -%0A def _parse_args(self, args):%0A for name in %5B'fqdn', 'tool_extra'%5D:%0A %5Bval%5D = args%5Bname%5D%0A yield val%0A
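Annotation: the %0A-escaped diff above is hard to read inline. Decoded and re-indented, it adds a module-level DBEntryNames list, rewrites render_PUT to collect every field (parsing tool_extra as JSON), and deletes the old _parse_args helper. Best-effort reconstruction of the post-commit code (whitespace approximate; json, resource.Resource and NOT_DONE_YET come from the row's existing imports):

DBEntryNames = [
    'city',
    'country',
    'fqdn',
    'ip',
    'port',
    'site',
    'tool_extra',
]

def render_PUT(self, request):  # method of UpdateResource
    dbentry = {}

    for name in DBEntryNames:
        # BUG: Multiple values not handled nor tested:
        [value] = request.args[name]
        if name == 'tool_extra':
            try:
                value = json.loads(value)
            except ValueError:
                request.setResponseCode(400, 'invalid')
                request.finish()
                return NOT_DONE_YET

        dbentry[name] = value

    self._db[dbentry['fqdn']] = dbentry

    request.setResponseCode(200, 'ok')
    request.finish()
    return NOT_DONE_YET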
fd1425345606c8a5e9ea87177b710b05fb057ce8
fix typo in clinicaltrials search url parameter
scrapi/harvesters/clinicaltrials.py
scrapi/harvesters/clinicaltrials.py
""" API harvester for ClinicalTrials.gov for the SHARE Notification Service http://clinicaltrials.gov/ct2/results?lup_s=04%2F26%2F2015%2F&lup_e=04%2F27%2F2015&displayxml=true iindividual result: http://ClinicalTrials.gov/show/NCT02425332?displayxml=true """ from __future__ import unicode_literals import time import logging from datetime import date, timedelta from lxml import etree from scrapi import requests from scrapi import settings from scrapi.base import XMLHarvester from scrapi.util import copy_to_unicode from scrapi.linter.document import RawDocument from scrapi.base.schemas import default_name_parser from scrapi.base.helpers import compose, single_result, build_properties, date_formatter logger = logging.getLogger(__name__) class ClinicalTrialsHarvester(XMLHarvester): short_name = 'clinicaltrials' long_name = 'ClinicalTrials.gov' url = 'https://clinicaltrials.gov/' DEFAULT_ENCODING = 'UTF-8' record_encoding = None # TODO - clinicaltrials elements have a lot of extra metadata - at some # point in the future we should do a more thorough audit. schema = { "contributors": ('//overall_official/last_name/node()', default_name_parser), "uris": { "canonicalUri": ("//required_header/url/node()", single_result) }, "providerUpdatedDateTime": ("lastchanged_date/node()", compose(date_formatter, single_result)), "title": ('//official_title/node()', '//brief_title/node()', lambda x, y: single_result(x) or single_result(y)), "description": ('//brief_summary/textblock/node()', '//brief_summary/textblock/node()', lambda x, y: single_result(x) or single_result(y)), "tags": ("//keyword/node()", lambda tags: [tag.lower() for tag in tags]), "sponsorships": [ { "sponsor": { "sponsorName": ("//sponsors/lead_sponsor/agency/node()", single_result) } }, { "sponsor": { "sponsorName": ("//sponsors/collaborator/agency/node()", single_result) } } ], "otherProperties": build_properties( ("serviceID", "//nct_id/node()"), ('oversightAuthority', '//oversight_info/authority/node()'), ('studyDesign', '//study_design/node()'), ('numberOfArms', '//number_of_arms/node()'), ('source', '//source/node()'), ('verificationDate', '//verification_date/node()'), ('lastChanged', '//lastchanged_date/node()'), ('condition', '//condition/node()'), ('verificationDate', '//verification_date/node()'), ('lastChanged', '//lastchanged_date/node()'), ('status', '//status/node()'), ('locationCountries', '//location_countries/country/node()'), ('isFDARegulated', '//is_fda_regulated/node()'), ('isSection801', '//is_section_801/node()'), ('hasExpandedAccess', '//has_expanded_access/node()'), ('leadSponsorAgencyClass', '//lead_sponsor/agency_class/node()'), ('collaborator', '//collaborator/agency/node()'), ('collaboratorAgencyClass', '//collaborator/agency_class/node()'), ('measure', '//primary_outcome/measure/node()'), ('timeFrame', '//primary_outcome/time_frame/node()'), ('safetyIssue', '//primary_outcome/safety_issue/node()'), ('secondaryOutcomes', '//secondary_outcome/measure/node()'), ('enrollment', '//enrollment/node()'), ('armGroup', '//arm_group/arm_group_label/node()'), ('intervention', '//intervention/intervention_type/node()'), ('eligibility', '//eligibility/node()'), ('link', '//link/url/node()'), ('responsible_party', '//responsible_party/responsible_party_full_name/node()') ) } @property def namespaces(self): return None def harvest(self, start_date=None, end_date=None): """ First, get a list of all recently updated study urls, then get the xml one by one and save it into a list of docs including other information """ start_date 
= start_date or date.today() - timedelta(settings.DAYS_BACK) end_date = end_date or date.today() end_month = end_date.strftime('%m') end_day = end_date.strftime('%d') end_year = end_date.strftime('%Y') start_month = start_date.strftime('%m') start_day = start_date.strftime('%d') start_year = start_date.strftime('%Y') base_url = 'http://clinicaltrials.gov/ct2/results?lup_s=' url_end = '{}%2F{}%2F{}%2F&lup_e={}%2F{}%2F{}&displayxml=true'.\ format(start_month, start_day, start_year, end_month, end_day, end_year) url = base_url + url_end # grab the total number of studies initial_request = requests.get(url) record_encoding = initial_request.encoding initial_request_xml = etree.XML(initial_request.content) count = int(initial_request_xml.xpath('//search_results/@count')[0]) xml_list = [] if int(count) > 0: # get a new url with all results in it url = url + '&count=' + str(count) total_requests = requests.get(url) initial_doc = etree.XML(total_requests.content) # make a list of urls from that full list of studies study_urls = [] for study in initial_doc.xpath('//clinical_study'): study_urls.append(study.xpath('url/node()')[0] + '?displayxml=true') # grab each of those urls for full content logger.info("There are {} urls to harvest - be patient...".format(len(study_urls))) count = 0 official_count = 0 for study_url in study_urls: try: content = requests.get(study_url) except requests.exceptions.ConnectionError as e: logger.info('Connection error: {}, wait a bit...'.format(e)) time.sleep(30) continue doc = etree.XML(content.content) record = etree.tostring(doc, encoding=record_encoding) doc_id = doc.xpath('//nct_id/node()')[0] xml_list.append(RawDocument({ 'doc': record, 'source': self.short_name, 'docID': copy_to_unicode(doc_id), 'filetype': 'xml', })) official_count += 1 count += 1 if count % 100 == 0: logger.info("You've requested {} studies, keep going!".format(official_count)) count = 0 return xml_list
Python
0.000019
@@ -4713,19 +4713,16 @@ F%7B%7D%252F%7B%7D -%252F &lup_e=%7B
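Annotation: the diff above drops a single stray encoded slash (%2F) from the lup_s date before &lup_e. Rendered with example dates (hypothetical values, chosen to match the module docstring), the before/after query strings look like this:

fields = ('04', '26', '2015', '04', '27', '2015')

before = '{}%2F{}%2F{}%2F&lup_e={}%2F{}%2F{}&displayxml=true'.format(*fields)
after = '{}%2F{}%2F{}&lup_e={}%2F{}%2F{}&displayxml=true'.format(*fields)

print(before)  # 04%2F26%2F2015%2F&lup_e=04%2F27%2F2015&displayxml=true
print(after)   # 04%2F26%2F2015&lup_e=04%2F27%2F2015&displayxml=true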
ff19498444c7897896d5b2a623f6876f5dd39528
Fix broken import
scrapi/harvesters/clinicaltrials.py
scrapi/harvesters/clinicaltrials.py
""" API harvester for ClinicalTrials.gov for the SHARE Notification Service http://clinicaltrials.gov/ct2/results?lup_s=04%2F26%2F2015%2F&lup_e=04%2F27%2F2015&displayxml=true iindividual result: http://ClinicalTrials.gov/show/NCT02425332?displayxml=true """ from __future__ import unicode_literals import time import logging from datetime import date, timedelta import xmltodict from lxml import etree from scrapi import requests from scrapi import settings from scrapi.base import XMLHarvester from scrapi.util import copy_to_unicode from scrapi.linter.document import RawDocument from scrapi.base.schemas import default_name_parser from scrapi.base.helpers import compose, single_result, build_properties, datetime_formatter logger = logging.getLogger(__name__) element_to_dict = compose(xmltodict.parse, etree.tostring) def non_string(item): return not isinstance(item, str) class ClinicalTrialsHarvester(XMLHarvester): short_name = 'clinicaltrials' long_name = 'ClinicalTrials.gov' url = 'https://clinicaltrials.gov/' DEFAULT_ENCODING = 'UTF-8' record_encoding = None # TODO - clinicaltrials elements have a lot of extra metadata - at some # point in the future we should do a more thorough audit. schema = { "contributors": ('//overall_official/last_name/node()', default_name_parser), "uris": { "canonicalUri": ("//required_header/url/node()", single_result) }, "providerUpdatedDateTime": ("lastchanged_date/node()", compose(datetime_formatter, single_result)), "title": ('//official_title/node()', '//brief_title/node()', lambda x, y: single_result(x) or single_result(y)), "description": ('//brief_summary/textblock/node()', '//brief_summary/textblock/node()', lambda x, y: single_result(x) or single_result(y)), "tags": ("//keyword/node()", lambda tags: [tag.lower() for tag in tags]), "sponsorships": [ { "sponsor": { "sponsorName": ("//sponsors/lead_sponsor/agency/node()", single_result) } }, { "sponsor": { "sponsorName": ("//sponsors/collaborator/agency/node()", single_result) } } ], "otherProperties": build_properties( ("serviceID", "//nct_id/node()"), ('oversightAuthority', '//oversight_info/authority/node()'), ('studyDesign', '//study_design/node()'), ('numberOfArms', '//number_of_arms/node()'), ('source', '//source/node()'), ('verificationDate', '//verification_date/node()'), ('lastChanged', '//lastchanged_date/node()'), ('condition', '//condition/node()'), ('verificationDate', '//verification_date/node()'), ('lastChanged', '//lastchanged_date/node()'), ('status', '//status/node()'), ('locationCountries', '//location_countries/country/node()'), ('isFDARegulated', '//is_fda_regulated/node()'), ('isSection801', '//is_section_801/node()'), ('hasExpandedAccess', '//has_expanded_access/node()'), ('leadSponsorAgencyClass', '//lead_sponsor/agency_class/node()'), ('collaborator', '//collaborator/agency/node()'), ('collaboratorAgencyClass', '//collaborator/agency_class/node()'), ('measure', '//primary_outcome/measure/node()'), ('timeFrame', '//primary_outcome/time_frame/node()'), ('safetyIssue', '//primary_outcome/safety_issue/node()'), ('secondaryOutcomes', '//secondary_outcome/measure/node()'), ('enrollment', '//enrollment/node()'), ('armGroup', '//arm_group/arm_group_label/node()'), ('intervention', '//intervention/intervention_type/node()'), ('eligibility', ('//eligibility/node()', compose( lambda x: list(map(element_to_dict, x)), lambda x: list(filter(non_string, x)) ))), ('link', '//link/url/node()'), ('responsible_party', '//responsible_party/responsible_party_full_name/node()') ) } @property def 
namespaces(self): return None def harvest(self, start_date=None, end_date=None): """ First, get a list of all recently updated study urls, then get the xml one by one and save it into a list of docs including other information """ start_date = start_date or date.today() - timedelta(settings.DAYS_BACK) end_date = end_date or date.today() end_month = end_date.strftime('%m') end_day = end_date.strftime('%d') end_year = end_date.strftime('%Y') start_month = start_date.strftime('%m') start_day = start_date.strftime('%d') start_year = start_date.strftime('%Y') base_url = 'http://clinicaltrials.gov/ct2/results?lup_s=' url_end = '{}%2F{}%2F{}&lup_e={}%2F{}%2F{}&displayxml=true'.\ format(start_month, start_day, start_year, end_month, end_day, end_year) url = base_url + url_end # grab the total number of studies initial_request = requests.get(url) record_encoding = initial_request.encoding initial_request_xml = etree.XML(initial_request.content) count = int(initial_request_xml.xpath('//search_results/@count')[0]) xml_list = [] if int(count) > 0: # get a new url with all results in it url = url + '&count=' + str(count) total_requests = requests.get(url) initial_doc = etree.XML(total_requests.content) # make a list of urls from that full list of studies study_urls = [] for study in initial_doc.xpath('//clinical_study'): study_urls.append(study.xpath('url/node()')[0] + '?displayxml=true') # grab each of those urls for full content logger.info("There are {} urls to harvest - be patient...".format(len(study_urls))) count = 0 official_count = 0 for study_url in study_urls: try: content = requests.get(study_url) except requests.exceptions.ConnectionError as e: logger.info('Connection error: {}, wait a bit...'.format(e)) time.sleep(30) continue doc = etree.XML(content.content) record = etree.tostring(doc, encoding=record_encoding) doc_id = doc.xpath('//nct_id/node()')[0] xml_list.append(RawDocument({ 'doc': record, 'source': self.short_name, 'docID': copy_to_unicode(doc_id), 'filetype': 'xml', })) official_count += 1 count += 1 if count % 100 == 0: logger.info("You've requested {} studies, keep going!".format(official_count)) count = 0 return xml_list
Python
0.000006
@@ -598,22 +598,22 @@ pi.base. -schema +helper s import @@ -617,59 +617,13 @@ ort -default_name_parser%0Afrom scrapi.base.helpers import +(%0A com @@ -628,16 +628,21 @@ ompose, +%0A single_r @@ -648,16 +648,21 @@ result, +%0A build_pr @@ -671,16 +671,21 @@ erties, +%0A datetime @@ -694,16 +694,44 @@ ormatter +, %0A default_name_parser%0A) %0A%0Alogger
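Annotation: decoded, the "Fix broken import" diff replaces the import of default_name_parser from scrapi.base.schemas (the broken path) plus the one-line helpers import with a single parenthesized import from scrapi.base.helpers. Before/after, reconstructed from the hunks:

# before
from scrapi.base.schemas import default_name_parser
from scrapi.base.helpers import compose, single_result, build_properties, datetime_formatter

# after
from scrapi.base.helpers import (
    compose,
    single_result,
    build_properties,
    datetime_formatter,
    default_name_parser
)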
45a24fae9f5e1ee24c2e0283746224e51f718cc2
Remove redundant test of permissions parameter
planex/tree.py
planex/tree.py
""" In-memory 'filesystem' library """ import os class Tree(object): """ An in-memory 'filesystem' which accumulates file changes to be written later. """ def __init__(self): self.tree = {} def append(self, filename, contents=None, permissions=None): """ Append contents to filename in the in-memory filesystem. """ node = self.tree.get(filename, {}) if contents: node['contents'] = node.get('contents', '') + contents if permissions: if 'permissions' in node and \ node['permissions'] != permissions: raise Exception("Inconsistent permissions for '%s'" % filename) if permissions: node['permissions'] = permissions else: node['permissions'] = 0o644 self.tree[filename] = node def apply(self, basepath): """ Save in-memory filesystem to disk. """ for subpath, node in self.tree.items(): permissions = node.get("permissions", 0o644) contents = node.get("contents", "") fullpath = os.path.join(basepath, subpath) if not os.path.isdir(os.path.dirname(fullpath)): os.makedirs(os.path.dirname(fullpath)) out = os.open(os.path.join(basepath, subpath), os.O_WRONLY | os.O_CREAT, permissions) os.write(out, contents) os.close(out) def __repr__(self): res = "" for subpath, node in self.tree.items(): permissions = node.get("permissions", 0o644) contents = node.get("contents", "") res += "%s (0o%o):\n" % (subpath, permissions) res += contents res += "\n\n" return res
Python
0
@@ -412,16 +412,17 @@ me, %7B%7D)%0A +%0A @@ -493,32 +493,33 @@ '') + contents%0A +%0A if permi @@ -709,41 +709,8 @@ me)%0A -%0A if permissions:%0A @@ -755,69 +755,8 @@ ons%0A - else:%0A node%5B'permissions'%5D = 0o644 %0A
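Annotation: after the diff above, append loses the duplicated inner "if permissions:" test and the explicit 0o644 fallback; the default is already applied at read time via node.get("permissions", 0o644) in apply and __repr__. Reconstruction of the post-commit method (whitespace approximate):

def append(self, filename, contents=None, permissions=None):
    node = self.tree.get(filename, {})

    if contents:
        node['contents'] = node.get('contents', '') + contents

    if permissions:
        if 'permissions' in node and \
                node['permissions'] != permissions:
            raise Exception("Inconsistent permissions for '%s'"
                            % filename)
        node['permissions'] = permissions

    self.tree[filename] = node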
df80f5c2ae1bbf67a832e6e8b641799c5b54dc53
fix typo
scripts/construct_bakeoff_graphs.py
scripts/construct_bakeoff_graphs.py
#!/usr/bin/python """ Locally regenerate all the bakeoff regions graphs and indexes that are found here s3://cgl-pipeline-inputs/vg_cgl/bakeoff/ The input fasta's and vcf's are expected to be there already """ import os, sys, subprocess region_to_bed_hg38 = { 'BRCA1':('17', 43044293, 43125482), 'BRCA2':('13', 32314860, 32399849), 'SMA':('5', 69216818, 71614443), 'MHC':('6', 28510119, 33480577) } def get_vcf_coords_hg38(region): r = region_to_bed_hg38[region] # change bed to 1-based inclusive return '{}:{}-{}'.format(r[0], r[1] + 1, r[2]) def get_vcf_path_hg38(region): return 's3://cgl-pipeline-inputs/vg_cgl/bakeoff/1kg_hg38-{}.vcf.gz'.format(region) def get_fasta_path_hg38(region): chrom = region_to_bed_hg38[region][0] return 's3://cgl-pipeline-inputs/vg_cgl/bakeoff/chr{}.fa.gz'.format(chrom) if len(sys.argv) not in [3,4]: print "Usage: {} jobstore outstore <config>".format(sys.argv[0]) sys.exit(1) job_store = sys.argv[1] out_store = sys.argv[2] config = sys.argv[3] if len(sys.argv) == 4 else None config_opts = [] if not config else ['--config', config] for region in ['BRCA1', 'BRCA2', 'SMA', 'MHC']: # make the graphs/indexes and a bunch of controls cmd = ['toil-vg', 'construct', job_store, out_store, '--vcf', get_vcf_path_hg38(region), '--fasta', get_fasta_path_hg38(region), '--regions', get_vcf_coords_hg38(region), '--out_name', 'snp1kg-{}'.format(region), '--alt_paths', '--realTimeLogging', '--control_sample', 'HG00096', '--min_af', '0.0335570469', '--primary', '--gcsa_index', '--xg_index'] + config_opts subprocess.check_call(cmd) # make the gbwt of the "minus" graph cmd = ['toil-vg', 'index', job_store, out_store, '--graphs', os.path.join(out_store, 'snp1kg-{}_HG00096.vg'.format(region)), '--chroms', region_to_bed_hg38[region][0], '--vcf_phasing', get_vcf_path_hg38(region), '--make_gbwt', '--index_name', 'snp1kg_minus_HG00096-{}'.format(region), '--skip_gcsa', '--realTimeLogging'] + config_opts subprocess.check_call(cmd) # make the names consistent to what we've been using for os_file in os.listdir(out_store): prefix = 'snp1kg-{}'.format(region) if os_file.startswith(prefix): if os_file.endswith('.gcsa.lcp'): ext = '.gcsa.lscp' name = os_file[:-len(ext)] else: name, ext = os.path.splitext(os_file) new_name = 'snp1kg' + name[len(prefix):] + '-{}'.format(region) + ext if new_name.startswith('snp1kg_primary'): new_name = new_name[len('snp1kg_'):] elif new_name.startswith('snp1kg_minaf_0.0335570469'): new_name = 'snp1kg_threshold10' + new_name[len('snp1kg_minaf_0.0335570469'):] if os_file != new_name: cmd = ['mv', os.path.join(out_store, os_file), os.path.join(out_store, new_name)] subprocess.check_call(cmd)
Python
0.999991
@@ -2464,17 +2464,16 @@ '.gcsa.l -s cp'%0A
d4257ea870a665f7d52fbedc225b56f6607d439d
fix parameters string
python_terraform/__init__.py
python_terraform/__init__.py
import subprocess import os import json import logging log = logging.getLogger(__name__) class Terraform: def __init__(self, targets=None, state='terraform.tfstate', variables=None): self.targets = [] if targets is None else targets self.variables = dict() if variables is None else variables self.state_filename = state self.state_data = dict() self.parallelism = 50 def apply(self, targets=None, variables=None, **kargs): """ refer to https://terraform.io/docs/commands/apply.html :param variables: variables in dict type :param targets: targets in list :returns return_code, stdout, stderr """ variables = self.variables if variables is None else variables targets = self.targets if targets is None else targets parameters = [] parameters += self._generate_targets(targets) parameters += self._generate_var_string(variables) parameters += self._gen_param_string(kargs) parameters = \ ['terraform', 'apply', '-state=%s' % self.state_filename] + parameters cmd = ' '.join(parameters) return self._run_cmd(cmd) def _gen_param_string(self, kargs): params = [] for key, value in kargs.items(): params += ['%s=%s' % (key, value)] return params def _run_cmd(self, cmd): log.debug('command: ' + cmd) p = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) out, err = p.communicate() ret_code = p.returncode log.debug('output: ' + out) if ret_code == 0: log.debug('error: ' + err) self.read_state_file() return ret_code, out, err def destroy(self, targets=None, variables=None, **kwargs): variables = self.variables if variables is None else variables targets = self.targets if targets is None else targets parameters = [] parameters += self._generate_targets(targets) parameters += self._generate_var_string(variables) parameters = \ ['terraform', 'destroy', '-force', '-state=%s' % self.state_filename] + \ parameters cmd = ' '.join(parameters) return self._run_cmd(cmd) def refresh(self, targets=None, variables=None): variables = self.variables if variables is None else variables targets = self.targets if targets is None else targets parameters = [] parameters += self._generate_targets(targets) parameters += self._generate_var_string(variables) parameters = \ ['terraform', 'refresh', '-state=%s' % self.state_filename] + \ parameters cmd = ' '.join(parameters) return self._run_cmd(cmd) def read_state_file(self): """ read .tfstate file :return: states file in dict type """ if os.path.exists(self.state_filename): with open(self.state_filename) as f: json_data = json.load(f) self.state_data = json_data log.debug("state_data=%s" % str(self.state_data)) return json_data return dict() def is_any_aws_instance_alive(self): self.refresh() if not os.path.exists(self.state_filename): log.debug("can't find %s " % self.state_data) return False self.read_state_file() try: main_module = self._get_main_module() for resource_key, info in main_module['resources'].items(): if 'aws_instance' in resource_key: log.debug("%s is found when read state" % resource_key) return True log.debug("no aws_instance found in resource key") return False except KeyError as err: log.debug(str(err)) return False except TypeError as err: log.debug(str(err)) return False def _get_main_module(self): return self.state_data['modules'][0] def get_aws_instances(self): instances = dict() try: main_module = self._get_main_module() for resource_key, info in main_module['resources'].items(): if 'aws_instance' in resource_key: instances[resource_key] = info except KeyError: return instances except TypeError: return instances return instances def 
get_aws_instance(self, resource_name): """ :param resource_name: name of terraform resource, make source count is attached :return: return None if not exist, dict type if exist """ try: return self.get_aws_instances()[resource_name] except KeyError: return None def get_output_value(self, output_name): """ :param output_name: :return: """ try: main_module = self._get_main_module() return main_module['outputs'][output_name] except KeyError: return None @staticmethod def _generate_var_string(d): str_t = [] for k, v in d.iteritems(): str_t += ['-var'] + ["%s=%s" % (k, v)] return str_t @staticmethod def _generate_targets(targets): str_t = [] for t in targets: str_t += ['-target=%s' % t] return str_t
Python
0.99972
@@ -1294,32 +1294,62 @@ kargs.items():%0A + if not value:%0A para @@ -1356,16 +1356,17 @@ ms += %5B' +- %25s=%25s' %25 @@ -1380,16 +1380,74 @@ value)%5D%0A + else:%0A params += %5B'-%25s' %25 key%5D%0A
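Annotation: the three hunks above are dense; decoded as literally as I can read them, _gen_param_string gains a leading dash on every emitted option and a branch on each keyword value. This is a best-effort reconstruction, and note that the hunks really do place the '-key=value' form under the falsy branch:

def _gen_param_string(self, kargs):
    params = []
    for key, value in kargs.items():
        if not value:
            params += ['-%s=%s' % (key, value)]
        else:
            params += ['-%s' % key]
    return params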
7ad7f0231bc50c58f9b606cbab36d6cd98e141ec
Make the error message clearer (#944)
pyvista/plotting/__init__.py
pyvista/plotting/__init__.py
"""Plotting routines.""" from .colors import (color_char_to_word, get_cmap_safe, hex_to_rgb, hexcolors, string_to_rgb, PARAVIEW_BACKGROUND) from .export_vtkjs import export_plotter_vtkjs, get_vtkjs_url from .helpers import plot, plot_arrows, plot_compare_four, plot_itk from .itkplotter import PlotterITK from .plotting import BasePlotter, Plotter, close_all from .renderer import CameraPosition, Renderer, scale_point from .theme import (DEFAULT_THEME, FONT_KEYS, MAX_N_COLOR_BARS, parse_color, parse_font_family, rcParams, set_plot_theme) from .tools import (create_axes_marker, create_axes_orientation_box, opacity_transfer_function, system_supports_plotting) from .widgets import WidgetHelper class QtDeprecationError(Exception): """Depreciation Error for features that moved to `pyvistaqt`.""" message = """`{}` has moved to pyvistaqt. You can install this from PyPI with: `pip install pyvistaqt` See https://github.com/pyvista/pyvistaqt """ def __init__(self, feature_name): """Empty init.""" Exception.__init__(self, self.message.format(feature_name)) class BackgroundPlotter(): """This class has been moved to pyvistaqt.""" def __init__(self, *args, **kwargs): """Empty init.""" raise QtDeprecationError('BackgroundPlotter') class QtInteractor(): """This class has been moved to pyvistaqt.""" def __init__(self, *args, **kwargs): """Empty init.""" raise QtDeprecationError('QtInteractor')
Python
0.003492
@@ -976,16 +976,116 @@ istaqt%60%0A + Then import it via: %60from pyvistaqt import %7B%7D%60%0A %60%7B%7D%60 is no longer accessible by %60pyvista.%7B%7D%60%0A See @@ -1243,16 +1243,18 @@ .format( +*%5B feature_ @@ -1257,16 +1257,21 @@ ure_name +%5D * 4 ))%0A%0A%0Acla
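Annotation: decoded, the diff grows the deprecation message from one placeholder to four and fills them all with the same name via format(*[feature_name] * 4). Rendered for BackgroundPlotter, the post-commit message comes out roughly like this (standalone sketch; wording lifted from the hunks, whitespace approximate):

message = """`{}` has moved to pyvistaqt.
    You can install this from PyPI with: `pip install pyvistaqt`
    Then import it via: `from pyvistaqt import {}`
    `{}` is no longer accessible by `pyvista.{}`
    See https://github.com/pyvista/pyvistaqt
"""

print(message.format(*["BackgroundPlotter"] * 4))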
fe08db2713cb35e1424034d58d750ebdc52cedbc
Remove explicit < 400 check as apparently this is confusing
synapse/util/retryutils.py
synapse/util/retryutils.py
# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from twisted.internet import defer from synapse.api.errors import CodeMessageException import logging import random logger = logging.getLogger(__name__) class NotRetryingDestination(Exception): def __init__(self, retry_last_ts, retry_interval, destination): msg = "Not retrying server %s." % (destination,) super(NotRetryingDestination, self).__init__(msg) self.retry_last_ts = retry_last_ts self.retry_interval = retry_interval self.destination = destination @defer.inlineCallbacks def get_retry_limiter(destination, clock, store, **kwargs): """For a given destination check if we have previously failed to send a request there and are waiting before retrying the destination. If we are not ready to retry the destination, this will raise a NotRetryingDestination exception. Otherwise, will return a Context Manager that will mark the destination as down if an exception is thrown (excluding CodeMessageException with code < 500) Example usage: try: limiter = yield get_retry_limiter(destination, clock, store) with limiter: response = yield do_request() except NotRetryingDestination: # We aren't ready to retry that destination. raise """ retry_last_ts, retry_interval = (0, 0) retry_timings = yield store.get_destination_retry_timings( destination ) if retry_timings: retry_last_ts, retry_interval = ( retry_timings["retry_last_ts"], retry_timings["retry_interval"] ) now = int(clock.time_msec()) if retry_last_ts + retry_interval > now: raise NotRetryingDestination( retry_last_ts=retry_last_ts, retry_interval=retry_interval, destination=destination, ) defer.returnValue( RetryDestinationLimiter( destination, clock, store, retry_interval, **kwargs ) ) class RetryDestinationLimiter(object): def __init__(self, destination, clock, store, retry_interval, min_retry_interval=10 * 60 * 1000, max_retry_interval=24 * 60 * 60 * 1000, multiplier_retry_interval=5, backoff_on_404=False): """Marks the destination as "down" if an exception is thrown in the context, except for CodeMessageException with code < 500. If no exception is raised, marks the destination as "up". Args: destination (str) clock (Clock) store (DataStore) retry_interval (int): The next retry interval taken from the database in milliseconds, or zero if the last request was successful. min_retry_interval (int): The minimum retry interval to use after a failed request, in milliseconds. max_retry_interval (int): The maximum retry interval to use after a failed request, in milliseconds. multiplier_retry_interval (int): The multiplier to use to increase the retry interval after a failed request. 
backoff_on_404 (bool): Back off if we get a 404 """ self.clock = clock self.store = store self.destination = destination self.retry_interval = retry_interval self.min_retry_interval = min_retry_interval self.max_retry_interval = max_retry_interval self.multiplier_retry_interval = multiplier_retry_interval self.backoff_on_404 = backoff_on_404 def __enter__(self): pass def __exit__(self, exc_type, exc_val, exc_tb): valid_err_code = False if exc_type is not None and issubclass(exc_type, CodeMessageException): # Some error codes are perfectly fine for some APIs, whereas other # APIs may expect to never received e.g. a 404. It's important to # handle 404 as some remote servers will return a 404 when the HS # has been decommissioned. if exc_val.code < 400: valid_err_code = True elif exc_val.code == 404 and self.backoff_on_404: valid_err_code = False elif exc_val.code == 429: # 429 is us being aggresively rate limited, so lets rate limit # ourselves. valid_err_code = False elif exc_val.code < 500: valid_err_code = True else: valid_err_code = False if exc_type is None or valid_err_code: # We connected successfully. if not self.retry_interval: return retry_last_ts = 0 self.retry_interval = 0 else: # We couldn't connect. if self.retry_interval: self.retry_interval *= self.multiplier_retry_interval self.retry_interval *= int(random.uniform(0.8, 1.4)) if self.retry_interval >= self.max_retry_interval: self.retry_interval = self.max_retry_interval else: self.retry_interval = self.min_retry_interval retry_last_ts = int(self.clock.time_msec()) @defer.inlineCallbacks def store_retry_timings(): try: yield self.store.set_destination_retry_timings( self.destination, retry_last_ts, self.retry_interval ) except: logger.exception( "Failed to store set_destination_retry_timings", ) store_retry_timings()
Python
0
@@ -4760,83 +4760,8 @@ ode -%3C 400:%0A valid_err_code = True%0A elif exc_val.code == 4
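Annotation: with the deletion applied, the status-code chain in __exit__ starts at the 404 test. Anything below 500 that is not a flagged 404 or a 429 still ends up valid via the final elif, so behaviour for sub-400 codes is unchanged while the confusing explicit branch disappears. Reconstructed post-commit chain (indentation flattened for readability):

if exc_val.code == 404 and self.backoff_on_404:
    valid_err_code = False
elif exc_val.code == 429:
    # 429 is us being aggresively rate limited, so lets rate limit
    # ourselves.
    valid_err_code = False
elif exc_val.code < 500:
    valid_err_code = True
else:
    valid_err_code = False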
f53e7452676e6ee903a4d8c350fa356a718a5fcc
Add a test for file: and path: searches for non-ASCII things.
tests/test_path_file_filters/test_path_file_filters.py
tests/test_path_file_filters/test_path_file_filters.py
from nose.tools import raises from dxr.testing import DxrInstanceTestCase class PathAndFileFilterTests(DxrInstanceTestCase): """Basic tests for functionality of the 'path:' and 'file:' filters""" def test_basic_path_results(self): """Check that a 'path:' result includes both file and folder matches.""" self.found_files_eq('path:fish', ['fish1', 'fishy_folder/fish2', 'fishy_folder/gill', 'folder/fish3', 'folder/fish4']) def test_basic_file_results(self): """Check that a 'file:' result includes only file matches.""" self.found_files_eq('file:fish', ['fish1', 'fishy_folder/fish2', 'folder/fish3', 'folder/fish4']) def test_path_and_file_line_promotion(self): """Make sure promotion of a 'path:' or 'file:' filter to a LINE query works. """ self.found_files_eq('path:fish fins', ['folder/fish3']) self.found_files_eq('file:fish fins', ['folder/fish3']) # This fails because we currently intentionally exclude folder paths from # FILE query results - remove the @raises line when that's changed. (Of # course then other tests here will need to be updated as well.) @raises(AssertionError) def test_empty_folder_path_results(self): """Check that 'path:' results include empty folders.""" self.found_files_eq('path:empty_folder', ['empty_folder']) def test_basic_wildcard(self): """Test basic wildcard functionality.""" # 'path:' and 'file:' currently have the same underlying wildcard # support, so we're spreading out the basic wildcard testing over both. self.found_files_eq('path:fish?_fo*er', ['fishy_folder/fish2', 'fishy_folder/gill']) self.found_files_eq('file:fish[14]', ['fish1', 'folder/fish4'])
Python
0.000001
@@ -1,12 +1,36 @@ +# -*- coding: utf-8 -*-%0A from nose.to @@ -1937,28 +1937,339 @@ %5B'fish1', 'folder/fish4'%5D)%0A +%0A def test_unicode(self):%0A %22%22%22Make sure searching for non-ASCII names works.%22%22%22%0A self.found_files_eq(u'file:fre%5Cu0301mium*', %5Bu'fre%5Cu0301mium.txt'%5D)%0A%0A # This one fails because %C3%A9 is normalized differently in ES than here:%0A # self.found_files_eq(u'file:fr%C3%A9mium*', %5Bu'fr%C3%A9mium.txt'%5D)%0A
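Annotation: the commented-out assertion in the added test ("é is normalized differently in ES than here") is a Unicode normalization issue: u'fr\xe9mium' (precomposed, NFC) and u'fre\u0301mium' (decomposed, NFD: 'e' plus a combining acute) display identically but compare unequal until normalized. A standalone illustration:

import unicodedata

composed = u'fr\xe9mium'       # e-acute as one code point (NFC form)
decomposed = u'fre\u0301mium'  # 'e' followed by U+0301 combining acute (NFD form)

print(composed == decomposed)                                # False
print(unicodedata.normalize('NFC', decomposed) == composed)  # True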
d22bd8970b973fb58f1358b62cf8c27f826aa407
update example
example/gravity.py
example/gravity.py
from pgmagick import Image, Geometry, Color, TypeMetric, \ DrawableText, DrawableList, DrawableGravity, GravityType im = Image(Geometry(600, 600), Color("transparent")) im.fontPointsize(30) im.fillColor(Color("#f010f0")) im.strokeColor(Color("transparent")) im.font("Vera.ttf") dl = DrawableList() dl.append(DrawableGravity(GravityType.CenterGravity)) dl.append(DrawableText(0, 0, "center")) tm = TypeMetric() im.fontTypeMetrics("northn", tm) font_height = tm.textHeight() dl.append(DrawableGravity(GravityType.NorthGravity)) dl.append(DrawableText(0, font_height / 2., "north")) dl.append(DrawableGravity(GravityType.WestGravity)) dl.append(DrawableText(0, 0, "west")) dl.append(DrawableGravity(GravityType.EastGravity)) dl.append(DrawableText(0, 0, "east")) dl.append(DrawableGravity(GravityType.SouthGravity)) dl.append(DrawableText(0, 0, "south")) dl.append(DrawableGravity(GravityType.NorthWestGravity)) dl.append(DrawableText(0, font_height / 2., "north-west")) dl.append(DrawableGravity(GravityType.NorthEastGravity)) dl.append(DrawableText(0, font_height / 2., "north-east")) dl.append(DrawableGravity(GravityType.SouthWestGravity)) dl.append(DrawableText(0, 0, "south-west")) dl.append(DrawableGravity(GravityType.SouthEastGravity)) dl.append(DrawableText(0, 0, "south-east")) im.draw(dl) im.write("test.png")
Python
0.000001
@@ -775,24 +775,68 @@ 0, %22east%22)) +%0Adl.append(DrawableText(0, 20, %22east-long%22)) %0A%0Adl.append(
73e99078b3bce587e059b1a15dbb7f94be70dd8d
enable the possibility of success
testcases/OpalMsglog.py
testcases/OpalMsglog.py
#!/usr/bin/python2 # OpenPOWER Automated Test Project # # Contributors Listed Below - COPYRIGHT 2017 # [+] International Business Machines Corp. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. See the License for the specific language governing # permissions and limitations under the License. # import unittest import OpTestConfiguration from common.OpTestSystem import OpSystemState from common.OpTestConstants import OpTestConstants as BMC_CONST class OpalMsglog(): def setUp(self): conf = OpTestConfiguration.conf self.cv_HOST = conf.host() self.cv_IPMI = conf.ipmi() self.cv_SYSTEM = conf.system() def runTest(self): self.setup_test() log_entries = self.c.run_command("grep ',[0-4]\]' /sys/firmware/opal/msglog") msg = '\n'.join(filter(None, log_entries)) self.assertTrue( len(log_entries) == 0, "Warnings/Errors in OPAL log:\n%s" % msg) class Skiroot(OpalMsglog, unittest.TestCase): def setup_test(self): self.cv_SYSTEM.goto_state(OpSystemState.PETITBOOT_SHELL) self.c = self.cv_SYSTEM.sys_get_ipmi_console() self.cv_SYSTEM.host_console_unique_prompt() class Host(OpalMsglog, unittest.TestCase): def setup_test(self): self.cv_SYSTEM.goto_state(OpSystemState.OS) self.c = self.cv_SYSTEM.host().get_ssh_connection()
Python
0
@@ -842,16 +842,60 @@ MC_CONST +%0Afrom common.Exceptions import CommandFailed %0A%0Aclass @@ -1124,24 +1124,41 @@ etup_test()%0A + try:%0A log_ @@ -1239,16 +1239,20 @@ + msg = '%5C @@ -1290,24 +1290,28 @@ s))%0A + + self.assertT @@ -1379,16 +1379,173 @@ %22 %25 msg) +%0A except CommandFailed as cf:%0A if cf.exitcode is 1 and len(cf.output) is 0:%0A pass%0A else:%0A raise cf %0A%0Aclass
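Annotation: decoded, the change explains the odd subject line: grep exits with status 1 when it finds no matches, so a clean OPAL log made run_command raise CommandFailed and the test could never pass. The post-commit runTest treats exit code 1 with empty output as success (reconstruction from the hunks; the "is 1" identity comparisons are kept verbatim):

def runTest(self):
    self.setup_test()
    try:
        log_entries = self.c.run_command(
            "grep ',[0-4]\]' /sys/firmware/opal/msglog")
        msg = '\n'.join(filter(None, log_entries))
        self.assertTrue(len(log_entries) == 0,
                        "Warnings/Errors in OPAL log:\n%s" % msg)
    except CommandFailed as cf:
        # grep exit status 1 with no output means "no matches", i.e. success
        if cf.exitcode is 1 and len(cf.output) is 0:
            pass
        else:
            raise cf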
6cb0822aade07999d54e5fcd19eb2c7322abc80a
Improve performance @ Measurement Admin
measurement/admin.py
measurement/admin.py
from django.contrib import admin from .models import Measurement admin.site.register(Measurement)
Python
0
@@ -59,16 +59,221 @@ rement%0A%0A +%0Aclass MeasurementAdmin(admin.ModelAdmin):%0A model = Measurement%0A%0A def get_queryset(self, request):%0A return super(MeasurementAdmin, self).get_queryset(request).select_related('patient__user')%0A%0A admin.si @@ -295,10 +295,28 @@ surement +, MeasurementAdmin )%0A
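Annotation: the "performance" in the subject is the classic N+1 fix: the admin changelist touches patient.user for every Measurement row, and without a join that costs one extra query per row. The post-commit module, reconstructed in full from the hunks:

from django.contrib import admin

from .models import Measurement


class MeasurementAdmin(admin.ModelAdmin):
    model = Measurement

    def get_queryset(self, request):
        # select_related follows the patient -> user FK chain in one JOIN
        # instead of issuing a query per row in the changelist
        return super(MeasurementAdmin, self).get_queryset(request).select_related('patient__user')


admin.site.register(Measurement, MeasurementAdmin)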
24b2509b1605dfd6d3eb325ed946c3d23441b969
use Python's QT stuff
demo/quicktime.py
demo/quicktime.py
#!/usr/bin/env python """Display quicktime movie.""" import os import VisionEgg from VisionEgg.Core import * from VisionEgg.Text import * from VisionEgg.Textures import * from VisionEgg.QuickTime import * screen = get_default_screen() screen.set(bgcolor=(0,0,0)) filename = os.path.join(VisionEgg.config.VISIONEGG_SYSTEM_DIR,"data","water.mov") movie = Movie(filename) left, bottom, right, top = movie.get_box() width,height = abs(right-left), abs(top-bottom) scale_x = screen.size[0]/float(width) scale_y = screen.size[1]/float(height) scale = min(scale_x,scale_y) # maintain aspect ratio movie_texture = MovieTexture(movie=movie) stimulus = TextureStimulus( texture=movie_texture, position = (screen.size[0]/2.0,screen.size[1]/2.0), anchor = 'center', mipmaps_enabled = False, # can't do mipmaps with QuickTime movies shrink_texture_ok = True, size = (width*scale, height*scale), ) text = Text( text = "Vision Egg QuickTime movie demo - Press any key to quit", position = (screen.size[0]/2,screen.size[1]), anchor = 'top', color = (1.0, 1.0, 1.0), ) viewport = Viewport(screen=screen, stimuli=[stimulus, text]) movie.start() frame_timer = FrameTimer() while not pygame.event.peek((pygame.locals.QUIT, pygame.locals.KEYDOWN, pygame.locals.MOUSEBUTTONDOWN)): movie.task() screen.clear() viewport.draw() swap_buffers() # display the frame we've drawn in back buffer frame_timer.tick() if movie.is_done(): movie.go_to_beginning() frame_timer.print_histogram()
Python
0.000002
@@ -56,16 +56,21 @@ mport os +, sys %0Aimport @@ -202,17 +202,53 @@ import -* +new_movie_from_filename, MovieTexture %0A%0Ascreen @@ -300,16 +300,75 @@ ,0,0))%0A%0A +if len(sys.argv) %3E 1:%0A filename = sys.argv%5B1%5D%0Aelse:%0A filename @@ -453,49 +453,80 @@ e = -Movie(filename)%0A%0Aleft, bottom, right, top +new_movie_from_filename(filename) # movie is type Carbon.Qt.Movie%0Abounds = m @@ -534,13 +534,17 @@ vie. -get_b +GetMovieB ox() @@ -553,50 +553,59 @@ idth -,height = abs(right-left), abs(top-bottom) + = bounds%5B2%5D-bounds%5B0%5D%0Aheight = bounds%5B3%5D-bounds%5B1%5D %0A%0Asc @@ -1370,13 +1370,18 @@ vie. -s +S tart +Movie ()%0Af @@ -1583,13 +1583,20 @@ vie. -t +MoviesT ask( +0 )%0A @@ -1739,12 +1739,16 @@ vie. -is_d +IsMovieD one( @@ -1768,23 +1768,21 @@ vie. -go_to_b +GoToB eginning ()%0A @@ -1777,16 +1777,23 @@ eginning +OfMovie ()%0A
c92caa1f00c984cf839ccf7c645d207e100eb874
Add test_invalid_image to test_image_validation module
test/server/test_image_validation.py
test/server/test_image_validation.py
from urlparse import urljoin from clientlib import ( make_example_shot, make_random_id, screenshots_session, example_images ) import random # Hack to make this predictable: random.seed(0) def test_invalid_image_url(): with screenshots_session() as user: shot_id = make_random_id() + "/test.com" shot_data = urljoin(user.backend, "data/" + shot_id) shot_json = make_example_shot(user.deviceId) invalid_url = "https://example.com/?aaA=bbb=\"); background-color: red;" for clip_id in shot_json['clips']: shot_json['clips'][clip_id]['image']['url'] = invalid_url break resp = user.session.put( shot_data, json=shot_json, ) print(resp.text) assert resp.status_code == 500 # assertion failure on clip image url def test_invalid_data_image(): with screenshots_session() as user: shot_url = user.create_shot(docTitle="TEST_JPEG", image_content_type="application/pdf", image_index=0) shot_page = user.read_shot(shot_url) assert shot_page["clip_content_type"] != "image/jpeg" def test_invalid_data_image_decoded(): pass def test_invalid_data_url(): pass if __name__ == "__main__": test_invalid_data_image() test_invalid_data_image_decoded() test_invalid_data_url()
Python
0.000001
@@ -148,16 +148,24 @@ t random +, string %0A%0A%0A# Hac @@ -941,213 +941,288 @@ hot_ -url = user.create_shot(docTitle=%22TEST_JPEG%22, image_content_type=%22application/pdf%22, image_index=0)%0A shot_page = user.read_shot(shot_url)%0A assert shot_page%5B%22clip_content_type%22%5D != %22image/jpeg%22%0A +id = make_random_id() + %22/test.com%22%0A shot_data = urljoin(user.backend, %22data/%22 + shot_id)%0A shot_json = make_example_shot(user.deviceId)%0A valid_data_image = example_images%5B'url'%5D%0A if %22iVBORw0KGgo%22 in valid_data_image:%0A print(valid_data_image) %0A%0Ade
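Annotation: one detail in the new test body worth decoding: the literal 'iVBORw0KGgo' it searches for is the Base64 encoding of the 8-byte PNG signature, i.e. a cheap way to ask whether a data URL carries a PNG. A standalone check:

import base64

png_signature = b'\x89PNG\r\n\x1a\n'
print(base64.b64encode(png_signature))  # b'iVBORw0KGgo=' -- the prefix the test looks for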
2942f39534ca7b309e32268697350afaacad7274
TEST: Added small integrity test for sct_get_centerline

testing/test_sct_get_centerline.py
testing/test_sct_get_centerline.py
#!/usr/bin/env python ######################################################################################### # # Test function for sct_get_centerline script # # replace the shell test script in sct 1.0 # # --------------------------------------------------------------------------------------- # Copyright (c) 2014 Polytechnique Montreal <www.neuro.polymtl.ca> # Author: Augustin Roux # modified: 2014/09/28 # # About the license: see the file LICENSE.TXT ######################################################################################### import commands def test(path_data): # parameters folder_data = 't2/' file_data = ['t2.nii.gz', 't2_centerline_init.nii.gz', 't2_centerline_labels.nii.gz'] output = '' status = 0 # define command cmd = 'sct_get_centerline -i ' + path_data + folder_data + file_data[0] \ + ' -method auto' \ + ' -t t2 ' \ + ' -v 1' output += '\n====================================================================================================\n'+cmd+'\n====================================================================================================\n\n' # copy command s, o = commands.getstatusoutput(cmd) status += s output += o # define command: DOES NOT RUN IT BECAUSE REQUIRES FSL FLIRT # cmd = 'sct_get_centerline -i ' + path_data + folder_data + file_data[0] \ # + ' -method point' \ # + ' -p ' + path_data + folder_data + file_data[1] \ # + ' -g 1'\ # + ' -k 4' # output += '\n====================================================================================================\n'+cmd+'\n====================================================================================================\n\n' # copy command # s, o = commands.getstatusoutput(cmd) # status += s # output += o # define command cmd = 'sct_get_centerline -i ' + path_data + folder_data + file_data[0] \ + ' -method labels ' \ + ' -l ' + path_data + folder_data + file_data[2] \ + ' -v 1' output += '\n====================================================================================================\n'+cmd+'\n====================================================================================================\n\n' # copy command s, o = commands.getstatusoutput(cmd) status += s output += o return status, output if __name__ == "__main__": # call main function test()
Python
0
@@ -561,16 +561,140 @@ ommands%0A +from msct_image import Image%0Afrom sct_get_centerline import ind2sub%0Aimport math%0Aimport sct_utils as sct%0Aimport numpy as np%0A%0A %0Adef tes @@ -837,16 +837,40 @@ .nii.gz' +, 't2_seg_manual.nii.gz' %5D%0A%0A o @@ -1377,32 +1377,1752 @@ %0A output += o +%0A # small integrity test on scad%0A try :%0A if status == 0:%0A manual_seg = Image(path_data + folder_data + file_data%5B3%5D)%0A centerline_scad = Image(path_data + folder_data + file_data%5B0%5D)%0A centerline_scad.change_orientation()%0A manual_seg.change_orientation()%0A%0A from scipy.ndimage.measurements import center_of_mass%0A # find COM%0A iterator = range(manual_seg.data.shape%5B2%5D)%0A com_x = %5B0 for ix in iterator%5D%0A com_y = %5B0 for iy in iterator%5D%0A%0A for iz in iterator:%0A com_x%5Biz%5D, com_y%5Biz%5D = center_of_mass(manual_seg.data%5B:, :, iz%5D)%0A max_distance = %7B%7D%0A distance = %7B%7D%0A for iz in range(1, centerline_scad.data.shape%5B2%5D-1):%0A ind1 = np.argmax(centerline_scad.data%5B:, :, iz%5D)%0A X,Y = ind2sub(centerline_scad.data%5B:, :, iz%5D.shape,ind1)%0A com_phys = np.array(manual_seg.transfo_pix2phys(%5B%5Bcom_x%5Biz%5D, com_y%5Biz%5D, iz%5D%5D))%0A scad_phys = np.array(centerline_scad.transfo_pix2phys(%5B%5BX, Y, iz%5D%5D))%0A distance_magnitude = np.linalg.norm(%5Bcom_phys%5B0%5D%5B0%5D-scad_phys%5B0%5D%5B0%5D, com_phys%5B0%5D%5B1%5D-scad_phys%5B0%5D%5B1%5D, 0%5D)%0A if math.isnan(distance_magnitude):%0A print %22Value is nan%22%0A else:%0A distance%5Biz%5D = distance_magnitude%0A%0A max_distance = max(distance.values())%0A #if max_distance %3E 5:%0A #sct.printv(%22Max distance between scad and manual centerline is greater than 5 mm%22, type=%22warning%22)%0A%0A except Exception, e:%0A sct.printv(%22Exception found while testing scad integrity%22)%0A sct.printv(e.message, type=%22error%22) %0A%0A # define c
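Annotation: the long diff above bolts an integrity check onto the test: slice by slice, compare the centre of mass of the manual segmentation against the brightest voxel of the SCAD centerline, with both points mapped to physical coordinates before taking the distance. Distilled to its core as an array-only sketch (the Image loading, reorientation and transfo_pix2phys plumbing from the row are assumed; np.unravel_index stands in for the imported ind2sub helper):

import numpy as np
from scipy.ndimage.measurements import center_of_mass

def max_centerline_distance(manual_seg, centerline, pix2phys):
    """manual_seg, centerline: 3-D arrays; pix2phys maps [[x, y, z]] pixel rows to mm."""
    distances = []
    for iz in range(1, centerline.shape[2] - 1):
        # brightest voxel of the SCAD centerline on this slice
        x, y = np.unravel_index(np.argmax(centerline[:, :, iz]),
                                centerline[:, :, iz].shape)
        # centre of mass of the manual segmentation on the same slice
        cx, cy = center_of_mass(manual_seg[:, :, iz])
        com_phys = np.array(pix2phys([[cx, cy, iz]]))
        scad_phys = np.array(pix2phys([[x, y, iz]]))
        d = np.linalg.norm(com_phys[0][:2] - scad_phys[0][:2])
        if not np.isnan(d):
            distances.append(d)
    return max(distances) if distances else None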
149452b0d571adc16ae97aabd6a3266ba001a854
Call `_update_conditionally` from `_append_conditionally`
app/main/services/process_request_json.py
app/main/services/process_request_json.py
import hashlib from itertools import chain import six from flask import request from werkzeug.exceptions import abort def _ensure_value_list(json_string_or_list): if isinstance(json_string_or_list, list): return json_string_or_list else: return [json_string_or_list] def _update_conditionally(arguments, document): """ A transformation processor that updates field values in "target field" when certain values are present in "field". The example use case is when we are converting awarded, unsuccessful or cancelled brief status to closed. :param arguments: dict -- the parameters to the processor as specified in configuration :param document: dict -- the submitted document that we are transforming """ _append_conditionally(arguments, document, update=True) def _append_conditionally(arguments, document, update=False): """ A transformation processor that generates new field values in "target field" when certain values are present in "field". The example use case is when we are adding parent categories, whenever any one of their subcategories is present. :param arguments: dict -- the parameters to the processor as specified in configuration :param document: dict -- the submitted document that we are transforming :param update: bool -- if true, then the target field is updated instead of appended to. See the function above. """ source_field = arguments['field'] target_field = arguments.get('target_field') or source_field if source_field in document: source_values = _ensure_value_list(document[source_field]) source_values_set = set(source_values) target_values = _ensure_value_list(document.get(target_field, [])) if any(value in source_values_set for value in arguments['any_of']): if update: document[target_field] = arguments['update_value'] else: target_values.extend(arguments['append_value']) # "append_value" key singular despite being a list, consistent with Elasticsearch practice document[target_field] = target_values def _hash_to(arguments, document): """ A transformation processor that performs a sha256 on the (utf8) string representation of the "field" and stores the (lowercase hex string) result on the document under a key specified by "target_field". If "target_field" is not specified, the source field will be overwritten with the result. :param arguments: dict -- the parameters to the processor as specified in configuration :param document: dict -- the submitted document that we are transforming """ source_field = arguments['field'] target_field = arguments.get('target_field') or source_field if source_field in document: document[target_field] = hashlib.sha256((six.text_type(document[source_field])).encode('utf-8')).hexdigest() TRANSFORMATION_PROCESSORS = { 'append_conditionally': _append_conditionally, 'update_conditionally': _update_conditionally, 'hash_to': _hash_to, } def convert_request_json_into_index_json(mapping, request_json): for transformation in mapping.transform_fields: # Each transformation is a dictionary, with a type mapping to the arguments pertaining to # that type. We anticipate only one type per transformation (consistent with how 'ingest # processors' are specified for Elasticsearch - see # <https://www.elastic.co/guide/en/elasticsearch/reference/current/ingest-processors.html>). 
for transformation_type, transformation_arguments in transformation.items(): TRANSFORMATION_PROCESSORS[transformation_type](transformation_arguments, request_json) # build a dict: for each key/value in the request_json, look up mapping.prefixes_by_field to see how many # differently-prefixed variants that field has in the mapping and copy value verbatim to all those keys. it could # of course have no representation in the mapping, in which case it would be ignored. return dict(chain.from_iterable( ( ("_".join((prefix, key)), value) for prefix in mapping.prefixes_by_field.get(key, ()) ) for key, value in request_json.items() )) def check_json_from_request(request): if request.content_type not in ['application/json', 'application/json; charset=UTF-8']: abort(400, "Unexpected Content-Type, expecting 'application/json'") data = request.get_json() if data is None: abort(400, "Invalid JSON; must be a valid JSON object") return data def json_has_required_keys(data, keys): for key in keys: if key not in data.keys(): abort(400, "Invalid JSON must have '%s' key(s)" % keys) def get_json_from_request(root_field): payload = check_json_from_request(request) json_has_required_keys(payload, [root_field]) update_json = payload[root_field] return update_json
Python
0
@@ -294,22 +294,22 @@ %0A%0A%0Adef _ -update +append _conditi @@ -385,15 +385,21 @@ hat -updates +generates new fie @@ -518,76 +518,88 @@ are -converting awarded, unsuccessful or cancelled brief status to closed +adding parent categories, whenever any one of their subcategories%0A is present .%0A @@ -778,22 +778,22 @@ %22%22%0A _ -append +update _conditi @@ -820,22 +820,22 @@ cument, -update +append =True)%0A%0A @@ -832,38 +832,38 @@ nd=True)%0A%0A%0Adef _ -append +update _conditionally(a @@ -882,22 +882,22 @@ cument, -update +append =False): @@ -937,38 +937,32 @@ cessor that -gener +upd ates -new field values @@ -1072,88 +1072,76 @@ are -adding parent categories, wheneve +converting awarded, unsuccessful o r +c an -y one of their subcategories%0A is present +celled brief status to closed .%0A @@ -1318,22 +1318,22 @@ :param -update +append : bool - @@ -1372,38 +1372,35 @@ is -updated instead of appended to +appended to instead updated . Se @@ -1421,32 +1421,33 @@ above.%0A %22%22%22%0A +%0A source_field @@ -1853,99 +1853,14 @@ if -update:%0A document%5Btarget_field%5D = arguments%5B'update_value'%5D%0A else +append :%0A @@ -2083,16 +2083,101 @@ _values%0A + else:%0A document%5Btarget_field%5D = arguments%5B'update_value'%5D%0A %0A%0Adef _h
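Annotation: decoded, the diff swaps the roles of the two helpers rather than adding logic: _append_conditionally becomes the thin wrapper (with the update= keyword renamed to append=), while _update_conditionally becomes the worker whose final branch chooses between extending and overwriting the target field. Skeleton of the post-commit pair (docstrings elided; everything else reconstructed from the row and the hunks):

def _append_conditionally(arguments, document):
    _update_conditionally(arguments, document, append=True)


def _update_conditionally(arguments, document, append=False):
    source_field = arguments['field']
    target_field = arguments.get('target_field') or source_field

    if source_field in document:
        source_values = _ensure_value_list(document[source_field])
        source_values_set = set(source_values)
        target_values = _ensure_value_list(document.get(target_field, []))

        if any(value in source_values_set for value in arguments['any_of']):
            if append:
                target_values.extend(arguments['append_value'])
                document[target_field] = target_values
            else:
                document[target_field] = arguments['update_value']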
fcc5f3a8847dbbb7fc4f9b939dacacd340a314a2
Load top level dicts in init
medleydb/__init__.py
medleydb/__init__.py
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Python tools for using MedleyDB """ import logging from os import path from os import environ import warnings from medleydb.version import __version__ __all__ = ["__version__", "sql"] logging.basicConfig(level=logging.CRITICAL) if "MEDLEYDB_PATH" in environ and path.exists(environ["MEDLEYDB_PATH"]): MEDLEYDB_PATH = environ["MEDLEYDB_PATH"] AUDIO_AVAILABLE = True elif "MEDLEYDB_PATH" not in environ: warnings.warn( "The environment variable MEDLEYDB_PATH is not set. " "As a result, any part of the code that requires the audio won't work. " "If you don't need to access the audio, disregard this warning. " "If you do, set the environment variable MEDLEYDB_PATH to your " "local copy of MedeleyDB.", UserWarning ) MEDLEYDB_PATH = "" AUDIO_AVAILABLE = False else: MEDLEYDB_PATH = environ["MEDLEYDB_PATH"] warnings.warn( "The value set for MEDLEYDB_PATH: %s does not exist. " "As a result, any part of the code that requires the audio won't work. " "If you don't need to access the audio, disregard this warning. " "If you do, set the environment variable MEDLEYDB_PATH to your local " "copy of MedeleyDB." % MEDLEYDB_PATH, UserWarning ) AUDIO_AVAILABLE = False # The taxonomy, tracklist, annotations and metadata are version controlled and # stored inside the repository INST_TAXONOMY = path.join(path.dirname(__file__), 'taxonomy.yaml') TRACK_LIST = path.join(path.dirname(__file__), 'tracklist_v1.txt') ANNOT_PATH = path.join(path.dirname(__file__), '../', 'Annotations') METADATA_PATH = path.join(path.dirname(__file__), '../', 'Metadata') INST_F0_TYPE = path.join(path.dirname(__file__), 'instrument_f0_type.json') # Audio is downloaded separately and is not version controlled :'(. # This is the motivation for requesting the user to set the MEDLEYDB_PATH if AUDIO_AVAILABLE: AUDIO_PATH = path.join(MEDLEYDB_PATH, 'Audio') if not path.exists(AUDIO_PATH): AUDIO_PATH = None warnings.warn( "The medleydb audio was not found at the expected path: %s " "This module will still function, but without the " "ability to access any of the audio." % AUDIO_PATH, UserWarning ) else: AUDIO_PATH = None from .utils import ( load_melody_multitracks, load_all_multitracks, load_multitracks, get_files_for_instrument, preview_audio ) from .multitrack import ( MultiTrack, Track, get_duration, read_annotation_file, get_valid_instrument_labels, is_valid_instrument )
Python
0
@@ -153,16 +153,40 @@ warnings +%0Aimport yaml%0Aimport json %0A%0Afrom m @@ -1486,142 +1486,8 @@ ory%0A -INST_TAXONOMY = path.join(path.dirname(__file__), 'taxonomy.yaml')%0ATRACK_LIST = path.join(path.dirname(__file__), 'tracklist_v1.txt')%0A ANNO @@ -1624,23 +1624,330 @@ a')%0A -INST_F0_TYPE = +%0ATRACK_LIST = %5B%5D%0Awith open(path.join(path.dirname(__file__),%0A 'tracklist_v1.txt'), 'r') as fhandle:%0A for line in fhandle.readlines():%0A TRACK_LIST.append(line.strip('%5Cn'))%0A%0Awith open(path.join(path.dirname(__file__), 'taxonomy.yaml'), 'r') as f_handle:%0A INST_TAXONOMY = yaml.load(f_handle)%0A%0Awith open( path @@ -1975,16 +1975,26 @@ file__), +%0A 'instru @@ -2012,16 +2012,220 @@ e.json') +, 'r') as f_handle:%0A INST_F0_TYPE = json.load(f_handle)%0A%0Awith open(path.join(path.dirname(__file__),%0A 'mixing_coefficients.yaml'), 'r') as fhandle:%0A MIXING_COEFFICIENTS = yaml.load(fhandle) %0A%0A# Audi
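Annotation: decoded, the subject line is literal: the module stops exporting bare file paths (INST_TAXONOMY and TRACK_LIST as strings) and instead parses them into top-level lists and dicts at import time, adding MIXING_COEFFICIENTS along the way. Reconstruction of the inserted block (note that yaml.load without an explicit Loader argument reflects the pre-5.1 PyYAML API of the period):

import yaml
import json

TRACK_LIST = []
with open(path.join(path.dirname(__file__),
                    'tracklist_v1.txt'), 'r') as fhandle:
    for line in fhandle.readlines():
        TRACK_LIST.append(line.strip('\n'))

with open(path.join(path.dirname(__file__), 'taxonomy.yaml'), 'r') as f_handle:
    INST_TAXONOMY = yaml.load(f_handle)

with open(path.join(path.dirname(__file__),
                    'instrument_f0_type.json'), 'r') as f_handle:
    INST_F0_TYPE = json.load(f_handle)

with open(path.join(path.dirname(__file__),
                    'mixing_coefficients.yaml'), 'r') as fhandle:
    MIXING_COEFFICIENTS = yaml.load(fhandle)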
33121b74419e9913e46e183914805d4a9db8f742
fix test to look for email instead of username
meetuppizza/tests.py
meetuppizza/tests.py
from django.test import TestCase from django.contrib.auth.models import User from django.test import Client from meetuppizza.forms import RegistrationForm import pdb class Test(TestCase): def setUp(self): self.params = { 'username':'Bjorn', 'email':'bjorn@bjorn.com', 'password1':'bjornbjorn', 'password2':'bjornbjorn' } def test_landing_page_is_there(self): response = self.client.get('/') self.assertEqual(response.status_code, 200) def test_page_contains_pizza(self): response = self.client.get('/') self.assertContains(response, "pizza") def test_signup_redirects(self): response = self.client.post('/sign_up', self.params, follow=True) self.assertRedirects(response, '/welcome') def test_user_is_created(self): c = Client() c.post('/sign_up', self.params) self.assertEqual(1, len(User.objects.all())) def test_user_is_logged_in_after_signup(self): c = Client() c.post('/sign_up', self.params) user = User.objects.get(username='Bjorn') self.assertTrue(user.is_authenticated()) def test_email_displayed_on_welcome_page(self): c = Client() c.post('/sign_up', self.params) response = c.get('/welcome') self.assertContains(response, "Bjorn")
Python
0
@@ -1255,17 +1255,27 @@ ponse, %22 -B +b jorn +@bjorn.com %22)%0A%0A
9e202e78a5737d8609dfc193b35797b2f5f4a7bb
Fix the grouping of static files entered multiple times.
static_grouper/templatetags/static_grouper.py
static_grouper/templatetags/static_grouper.py
from collections import defaultdict from compressor.templatetags.compress import CompressorNode from django.template import Library, Node, Template, TemplateSyntaxError register = Library() CONTEXT_VARIABLE_NAME = 'static_grouper_dict' class AddStaticNode(Node): def __init__(self, parser, token): contents = token.split_contents() if len(contents) not in (2, 3): raise TemplateSyntaxError if len(contents) == 3: assert contents[2] == 'nocompress' self.compress = False else: self.compress = True self.static_type = contents[1] self.nodelist = parser.parse(('endaddstatic',)) parser.delete_first_token() def render(self, context): output = self.nodelist.render(context).strip() static_grouper_dict = context.get(CONTEXT_VARIABLE_NAME) if static_grouper_dict is None: root_context = context.dicts[0] root_context[CONTEXT_VARIABLE_NAME] = \ static_grouper_dict = defaultdict(list) if output not in static_grouper_dict[self.static_type]: static_grouper_dict[self.static_type].append( (self.compress, output)) return '' register.tag('addstatic', AddStaticNode) class StaticListNode(Node): def __init__(self, parser, token): contents = token.split_contents() if len(contents) not in (2, 3): raise TemplateSyntaxError self.static_type = contents[1] if len(contents) == 3: assert contents[2] == 'compress' self.compress = True else: self.compress = False self.following_nodelist = parser.parse() def groups_iterator(self, static_grouper_dict): compressed_group = [] for compress, output in static_grouper_dict[self.static_type]: if compress: compressed_group.append(output) else: if compressed_group: yield True, ''.join(compressed_group) compressed_group = [] yield False, output if compressed_group: yield True, ''.join(compressed_group) def render(self, context): static_grouper_dict = context.get(CONTEXT_VARIABLE_NAME, defaultdict(list)) following = self.following_nodelist.render(context) inner = '' for compress, output in self.groups_iterator(static_grouper_dict): if compress and self.compress: inner += CompressorNode( nodelist=Template(output).nodelist, kind=self.static_type, mode='file').render(context=context) else: inner += output return inner + following register.tag('static_list', StaticListNode)
Python
0
@@ -1071,16 +1071,53 @@ i -f output +tem = (self.compress, output)%0A if item not @@ -1220,48 +1220,12 @@ end( -%0A (self.compress, output) +item )%0A
40e295ddf91bb746ac3f743d675c6117d183340d
Allow scanning for clients when using GDM.all()
plexapi/gdm.py
plexapi/gdm.py
""" Support for discovery using GDM (Good Day Mate), multicast protocol by Plex. # Licensed Apache 2.0 # From https://github.com/home-assistant/netdisco/netdisco/gdm.py Inspired by: hippojay's plexGDM: https://github.com/hippojay/script.plexbmc.helper/resources/lib/plexgdm.py iBaa's PlexConnect: https://github.com/iBaa/PlexConnect/PlexAPI.py """ import socket import struct class GDM: """Base class to discover GDM services.""" def __init__(self): self.entries = [] self.last_scan = None def scan(self, scan_for_clients=False): """Scan the network.""" self.update(scan_for_clients) def all(self): """Return all found entries. Will scan for entries if not scanned recently. """ self.scan() return list(self.entries) def find_by_content_type(self, value): """Return a list of entries that match the content_type.""" self.scan() return [entry for entry in self.entries if value in entry['data']['Content_Type']] def find_by_data(self, values): """Return a list of entries that match the search parameters.""" self.scan() return [entry for entry in self.entries if all(item in entry['data'].items() for item in values.items())] def update(self, scan_for_clients): """Scan for new GDM services. Examples of the dict list assigned to self.entries by this function: Server: [{'data': { 'Content-Type': 'plex/media-server', 'Host': '53f4b5b6023d41182fe88a99b0e714ba.plex.direct', 'Name': 'myfirstplexserver', 'Port': '32400', 'Resource-Identifier': '646ab0aa8a01c543e94ba975f6fd6efadc36b7', 'Updated-At': '1585769946', 'Version': '1.18.8.2527-740d4c206', }, 'from': ('10.10.10.100', 32414)}] Clients: [{'data': {'Content-Type': 'plex/media-player', 'Device-Class': 'stb', 'Name': 'plexamp', 'Port': '36000', 'Product': 'Plexamp', 'Protocol': 'plex', 'Protocol-Capabilities': 'timeline,playback,playqueues,playqueues-creation', 'Protocol-Version': '1', 'Resource-Identifier': 'b6e57a3f-e0f8-494f-8884-f4b58501467e', 'Version': '1.1.0', }, 'from': ('10.10.10.101', 32412)}] """ gdm_msg = 'M-SEARCH * HTTP/1.0'.encode('ascii') gdm_timeout = 1 self.entries = [] known_responses = [] # setup socket for discovery -> multicast message sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) sock.settimeout(gdm_timeout) # Set the time-to-live for messages for local network sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, struct.pack("B", gdm_timeout)) if scan_for_clients: # setup socket for broadcast to Plex clients sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) gdm_ip = '255.255.255.255' gdm_port = 32412 else: # setup socket for multicast to Plex server(s) gdm_ip = '239.0.0.250' gdm_port = 32414 try: # Send data to the multicast group sock.sendto(gdm_msg, (gdm_ip, gdm_port)) # Look for responses from all recipients while True: try: bdata, host = sock.recvfrom(1024) data = bdata.decode('utf-8') if '200 OK' in data.splitlines()[0]: ddata = {k: v.strip() for (k, v) in ( line.split(':') for line in data.splitlines() if ':' in line)} identifier = ddata.get('Resource-Identifier') if identifier and identifier in known_responses: continue known_responses.append(identifier) self.entries.append({'data': ddata, 'from': host}) except socket.timeout: break finally: sock.close() def main(): """Test GDM discovery.""" from pprint import pprint gdm = GDM() pprint("Scanning GDM for servers...") gdm.scan() pprint(gdm.entries) pprint("Scanning GDM for clients...") gdm.scan(scan_for_clients=True) pprint(gdm.entries) if __name__ == "__main__": main()
Python
0
@@ -644,24 +644,48 @@ def all(self +, scan_for_clients=False ):%0A %22 @@ -790,32 +790,48 @@ self.scan( +scan_for_clients )%0A return
bf4cf008fb8eadd5a0b8b23a330a49fdea272314
Convert exception to string
tests/cases/cloud_provider_test.py
tests/cases/cloud_provider_test.py
import unittest import os from cumulus.ansible.tasks.providers import CloudProvider, EC2Provider class CloudProviderTestCase(unittest.TestCase): def setup(self): pass def tearDown(self): pass def test_empty_profile(self): with self.assertRaises(AssertionError) as context: p = CloudProvider({}) self.assertTrue('Profile does not have a "cloudProvider" attribute' in context.exception) def test_ec2_profile(self): p = CloudProvider({'cloudProvider': 'ec2'}) self.assertTrue(isinstance(p, EC2Provider))
Python
0.999979
@@ -443,16 +443,20 @@ in +str( context. @@ -465,16 +465,17 @@ ception) +) %0A%0A de
49da0ed5340bac53a953618bb684f2525a70c8d8
improve output
systest_manager/handlers/openstack_handler.py
systest_manager/handlers/openstack_handler.py
import time import keystoneclient.v2_0.client as keystone_client import neutronclient.v2_0.client as neutron_client import cinderclient.v1.client as cinder_client import novaclient.v2.client as nova_client class CleanupHandler(object): def __init__(self, configuration): self.should_delete_keypairs = configuration.handler_configuration.get( 'delete_keypairs', False) keys, neut, nova, cind = self._connect(configuration.inputs) self.keys = keys self.neut = neut self.nova = nova self.cind = cind def cleanup(self): self.delete_servers() self.delete_keys() self.delete_volumes() self.delete_routers() self.delete_ports() self.delete_networks() self.delete_security_groups() self.delete_floatingips() print "Done!" def delete_servers(self): print "Deleting Servers" for server in self.nova.servers.list(): print "\tDeleting server {0}".format(server.name) self.nova.servers.delete(server.id) while self.nova.servers.list(): print "Waiting for all servers to delete..." time.sleep(5) def delete_keys(self): print "Deleting Keypairs" for keypair in self.nova.keypairs.list(): if self.should_delete_keypairs: print "\tDeleting keypair {0}".format(keypair.name) self.nova.keypairs.delete(keypair.id) else: print "\tSkipping keypair {0}".format(keypair.name) def delete_volumes(self): print "Deleting Volumes" for volume in self.cind.volumes.list(): print "\tdeleting volume {0}".format(volume.display_name) self.cind.volumes.delete(volume.id) while self.cind.volumes.list(): print "Waiting for all volumes to delete..." time.sleep(5) def delete_routers(self): print "Deleting Routers" for router in self.neut.list_routers()['routers']: for port in self.neut.list_ports(device_id=router['id'])['ports']: subnet_id = port['fixed_ips'][0]['subnet_id'] print "\tDeleting router interface to subnet ID {0}".format( subnet_id) self.neut.remove_interface_router(router['id'], {'subnet_id': subnet_id}) print "\tDeleting router {0}".format(router['name']) self.neut.delete_router(router['id']) def delete_ports(self): print "Deleting Ports" for port in self.neut.list_ports()['ports']: print "Deleting port {0}".format(port['name']) self.neut.delete_port(port['id']) def delete_networks(self): print "Deleting Networks" for network in self.neut.list_networks()['networks']: if not network['router:external']: print "Deleting network {0}".format(network['name']) self.neut.delete_network(network['id']) def delete_security_groups(self): print "Deleting Security Groups" for sg in self.neut.list_security_groups()['security_groups']: if sg['name'] != 'default': print "Deleting security group {0}".format(sg['name']) self.neut.delete_security_group(sg['id']) def delete_floatingips(self): print "Deleting Floating IPs" for fip in self.neut.list_floatingips()['floatingips']: print "Deleting floating IP {0}".format(fip['floating_ip_address']) self.neut.delete_floatingip(fip['id']) @staticmethod def _connect(inputs): username = inputs['keystone_username'] password = inputs['keystone_password'] tenant_name = inputs['keystone_tenant_name'] region_name = inputs['region'] auth_url = inputs['keystone_url'] clients_std_keys_kw = { 'username': username, 'password': password, 'tenant_name': tenant_name, 'auth_url': auth_url } clients_old_keys_kw = { 'username': username, 'api_key': password, 'project_id': tenant_name, 'auth_url': auth_url, 'region_name': region_name } keys = keystone_client.Client(**clients_std_keys_kw) clients_std_keys_kw['region_name'] = region_name neut = neutron_client.Client(**clients_std_keys_kw) nova = nova_client.Client(**clients_old_keys_kw) cind = cinder_client.Client(**clients_old_keys_kw) return keys, neut, nova, cind
Python
0.999999
@@ -2667,32 +2667,34 @@ print %22 +%5Ct Deleting port %7B0 @@ -2709,32 +2709,32 @@ t(port%5B'name'%5D)%0A - self @@ -2953,32 +2953,34 @@ print %22 +%5Ct Deleting network @@ -3271,32 +3271,34 @@ print %22 +%5Ct Deleting securit @@ -3535,32 +3535,34 @@ print %22 +%5Ct Deleting floatin @@ -3578,16 +3578,33 @@ .format( +%0A fip%5B'flo
0ac1cdfd59199d3c36ddbccc7c5004261b57f7be
Add api.python.failing_step
recipe_modules/python/api.py
recipe_modules/python/api.py
# Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. from slave import recipe_api from slave import recipe_util import textwrap class PythonApi(recipe_api.RecipeApi): def __call__(self, name, script, args=None, unbuffered=True, **kwargs): """Return a step to run a python script with arguments.""" cmd = ['python'] if unbuffered: cmd.append('-u') cmd.append(script) return self.m.step(name, cmd + list(args or []), **kwargs) def inline(self, name, program, add_python_log=True, **kwargs): """Run an inline python program as a step. Program is output to a temp file and run when this step executes. """ program = textwrap.dedent(program) compile(program, '<string>', 'exec', dont_inherit=1) try: self(name, self.m.raw_io.input(program, '.py'), **kwargs) finally: result = self.m.step.active_result if add_python_log: result.presentation.logs['python.inline'] = program.splitlines() return result
Python
0.000053
@@ -1088,12 +1088,427 @@ turn result%0A +%0A def failing_step(self, name, text):%0A %22%22%22Return a failng step (correctly recognized in expectations).%22%22%22%0A try:%0A self.inline(name,%0A 'import sys; sys.exit(1)',%0A add_python_log=False,%0A step_test_data=lambda: self.m.raw_io.test_api.output(%0A text, retcode=1))%0A finally:%0A self.m.step.active_result.presentation.step_text = text%0A
ad4fd2ce424cb4b4f9ffbd11b5caa1d41d7ccf0c
Support for with statement for backpropagation class
tensor_network/neural_net/back_propagation.py
tensor_network/neural_net/back_propagation.py
import logging import tempfile import tensorflow as tf def initialize_weight(input_count, output_count): return tf.Variable(tf.truncated_normal([input_count, output_count]), name='W') def initialize_bias(output): return tf.Variable(tf.zeros([output]), name='B') def initialize_layer(layer_info, name='layer'): with tf.name_scope(name): return initialize_weight(layer_info[0], layer_info[1]), initialize_bias(layer_info[1]) def initialize_network(input_count, list_of_neurons): list1 = [input_count] + list_of_neurons # list2 = list_of_neurons + [output_count] list2 = list_of_neurons layers = [] i = 1 for layer_info in list(zip(list1, list2)): layers.append(initialize_layer(layer_info, name='layer' + str(i))) i += 1 return layers def tensor_network(first_layer_input, list_of_neurons): return initialize_network(first_layer_input, list_of_neurons) def evaluate_network(network, input_data): res = input_data for _tuple in network: w = _tuple[0] b = _tuple[1] res = tf.nn.sigmoid(tf.matmul(res, w) + b) return res def cost_function(actual_y, predicted_y): return tf.reduce_mean( -tf.reduce_sum((actual_y * tf.log(predicted_y + 1e-10) + ((1 - actual_y) * tf.log(1 - predicted_y + 1e-10))), axis=[1])) class BackPropagation: def __init__(self, number_of_features, number_of_output, neurons_list): self.session = tf.Session() self.x = tf.placeholder(tf.float32, [None, number_of_features]) self.y = tf.placeholder(tf.float32, [None, number_of_output]) self.validation_x = tf.placeholder(tf.float32, [None, number_of_features]) self.validation_y = tf.placeholder(tf.float32, [None, number_of_output]) self.network = list(tensor_network(number_of_features, neurons_list)) self.model = evaluate_network(self.network, self.x) self.validation_y_pred = evaluate_network(self.network, self.validation_x) self.cost_function = cost_function(self.y, self.model) self.accuracy = cost_function(self.validation_y, self.validation_y_pred) def __del__(self): self.session.close() def train(self, train_data, validation_data=None, iterations=10000, optimiser=tf.train.GradientDescentOptimizer(learning_rate=0.05), import_prev_model=False, frequency=10, folder=tempfile.gettempdir() + "/tensorflow"): (train_input, train_output) = train_data (validation_input, validation_output) = train_data if validation_data is None else validation_data tensorflow_dir = folder log_dir = tensorflow_dir + "/log" model_file = tensorflow_dir + "/model/model_data" logging.info("Logging TensorFlow data to %s " % log_dir) writer = tf.summary.FileWriter(log_dir) writer.add_graph(self.session.graph) tf.summary.scalar('cost', self.cost_function) tf.summary.scalar('accuracy', self.accuracy) merged_summary = tf.summary.merge_all() with tf.name_scope("train"): train_step = optimiser.minimize(self.cost_function, name="train_step") saver = tf.train.Saver(max_to_keep=1) if import_prev_model: saver.restore(self.session, model_file) else: self.session.run(tf.global_variables_initializer()) for i in range(iterations): if frequency != 0 and i % (iterations / frequency) == 0: accuracy = self.validation(validation_input, validation_output) cost = self.session.run(self.cost_function, feed_dict={self.x: train_input, self.y: train_output}) print("Iterations = %s and Cost = %s and accuracy = %s" % (i, cost, accuracy)) summary = self.session.run(merged_summary, feed_dict={self.x: train_input, self.y: train_output, self.validation_x: validation_input, self.validation_y: validation_output}) writer.add_summary(summary, i) saver.save(self.session, model_file) self.session.run(train_step, feed_dict={self.x: train_input, self.y: train_output, self.validation_x: validation_input, self.validation_y: validation_output}) def predict(self, test_input_data): return self.session.run(self.model, feed_dict={self.x: test_input_data}) def validation(self, validation_input_data, validation_output_data): with tf.name_scope("validation"): predictions = self.predict(validation_input_data) return self.session.run(self.cost_function, feed_dict={self.y: validation_output_data, self.model: predictions})
Python
0
@@ -2194,26 +2194,290 @@ -self.session.close +tf.reset_default_graph()%0A self.session.close()%0A%0A def __enter__(self):%0A self.session.__enter__()%0A return self%0A%0A def __exit__(self, exec_type, exec_value, exec_tb):%0A self.session.__exit__(exec_type, exec_value, exec_tb)%0A tf.reset_default_graph ()%0A%0A
b1653d7a9589766a86141034865d9023b1f75fad
Fix as_tensor_test
tests/as_tensor_test.py
tests/as_tensor_test.py
import unittest from as_tensor import as_tensor class AsTensorTest(unittest.TestCase): pass _FIXTURES = [ ('string', 'string', [], 'string'), ('list', ['string'], [None], ['string']), ('list', ['string'], [1], ['string']), ('generator', (s for s in ['string']), [1], ['string']), ('emptylist', [], [None], []), ('emptylist', [], [0], []), ('nested_list', [['string'], ['foo']], [2,1], [['string'], ['foo']]), ] for (name, expect, shape, data) in _FIXTURES: def do_test(self): result = as_tensor(data, shape) self.assertEqual(expect, result) setattr(AsTensorTest, 'test_%s' % name, do_test) if __name__ == '__main__': unittest.main()
Python
0.999412
@@ -15,16 +15,20 @@ t%0A%0Afrom +tfi. as_tenso @@ -47,16 +47,53 @@ tensor%0A%0A +from functools import partialmethod%0A%0A class As @@ -175,16 +175,21 @@ ng', %5B%5D, + str, 'string @@ -224,16 +224,21 @@ %5BNone%5D, + str, %5B'strin @@ -268,24 +268,29 @@ ring'%5D, %5B1%5D, + str, %5B'string'%5D) @@ -338,16 +338,21 @@ %5D), %5B1%5D, + str, %5B'strin @@ -386,16 +386,23 @@ %5BNone%5D, + float, %5B%5D),%0A @@ -425,16 +425,23 @@ %5B%5D, %5B0%5D, + float, %5B%5D),%0A @@ -487,16 +487,21 @@ , %5B2,1%5D, + str, %5B%5B'stri @@ -534,27 +534,13 @@ me, -expect, shape, data +*rest ) in @@ -571,16 +571,44 @@ est(self +, expect, shape, dtype, data ):%0A @@ -640,16 +640,23 @@ a, shape +, dtype )%0A @@ -716,16 +716,28 @@ sorTest, +%0A 'test_%25 @@ -746,24 +746,58 @@ %25 name, - do_test +%0A partialmethod(do_test, *rest) )%0A%0Aif __
55726db079313570fb9889ae91a4664f2e2daa98
add buttons to choose task
methods/homeworks.py
methods/homeworks.py
from enum import Enum, auto from telegram.ext import CommandHandler, MessageHandler, Filters from telegram.ext.conversationhandler import ConversationHandler from telegram.message import Message from telegram.update import Update from lyceum_api import get_check_queue from lyceum_api.issue import QueueTask from methods.auth import get_user class State(Enum): not_logged_in = auto() def handle_hw(bot, update: Update): user = get_user(update.message) if not user: update.message.reply_text('Not logged in') return ConversationHandler.END q = [QueueTask(t) for t in get_check_queue(user.sid)] tasks = ('Задания на проверку:\n' + '\n'.join('{} -- {}'.format(t.task_title, t.student_name) for t in q)) update.message.reply_text(tasks) return ConversationHandler.END # def on_choose(bot, update): # message: Message = update.message conv_handler = ConversationHandler( entry_points=[CommandHandler('hw', handle_hw, Filters.private)], states={ # States.username: [MessageHandler(Filters.text, # handle_username, # pass_user_data=True)], # States.password: [MessageHandler(Filters.text, # handle_password, # pass_user_data=True)] }, fallbacks=[] )
Python
0.000017
@@ -189,16 +189,77 @@ Message%0A +from telegram.replykeyboardmarkup import ReplyKeyboardMarkup%0A from tel @@ -702,59 +702,10 @@ s = -('%D0%97%D0%B0%D0%B4%D0%B0%D0%BD%D0%B8%D1%8F %D0%BD%D0%B0 %D0%BF%D1%80%D0%BE%D0%B2%D0%B5%D1%80%D0%BA%D1%83:%5Cn' +%0A '%5Cn'.join( +%5B%5B '%7B%7D @@ -747,16 +747,17 @@ nt_name) +%5D for t i @@ -763,47 +763,186 @@ in q -))%0A%0A update.message.reply_text(tasks +%5D%0A%0A markup = ReplyKeyboardMarkup(tasks, one_time_keyboard=True)%0A update.message.reply_text('%D0%92%D1%8B%D0%B1%D0%B5%D1%80%D0%B8%D1%82%D0%B5 %D0%B7%D0%B0%D0%B4%D0%B0%D0%BD%D0%B8%D0%B5 %D0%BD%D0%B0 %D0%BF%D1%80%D0%BE%D0%B2%D0%B5%D1%80%D0%BA%D1%83',%0A reply_markup=markup )%0A
fd61f3cfbcd520b1b5fc9208c553ee946cced517
Remove duplicates from compression levels tests
tests/frame/conftest.py
tests/frame/conftest.py
import pytest # import random import lz4.frame as lz4frame @pytest.fixture( params=[ (lz4frame.BLOCKSIZE_DEFAULT), (lz4frame.BLOCKSIZE_MAX64KB), (lz4frame.BLOCKSIZE_MAX256KB), (lz4frame.BLOCKSIZE_MAX1MB), (lz4frame.BLOCKSIZE_MAX4MB), ] ) def block_size(request): return request.param @pytest.fixture( params=[ (lz4frame.BLOCKMODE_LINKED), (lz4frame.BLOCKMODE_INDEPENDENT), ] ) def block_mode(request): return request.param @pytest.fixture( params=[ (lz4frame.CONTENTCHECKSUM_DISABLED), (lz4frame.CONTENTCHECKSUM_ENABLED), ] ) def content_checksum(request): return request.param compression_levels = list(range(-5, 13)) + [ lz4frame.COMPRESSIONLEVEL_MIN, lz4frame.COMPRESSIONLEVEL_MINHC, lz4frame.COMPRESSIONLEVEL_MAX, ] compression_levels = [ # Although testing with all compression levels is desirable, the number of # tests becomes too large. So, we'll select some compression levels at # random. # (i) for i in random.sample(set(compression_levels), k=2) (i) for i in compression_levels ] @pytest.fixture( params=compression_levels ) def compression_level(request): return request.param @pytest.fixture( params=[ (True), (False) ] ) def auto_flush(request): return request.param @pytest.fixture( params=[ (True), (False) ] ) def store_size(request): return request.param
Python
0.000001
@@ -1124,16 +1124,20 @@ or i in +set( compress @@ -1146,16 +1146,17 @@ n_levels +) %0A%5D%0A@pyte
f6861a57069306046f4d9b40daaede06d3618a53
Lowercase for consistency
tests/generate_tests.py
tests/generate_tests.py
from __future__ import print_function from subprocess import check_output from os import listdir from os.path import join from time import sleep from utils import Colour, FoundError, getCurrentAbsolutePath, existsIn, EXEC, WHITE_LISTED_EXTENSIONS dir_path = getCurrentAbsolutePath(__file__) # (path/to/tests, infrared_command) LEXER_TESTS = (join(dir_path, "lexer"), "tokenize") PARSER_TESTS = (join(dir_path, "parser"), "parse") jobs = [ LEXER_TESTS, PARSER_TESTS ] for job in jobs: print(Colour.BOLD + "\nGENERATING TESTS: " + Colour.END + job[0]) try: # We don't want to check any dotfiles in these directories directories = [f for f in listdir(job[0]) if f[0] != "."] except: print("Directory was not found: " + job[0]) continue for path in directories: real_path = join(job[0], path) print(Colour.LIGHT_GRAY + u'\u25F4' + " BUILDING " + Colour.END + path, end="\r") try: # Find test file (we only expect 1 file at the moment) files = listdir(real_path) files_valid = [f for f in files if existsIn(WHITE_LISTED_EXTENSIONS, f[-3:])] if len(files_valid) != 1: raise file = join(real_path, files_valid[0]) output = check_output([EXEC, job[1], file]) # Check output for simple errors if (output.find("Syntax_Error") != -1 or output.find("Unknown_Token") != -1): raise FoundError # Create expected output file file_exp_name = file[:-3] + ".exp" file_exp = open(file_exp_name, "w") file_exp.write(output) file_exp.close() print(Colour.GREEN + u'\u2714' + " DONE " + Colour.END + path + " ") except FoundError: print(Colour.RED + u'\u2715' + " FAIL " + Colour.END + path + ": " + Colour.LIGHT_GRAY + "Syntax_Error or Unknown_Token encountered" + Colour.END) except: print(Colour.RED + u'\u2715' + " ERROR " + Colour.END + path + " ")
Python
0.998717
@@ -1736,12 +1736,12 @@ + %22 -DONE +done %22 + @@ -1845,12 +1845,12 @@ + %22 -FAIL +fail %22 + @@ -2039,13 +2039,13 @@ + %22 -ERROR +error %22 +
59fcdd60b9d612609e49890b868aaee9347616a9
Version bump.
pelican_flickrtag/__init__.py
pelican_flickrtag/__init__.py
__title__ = 'pelican-flickrtag' __version__ = '0.3.1' __author__ = 'Chris Streeter' __license__ = 'MIT' __copyright__ = 'Copyright 2013' from pelican_flickrtag.plugin import register
Python
0
@@ -44,17 +44,17 @@ = '0.3. -1 +2 '%0A__auth
83edbf1bd6807eca8cdf30db1b1e4493881104d4
Modify splash container to listen only locally
detectem/utils.py
detectem/utils.py
import hashlib import json import logging import pprint import time from contextlib import contextmanager import docker import requests from detectem.exceptions import DockerStartError from detectem.settings import ( CMD_OUTPUT, DOCKER_SPLASH_IMAGE, JSON_OUTPUT, SETUP_SPLASH, SPLASH_MAX_TIMEOUT, SPLASH_URL, ) logger = logging.getLogger("detectem") def get_most_complete_pm(pms): """ Return plugin match with longer version, if not available will return plugin match with ``presence=True`` """ if not pms: return None selected_version = None selected_presence = None for pm in pms: if pm.version: if not selected_version: selected_version = pm else: if len(pm.version) > len(selected_version.version): selected_version = pm elif pm.presence: selected_presence = pm return selected_version or selected_presence def docker_error(method): def run_method(self=None): try: method(self) except docker.errors.DockerException as e: raise DockerStartError(f"Docker error: {e}") return run_method class DockerManager: """ Wraps requests to Docker daemon to manage Splash container. """ def __init__(self): try: self.docker_cli = docker.from_env(version="auto") self.container_name = "splash-detectem" except docker.errors.DockerException: raise DockerStartError( "Could not connect to Docker daemon. " "Please ensure Docker is running." ) def _get_splash_args(self): return f"--max-timeout {SPLASH_MAX_TIMEOUT}" def _get_container(self): try: return self.docker_cli.containers.get(self.container_name) except docker.errors.NotFound: try: return self.docker_cli.containers.create( name=self.container_name, image=DOCKER_SPLASH_IMAGE, ports={"5023/tcp": 5023, "8050/tcp": 8050, "8051/tcp": 8051}, command=self._get_splash_args(), ) except docker.errors.ImageNotFound: raise DockerStartError( f"Docker image {DOCKER_SPLASH_IMAGE} not found." f"Please install it or set an image " f"using DOCKER_SPLASH_IMAGE environment variable." ) @docker_error def start_container(self): container = self._get_container() if container.status != "running": try: container.start() self._wait_container() except docker.errors.APIError as e: raise DockerStartError( f"There was an error running Splash container: {e.explanation}" ) def _wait_container(self): for t in [1, 2, 4, 6, 8, 10]: try: requests.get(f"{SPLASH_URL}/_ping") break except requests.exceptions.RequestException: time.sleep(t) else: raise DockerStartError( "Could not connect to started Splash container. " "See 'docker logs splash-detectem' for more details, " "or remove the container to try again." ) @contextmanager def docker_container(): """ Start the Splash server on a Docker container. If the container doesn't exist, it is created and named 'splash-detectem'. """ if SETUP_SPLASH: dm = DockerManager() dm.start_container() try: requests.post(f"{SPLASH_URL}/_gc") except requests.exceptions.RequestException: pass yield def create_printer(oformat): if oformat == CMD_OUTPUT: return pprint.pprint elif oformat == JSON_OUTPUT: def json_printer(data): print(json.dumps(data)) return json_printer def get_url(entry): """ Return URL from response if it was received otherwise requested URL. """ try: return entry["response"]["url"] except KeyError: return entry["request"]["url"] def get_response_body(entry): return entry["response"]["content"]["text"] def get_version_via_file_hashes(plugin, entry): file_hashes = getattr(plugin, "file_hashes", {}) if not file_hashes: return url = get_url(entry) body = get_response_body(entry).encode("utf-8") for file, hash_dict in file_hashes.items(): if file not in url: continue m = hashlib.sha256() m.update(body) h = m.hexdigest() for version, version_hash in hash_dict.items(): if h == version_hash: return version
Python
0
@@ -2109,60 +2109,200 @@ ts=%7B -%225023/tcp%22: 5023, %228050/tcp%22: 8050, %228051/tcp%22: 8051 +%0A %225023/tcp%22: (%22127.0.0.1%22, 5023),%0A %228050/tcp%22: (%22127.0.0.1%22, 8050),%0A %228051/tcp%22: (%22127.0.0.1%22, 8051),%0A %7D,%0A
ce628425aab91195cdcb35c048162b7fed240381
check for instagram_access_token settings before accessing it to fix failing unit tests
redwind/plugins/instagram.py
redwind/plugins/instagram.py
from .. import app from .. import db from .. import util from ..models import Post, Setting, get_settings, Context from .. import hooks from .. import queue from flask.ext.login import login_required from flask import request, redirect, url_for, render_template, flash,\ has_request_context import requests import urllib import re import datetime PERMALINK_RE = re.compile(r'https?://(?:www\.|mobile\.)?instagram\.com/p/(\w+)/?') def register(): hooks.register('create-context', create_context) hooks.register('post-saved', send_to_instagram) @app.route('/authorize_instagram') @login_required def authorize_instagram(): redirect_uri = url_for('authorize_instagram', _external=True) code = request.args.get('code') if not code: # redirect to instagram authorization page params = { 'client_id': get_settings().instagram_client_id, 'redirect_uri': redirect_uri, 'response_type': 'code', 'scope': 'likes comments', } return redirect('https://api.instagram.com/oauth/authorize/?' + urllib.parse.urlencode(params)) params = { 'client_id': get_settings().instagram_client_id, 'client_secret': get_settings().instagram_client_secret, 'grant_type': 'authorization_code', 'redirect_uri': redirect_uri, 'code': code, } result = requests.post( 'https://api.instagram.com/oauth/access_token', data=params) app.logger.debug('received result %s', result) payload = result.json() access_token = payload.get('access_token') Setting.query.get('instagram_access_token').value = access_token db.session.commit() return redirect(url_for('edit_settings')) def create_context(url): m = PERMALINK_RE.match(url) if not m: app.logger.debug('url is not an instagram media url %s', url) return r = ig_get('https://api.instagram.com/v1/media/shortcode/' + m.group(1)) if r.status_code // 2 != 100: app.logger.warn("failed to fetch instagram media %s %s", r, r.content) return blob = r.json() author = blob.get('data', {}).get('user', {}) author_name = author.get('full_name') author_image = author.get('profile_picture') author_url = author.get('website') created_time = blob.get('data', {}).get('created_time') caption_text = blob.get('data', {}).get('caption', {}).get('text') images = blob.get('data', {}).get('images', {}) image = images.get('standard_resolution').get('url') if created_time: published = datetime.datetime.fromtimestamp(int(created_time)) content = '' if caption_text: content += '<p>' + caption_text + '</p>' if image: content += '<img src="' + image + '"/>' context = Context() context.url = context.permalink = url context.author_name = author_name context.author_image = author_image context.author_url = author_url context.published = published context.title = None context.content = content context.content_plain = caption_text app.logger.debug('created instagram context %s', context) return context def send_to_instagram(post, args): """Share a like to Instagram without user-input. """ if not is_instagram_authorized(): return False, 'Current user is not authorized for instagram' app.logger.debug("queueing post to instagram {}".format(post.id)) queue.enqueue(do_send_to_instagram, post.id) return True, 'Success' def do_send_to_instagram(post_id): app.logger.debug('posting to instagram %d', post_id) post = Post.load_by_id(post_id) in_reply_to, repost_of, like_of \ = util.posse_post_discovery(post, PERMALINK_RE) # likes are the only thing we can POSSE to instagram unfortunately if like_of: m = PERMALINK_RE.match(like_of) shortcode = m.group(1) r = ig_get('https://api.instagram.com/v1/media/shortcode/' + m.group(1)) if r.status_code // 2 != 100: app.logger.warn("failed to fetch instagram media %s %s", r, r.content) return None media_id = r.json().get('data', {}).get('id') if not media_id: app.logger.warn('could not find media id for shortcode %s', shortcode) return None r = ig_get('https://api.instagram.com/v1/users/self') my_username = r.json().get('data', {}).get('username') r = ig_post('https://api.instagram.com/v1/media/' + media_id + '/likes') if r.status_code // 2 != 100: app.logger.warn("failed to POST like for instagram id %s", media_id) return None like_url = like_of + '#liked-by-' + my_username new_synd = list(post.syndication) new_synd.append(like_url) post.syndication = new_synd db.session.commit() return like_url def ig_get(url): return requests.get(url, params={ 'access_token': get_settings().instagram_access_token, }) def ig_post(url): return requests.post(url, data={ 'access_token': get_settings().instagram_access_token, }) def is_instagram_authorized(): return get_settings().instagram_access_token
Python
0
@@ -5298,16 +5298,17 @@ %7D)%0A%0A +%0A def is_i @@ -5341,16 +5341,83 @@ return +(hasattr(get_settings(), 'instagram_access_token')%0A and get_sett @@ -5433,21 +5433,22 @@ stagram_access_token +) %0A
f6a4a8245c2b88614c7e182021d16503fdc6feab
Fix lint errors in the trigger_reindex script
reindexer/trigger_reindex.py
reindexer/trigger_reindex.py
#!/usr/bin/env python # -*- encoding: utf-8 """ Create/update reindex shards in the reindex shard tracker table. Usage: trigger_reindex.py --source=<SOURCE_NAME> --reason=<REASON> trigger_reindex.py -h | --help Options: --source=<SOURCE_NAME> Name of the source you want to reindex. --reason=<REASON> An explanation of why you're running this reindex. This will be printed in the Slack alert. -h --help Print this help message """ import json import os import subprocess import sys import boto3 import docopt import hcl import requests import tqdm from dynamodb_capacity_helpers import ( get_dynamodb_max_table_capacity, get_dynamodb_max_gsi_capacity, set_dynamodb_table_capacity, set_dynamodb_gsi_capacity ) # Reindex shards are added by a "reindex_shard_generator" Lambda. # Import the utility code that assigns reindex shards. ROOT = subprocess.check_output( ['git', 'rev-parse', '--show-toplevel']).decode('utf8').strip() sys.path.append(os.path.join(ROOT, 'reindexer/reindex_shard_generator/src')) from reindex_shard_config import get_number_of_shards # noqa DYNAMO_CONFIGS = { 'miro': {'table': 'vhs-sourcedata-miro', 'maybeIndex': 'reindexTracker'}, 'sierra': {'table': 'vhs-sourcedata-sierra', 'maybeIndex': 'reindexTracker'} } def get_topic_name(source_name): return f'reindex_jobs-{source_name}' def all_shard_ids(source_name): """ Generates all the shard IDs in a given source name. e.g. miro/1, miro/2, miro/3, ... """ count = get_number_of_shards(source_name=source_name) for shard_index in range(count): yield f'{source_name}/{shard_index}' def all_messages(total_segments): """ Generates all the messages to be sent to SNS. """ for i in range(total_segments): yield { 'segment': i, 'totalSegments': total_segments } def publish_messages(topic_arn, messages): """Publish a sequence of messages to an SNS topic.""" sns_client = boto3.client('sns') for m in tqdm.tqdm(messages): resp = sns_client.publish( TopicArn=topic_arn, MessageStructure='json', Message=json.dumps({ 'default': json.dumps(m) }), Subject=f'Source: {__file__}' ) assert resp['ResponseMetadata']['HTTPStatusCode'] == 200, resp def post_to_slack(source_name, reason): """ Posts a message about the reindex in Slack, so we can track them. """ # Get the name of the current user. iam = boto3.client('iam') username = iam.get_user()['User']['UserName'] # Get the non-critical Slack token. s3 = boto3.client('s3') tfvars_obj = s3.get_object( Bucket='wellcomecollection-platform-infra', Key='terraform.tfvars' ) tfvars_body = tfvars_obj['Body'].read() tfvars = hcl.loads(tfvars_body) webhook_url = tfvars['non_critical_slack_webhook'] message = ( f'*{username}* started a reindex in *{source_name}*\n' f'Reason: *{reason}*' ) slack_data = { 'username': 'reindex-tracker', 'icon_emoji': ':dynamodb:', 'color': '#2E72B8', 'title': 'reindexer', 'fields': [{'value': message}] } resp = requests.post( webhook_url, data=json.dumps(slack_data), headers={'Content-Type': 'application/json'} ) resp.raise_for_status() def build_topic_arn(topic_name): """Given a topic name, return the topic ARN.""" # https://stackoverflow.com/a/37723278/1558022 sts_client = boto3.client('sts') account_id = sts_client.get_caller_identity().get('Account') return f'arn:aws:sns:eu-west-1:{account_id}:{topic_name}' def main(): args = docopt.docopt(__doc__) source_name = args['--source'] reason = args['--reason'] print(f'Triggering a reindex in {source_name}') post_to_slack(source_name=source_name, reason=reason) shard_ids = all_shard_ids(source_name=source_name) messages = all_messages(total_segments=150) topic_arn = build_topic_arn(topic_name=get_topic_name(source_name)) publish_messages( topic_arn=topic_arn, messages=messages ) # Now we update the write capacity of the SourceData table as high # as it can go -- we've seen issues where the table capacity fails to # scale up correctly, which slows down the reindexer. dynamo_config = DYNAMO_CONFIGS[source_name] max_capacity = get_dynamodb_max_table_capacity( table_name=dynamo_config['table'] ) table_name = dynamo_config['table'] gsi_name = dynamo_config['maybeIndex'] print(f'Setting {table_name} table capacity to {max_capacity}') set_dynamodb_table_capacity( table_name=table_name, desired_capacity=max_capacity ) max_capacity = get_dynamodb_max_gsi_capacity( table_name=table_name, gsi_name=gsi_name ) print(f'Setting {table_name} GSI {gsi_name} capacity to {max_capacity}') set_dynamodb_gsi_capacity( table_name=table_name, gsi_name=gsi_name, desired_capacity=max_capacity ) if __name__ == "__main__": try: main() except KeyboardInterrupt: import sys sys.exit(1)
Python
0.000004
@@ -512,36 +512,8 @@ son%0A -import os%0Aimport subprocess%0A impo @@ -770,909 +770,261 @@ )%0A%0A%0A -# Reindex shards are added by a %22reindex_shard_generator%22 Lambda.%0A# Import the utility code that assigns reindex shards.%0AROOT = subprocess.check_output(%0A %5B'git', 'rev-parse', '--show-toplevel'%5D).decode('utf8').strip()%0Asys.path.append(os.path.join(ROOT, 'reindexer/reindex_shard_generator/src'))%0A%0Afrom reindex_shard_config import get_number_of_shards # noqa%0A%0A%0ADYNAMO_CONFIGS = %7B%0A 'miro': %7B'table': 'vhs-sourcedata-miro', 'maybeIndex': 'reindexTracker'%7D,%0A 'sierra': %7B'table': 'vhs-sourcedata-sierra', 'maybeIndex': 'reindexTracker'%7D%0A%7D%0A%0A%0Adef get_topic_name(source_name):%0A return f'reindex_jobs-%7Bsource_name%7D'%0A%0A%0Adef all_shard_ids(source_name):%0A %22%22%22%0A Generates all the shard IDs in a given source name.%0A%0A e.g. miro/1, miro/2, miro/3, ...%0A %22%22%22%0A count = get_number_of_shards(source_name=source_name)%0A%0A for shard_index in range(count):%0A yield f'%7Bsource_name%7D/%7Bshard_index +DYNAMO_CONFIGS = %7B%0A 'miro': %7B'table': 'vhs-sourcedata-miro', 'maybeIndex': 'reindexTracker'%7D,%0A 'sierra': %7B'table': 'vhs-sourcedata-sierra', 'maybeIndex': 'reindexTracker'%7D%0A%7D%0A%0A%0Adef get_topic_name(source_name):%0A return f'reindex_jobs-%7Bsource_name %7D'%0A%0A @@ -3341,63 +3341,8 @@ n)%0A%0A - shard_ids = all_shard_ids(source_name=source_name)%0A @@ -4566,27 +4566,8 @@ pt:%0A - import sys%0A
1c6ed4130baacf0d0f662b6aa056630dd7fd383d
Fix vocab splitting
spraakbanken/s5/spr_local/make_recog_vocab.py
spraakbanken/s5/spr_local/make_recog_vocab.py
#!/usr/bin/env python3 import sys import collections def main(in_vocab, size, out_vocab,): counter = collections.Counter() size = int(size) for line in open(in_vocab, encoding='utf-8'): word, count = line.strip().split() if any(x.isdigit() for x in word): continue punctuation = "\\/?.,!;:\"\'()-=+[]%§*¤ïÐ$&<>#@{}" if any(x in punctuation for x in word): continue counter[word] += int(count) with open(out_vocab, 'w', encoding='utf-8') as out_f: for w, c in counter.most_common(size): print(w, file=out_f) if __name__ == "__main__": if len(sys.argv) != 4: exit("3 arguments: in_vocab, desired_size, out_vocab") main(*sys.argv[1:])
Python
0.003701
@@ -227,22 +227,27 @@ ine. +r strip( +%22%5Cn%22 ).split( )%0A @@ -242,16 +242,19 @@ ).split( +%22 %22 )%0A
7be728d551d7d2becd70b575f95facbbd561e69b
Add latest version of libsigsegv (#3449)
var/spack/repos/builtin/packages/libsigsegv/package.py
var/spack/repos/builtin/packages/libsigsegv/package.py
############################################################################## # Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Livermore National Laboratory. # # This file is part of Spack. # Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. # LLNL-CODE-647188 # # For details, see https://github.com/llnl/spack # Please also see the LICENSE file for our notice and the LGPL. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License (as # published by the Free Software Foundation) version 2.1, February 1999. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and # conditions of the GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## from spack import * class Libsigsegv(AutotoolsPackage): """GNU libsigsegv is a library for handling page faults in user mode.""" homepage = "https://www.gnu.org/software/libsigsegv/" url = "https://ftp.gnu.org/gnu/libsigsegv/libsigsegv-2.10.tar.gz" patch('patch.new_config_guess', when='@2.10') version('2.10', '7f96fb1f65b3b8cbc1582fb7be774f0f') def configure_args(self): return ['--enable-shared']
Python
0
@@ -1474,17 +1474,17 @@ segv-2.1 -0 +1 .tar.gz%22 @@ -1532,24 +1532,80 @@ n='@2.10')%0A%0A + version('2.11', 'a812d9481f6097f705599b218eea349f')%0A version(
20a89ca326712058f3f22621eed725c0f510bee3
Add branch with bugfix (#8355)
var/spack/repos/builtin/packages/meraculous/package.py
var/spack/repos/builtin/packages/meraculous/package.py
############################################################################## # Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Livermore National Laboratory. # # This file is part of Spack. # Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. # LLNL-CODE-647188 # # For details, see https://github.com/spack/spack # Please also see the NOTICE and LICENSE files for our notice and the LGPL. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License (as # published by the Free Software Foundation) version 2.1, February 1999. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and # conditions of the GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## from spack import * class Meraculous(CMakePackage): """Meraculous is a while genome assembler for Next Generation Sequencing data geared for large genomes.""" homepage = "http://jgi.doe.gov/data-and-tools/meraculous/" url = "https://downloads.sourceforge.net/project/meraculous20/Meraculous-v2.2.4.tar.gz" version('2.2.4', '349feb6cb178643a46e4b092c87bad3a') depends_on('perl', type=('build', 'run')) depends_on('boost@1.5.0:') depends_on('gnuplot@3.7:') depends_on('perl-log-log4perl', type=('build', 'run')) conflicts('%gcc@6.0.0:', when='@2.2.4') def patch(self): edit = FileFilter('CMakeLists.txt') edit.filter("-static-libstdc\+\+", "") def setup_environment(self, spack_env, run_env): run_env.set('MERACULOUS_ROOT', self.prefix) run_env.prepend_path('PERL5LIB', self.prefix.lib)
Python
0
@@ -1562,16 +1562,155 @@ ar.gz%22%0A%0A + version('2.2.5.1',%0A git=%22https://bitbucket.org/berkeleylab/genomics-meraculous2.git%22,%0A branch=%22release-2.2.5.1%22)%0A vers
5d608a855132f0a378e44b3c0c7dbba1f4f4dace
fix corehq.messaging.smsbackends.twilio.tests.test_log_call:TwilioLogCallTestCase.test_log_call
corehq/messaging/smsbackends/twilio/tests/test_log_call.py
corehq/messaging/smsbackends/twilio/tests/test_log_call.py
from __future__ import absolute_import from __future__ import unicode_literals import corehq.apps.ivr.tests.util as util from corehq.apps.ivr.models import Call from corehq.messaging.smsbackends.twilio.models import SQLTwilioBackend from corehq.messaging.smsbackends.twilio.views import IVR_RESPONSE from django.test import Client class TwilioLogCallTestCase(util.LogCallTestCase): def setUp(self): super(TwilioLogCallTestCase, self).setUp() self.backend = SQLTwilioBackend.objects.create( name='TWILIO', is_global=True, hq_api_id=SQLTwilioBackend.get_api_id() ) def tearDown(self): self.backend.delete() super(TwilioLogCallTestCase, self).tearDown() def test_401_response(self): with self.create_case(): start_count = Call.by_domain(self.domain).count() response = Client().post('/twilio/ivr/xxxxx', { 'From': self.phone_number, 'CallSid': 'xyz', }) self.assertEqual(response.status_code, 401) end_count = Call.by_domain(self.domain).count() self.assertEqual(start_count, end_count) def simulate_inbound_call(self, phone_number): url = '/twilio/ivr/%s' % self.backend.inbound_api_key return Client().post(url, { 'From': phone_number, 'CallSid': 'xyz', }) def check_response(self, response): self.assertEqual(response.status_code, 200) self.assertEqual(response.content, IVR_RESPONSE)
Python
0.000002
@@ -1543,16 +1543,32 @@ .content +.decode('utf-8') , IVR_RE
968f0f3d41a546c4c6614d24be3e077ba1ee37b9
Reorganize imports in xml_utils
packtools/sps/utils/xml_utils.py
packtools/sps/utils/xml_utils.py
import logging import re from lxml import etree from dsm.utils.files import read_file logger = logging.getLogger(__name__) class LoadToXMLError(Exception): ... def fix_xml(xml_str): return fix_namespace_prefix_w(xml_str) def fix_namespace_prefix_w(content): """ Convert os textos cujo padrão é `w:st="` em `w-st="` """ pattern = r"\bw:[a-z]{1,}=\"" found_items = re.findall(pattern, content) logger.debug("Found %i namespace prefix w", len(found_items)) for item in set(found_items): new_namespace = item.replace(":", "-") logger.debug("%s -> %s" % (item, new_namespace)) content = content.replace(item, new_namespace) return content def _get_xml_content(xml): if isinstance(xml, str): try: content = read_file(xml) except (FileNotFoundError, OSError): content = xml content = fix_xml(content) return content.encode("utf-8") return xml def get_xml_tree(content): parser = etree.XMLParser(remove_blank_text=True, no_network=True) try: content = _get_xml_content(content) xml_tree = etree.XML(content, parser) # if isinstance(content, str): # # xml_tree = etree.parse(BytesIO(content.encode("utf-8")), parser) # xml_tree = etree.parse(StringIO(content), parser) # else: # # content == zipfile.read(sps_xml_file) # except ValueError as exc: # xml_tree = etree.XML(content, parser) except etree.XMLSyntaxError as exc: raise LoadToXMLError(str(exc)) from None else: return xml_tree def tostring(node, doctype=None, pretty_print=False): return etree.tostring( node, doctype=doctype, xml_declaration=True, method="xml", encoding="utf-8", pretty_print=pretty_print, ).decode("utf-8") def node_text(node, doctype=None, pretty_print=False): items = [node.text or ""] for child in node.getchildren(): items.append( etree.tostring(child, encoding="utf-8").decode("utf-8") ) return "".join(items)
Python
0
@@ -17,16 +17,43 @@ mport re +%0A%0Afrom copy import deepcopy %0Afrom lx @@ -72,28 +72,68 @@ ree%0A -%0A from -dsm.utils.f +packtools.sps import exceptions%0Afrom packtools.sps.ut il -e s im @@ -137,25 +137,26 @@ import -read_file +file_utils %0A%0A%0Alogge
4671e4a1f8f18ec26180a5b4093d70e7d3913302
fix for mk language
plugins/tts.py
plugins/tts.py
import aiohttp from plugin_system import Plugin plugin = Plugin('Голос', usage="скажи [выражение] - бот сформирует " "голосовое сообщение на основе текста") try: from gtts import gTTS import langdetect except ImportError: plugin.log('gTTS или langdetect не установлены, плагин Голос не будет работать') gTTS = None langdetect = None FAIL_MSG = 'Я не смог это произнести :(' @plugin.on_command('скажи') async def say_text(msg, args): if not gTTS or not langdetect: return await msg.answer('Я не могу говорить, ' 'так как у меня не хватает модулей :(') text = ' '.join(args) try: # Используется Google Text To Speech и библиотека langdetect tts = gTTS(text=text, lang=langdetect.detect(text)) except Exception as ex: # На самом деле не все языки, которых нет в gTTS, не поддерживаются # Например, gTTS считает, что GTTS не поддерживает украинский, хотя он поддерживает if 'Language' in ex: return await msg.answer('Данный язык не поддерживается.' 'Если вы считаете, что он должен поддерживаться,' 'напишите администратору бота!') raise # Если эта ошибка не связана с gTTS, бросаем её ещё раз # Сохраняем файл с голосом tts.save('audio.mp3') # Получаем URL для загрузки аудио сообщения upload_server = await msg.vk.method('docs.getUploadServer', {'type':'audio_message'}) url = upload_server.get('upload_url') if not url: return await msg.answer(FAIL_MSG) # Загружаем аудио через aiohttp form_data = aiohttp.FormData() form_data.add_field('file', open('audio.mp3', 'rb')) async with aiohttp.post(url, data=form_data) as resp: file_url = await resp.json() file = file_url.get('file') if not file: return await msg.answer(FAIL_MSG) # Сохраняем файл в документы (чтобы можно было прикрепить к сообщению) saved_data = await msg.vk.method('docs.save', {'file':file} ) # Получаем первый элемент, так как мы сохранили 1 файл media = saved_data[0] media_id, owner_id = media['id'], media['owner_id'] # Прикрепляем аудио к сообщению :) await msg.answer('', attachment=f'doc{owner_id}_{media_id}')
Python
0.000001
@@ -754,16 +754,169 @@ +lang = langdetect.detect(text)%0A if lang == 'mk':%0A # %D0%98%D0%BD%D0%BE%D0%B3%D0%B4%D0%B0 langdetect %D0%B4%D0%B5%D1%82%D0%B5%D0%BA%D1%82%D0%B8%D1%82 %D1%80%D1%83%D1%81%D1%81%D0%BA%D0%B8%D0%B9 %D0%BA%D0%B0%D0%BA %D0%BC%D0%B0%D0%BA%D0%B5%D0%B4%D0%BE%D0%BD%D1%81%D0%BA%D0%B8%D0%B9%0A lang = 'ru'%0A tts = gT @@ -938,35 +938,16 @@ ang=lang -detect.detect(text) )%0A ex
713fd67b4aa0d3a614ca149f86deeb2d5e913d12
fix installation on linux (#24706)
var/spack/repos/builtin/packages/py-keyring/package.py
var/spack/repos/builtin/packages/py-keyring/package.py
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class PyKeyring(PythonPackage): """The Python keyring library provides an easy way to access the system keyring service from python. It can be used in any application that needs safe password storage.""" homepage = "https://github.com/jaraco/keyring" pypi = "keyring/keyring-23.0.1.tar.gz" version('23.0.1', sha256='045703609dd3fccfcdb27da201684278823b72af515aedec1a8515719a038cb8') depends_on('python@3.6:', type=('build', 'run')) depends_on('py-setuptools', type='build') depends_on('py-setuptools-scm@3.4.1:+toml', type='build') depends_on('py-importlib-metadata@3.6:', type=('build', 'run')) # TODO: additional dependencies required for Windows/Linux
Python
0
@@ -852,24 +852,193 @@ d', 'run'))%0A + depends_on('py-secretstorage@3.2:', type=('build', 'run'), when='platform=linux')%0A depends_on('py-jeepney@0.4.2:', type=('build', 'run'), when='platform=linux')%0A%0A # TODO: @@ -1057,17 +1057,33 @@ ependenc -i +y on pywin32-ctyp es requi @@ -1101,11 +1101,5 @@ dows -/Linux %0A
e11f99e43ff9d909bf97f050c560663d38fb1388
Add fixture/result subdirectory.
test/unit/staging/test_link_dicom.py
test/unit/staging/test_link_dicom.py
from nose.tools import * import os, glob, shutil import sys sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', '..')) from qipipe import staging # The test parent directory. ROOT = os.path.normpath(os.path.join(os.path.dirname(__file__), '..', '..')) # The test fixture. FIXTURE = os.path.join(ROOT, 'fixtures', 'staging') # The test results. RESULTS = os.path.join(ROOT, 'results', 'staging') # The test result target. TARGET = os.path.join(RESULTS, 'data') # The test result delta. DELTA = os.path.join(RESULTS, 'delta') class TestLinkDicom: """TCIA dicom link unit tests.""" def test_link_dicom_files(self): shutil.rmtree(RESULTS, True) src_pnt_dirs = glob.glob(FIXTURE + '/*') opts = {'target': TARGET, 'include': '*concat*/*'} args = src_pnt_dirs + [opts] staging.link_dicom_files(*args) # Verify that there are no broken links. for root, dirs, files in os.walk(TARGET): for f in files: tgt_file = os.path.join(root, f) assert_true(os.path.islink(tgt_file), "Missing source -> target target link: %s" % tgt_file) assert_true(os.path.exists(tgt_file), "Broken source -> target link: %s" % tgt_file) # Test incremental delta. tgt = os.path.join(TARGET, 'patient01', 'visit02') # Clean the partial result. shutil.rmtree(tgt, True) # Clean the delta tree. shutil.rmtree(DELTA, True) # Add the delta argument. opts['delta'] = DELTA # Rerun to capture the delta. staging.link_dicom_files(*args) delta = os.path.join(DELTA, 'patient01', 'visit02') assert_true(os.path.islink(delta), "Missing delta -> target link: %s" % delta) assert_true(os.path.exists(delta), "Broken delta -> target link: %s" % delta) real_tgt = os.path.realpath(tgt) real_delta = os.path.realpath(delta) assert_equal(real_tgt, real_delta, "Delta does not reference the target: %s" % delta) non_delta = os.path.join(DELTA, 'patient01', 'visit01') assert_false(os.path.exists(non_delta), "Incorrectly added a target -> delta link in %s" % non_delta) # Cleanup. shutil.rmtree(RESULTS, True) if __name__ == "__main__": import nose nose.main(defaultTest=__name__)
Python
0
@@ -147,23 +147,40 @@ pipe - import staging +.staging import link_dicom_files %0A%0A# @@ -345,32 +345,46 @@ ures', 'staging' +, 'link_dicom' )%0A# The test res @@ -438,16 +438,30 @@ staging' +, 'link_dicom' )%0A# The
f06a4689fab32d7d2f4c848019978665656a5cdf
Implement hexfile record types used by GNU ld
mikroeuhb/hexfile.py
mikroeuhb/hexfile.py
import struct, logging from binascii import unhexlify from util import bord logger = logging.getLogger(__name__) def load(f, devkit): """Load a Intel HEX File from a file object into a devkit. The devkit must implement a write(address,data) method.""" lineno = 0 base_addr = 0 for line in f.xreadlines(): lineno += 1 line = line.strip() if line == '': continue if bord(line[0]) != ord(':'): raise IOError('line %d: malformed' % lineno) line = unhexlify(line[1:]) byte_count, address, record_type = struct.unpack('>BHB', line[:4]) correct_len = byte_count + 5 if len(line) != correct_len: logger.warn('line %d: should have %d bytes -- truncating' % (lineno, correct_len)) line = line[:correct_len] if sum(map(bord,line)) & 0xFF != 0: raise IOError('line %d: incorrect checksum' % lineno) data = line[4:-1] if record_type == 0x00: # data record devkit.write(base_addr + address, data) elif record_type == 0x01: # end of file record break elif record_type == 0x04: # extended linear address record if byte_count != 2: raise IOError('line %d: extended linear address record must have 2 bytes of data' % lineno) base_addr, = struct.unpack('>H', data) base_addr <<= 16 else: raise IOError('line %d: unsupported record type %d' % (lineno, record_type))
Python
0
@@ -1439,11 +1439,228 @@ el -se: +if record_type == 0x02: # extended segment address record%0A base_addr, = struct.unpack('%3EH', data)%0A base_addr %3C%3C= 4%0A elif record_type not in %5B0x03, 0x05%5D: # used for the initial PC (ignored) %0A
b9b3837937341e6b1b052bbfdd979e3bb57d87c4
Fix SSL security provider integration tests
tests/integration/test_with_ssl.py
tests/integration/test_with_ssl.py
from . import base class SSLTestCase(base.IntegrationTestCase): '''RabbitMQ integration test case.''' CTXT = { 'plugin.activemq.pool.1.port': 61614, 'plugin.activemq.pool.1.password': 'marionette', 'plugin.ssl_server_public': 'tests/fixtures/server-public.pem', 'plugin.ssl_client_private': 'tests/fixtures/client-private.pem', 'plugin.ssl_client_public': 'tests/fixtures/client-public.pem', } class TestWithSSLMCo20x(base.MCollective20x, SSLTestCase): '''MCollective integration test case.''' class TestWithSSLMCo22x(base.MCollective22x, SSLTestCase): '''MCollective integration test case.''' class TestWithSSLMCo23x(base.MCollective23x, SSLTestCase): '''MCollective integration test case.'''
Python
0
@@ -1,22 +1,114 @@ -from . import base +import os%0A%0Afrom pymco.test import ctxt%0Afrom . import base%0A%0AFIXTURES_PATH = os.path.join(ctxt.ROOT, 'fixtures') %0A%0A%0Ac @@ -523,24 +523,250 @@ ublic.pem',%0A + 'plugin.ssl_server_private': os.path.join(FIXTURES_PATH,%0A 'server-private.pem'),%0A 'securityprovider': 'ssl',%0A 'plugin.ssl_client_cert_dir': FIXTURES_PATH,%0A %7D%0A%0A%0Aclas
30bd0a8b50545e24ec69ecc4c720c508c318e008
Remove pdb
tests/mock_vws/utils.py
tests/mock_vws/utils.py
""" Utilities for tests for the VWS mock. """ from string import hexdigits from typing import Optional from urllib.parse import urljoin from requests.models import Response from common.constants import ResultCodes class Endpoint: """ Details of endpoints to be called in tests. """ def __init__(self, example_path: str, method: str, successful_headers_result_code: ResultCodes, successful_headers_status_code: int, content_type: Optional[str], content: bytes, ) -> None: """ Args: example_path: An example path for calling the endpoint. method: The HTTP method for the endpoint. successful_headers_result_code: The expected result code if the example path is requested with the method. successful_headers_status_code: The expected status code if the example path is requested with the method. content: The data to send with the request. Attributes: example_path: An example path for calling the endpoint. method: The HTTP method for the endpoint. content_type: The `Content-Type` header to send, or `None` if one should not be sent. content: The data to send with the request. url: The URL to call the path with. successful_headers_result_code: The expected result code if the example path is requested with the method. successful_headers_status_code: The expected status code if the example path is requested with the method. """ self.example_path = example_path self.method = method self.content_type = content_type self.content = content self.url = urljoin('https://vws.vuforia.com/', example_path) self.successful_headers_status_code = successful_headers_status_code self.successful_headers_result_code = successful_headers_result_code def assert_vws_failure(response: Response, status_code: int, result_code: ResultCodes) -> None: """ Assert that a VWS failure response is as expected. Args: response: The response returned by a request to VWS. status_code: The expected status code of the response. result_code: The expected result code of the response. Raises: AssertionError: The response is not in the expected VWS error format for the given codes. """ # import pdb; pdb.set_trace() assert response.json().keys() == {'transaction_id', 'result_code'} assert_vws_response( response=response, status_code=status_code, result_code=result_code, ) def assert_vws_response(response: Response, status_code: int, result_code: ResultCodes, ) -> None: """ Assert that a VWS response is as expected, at least in part. https://library.vuforia.com/articles/Solution/How-To-Interperete-VWS-API-Result-Codes implies that the expected status code can be worked out from the result code. However, this is not the case as the real results differ from the documentation. For example, it is possible to get a "Fail" result code and a 400 error. Args: response: The response returned by a request to VWS. status_code: The expected status code of the response. result_code: The expected result code of the response. Raises: AssertionError: The response is not in the expected VWS format for the given codes. """ message = 'Expected {expected}, got {actual}.' assert response.status_code == status_code, message.format( expected=status_code, actual=response.status_code, ) assert response.json()['result_code'] == result_code.value assert response.headers['Content-Type'] == 'application/json' transaction_id = response.json()['transaction_id'] assert len(transaction_id) == 32 assert all(char in hexdigits for char in transaction_id)
Python
0.000004
@@ -2623,42 +2623,8 @@ %22%22%22%0A - # import pdb; pdb.set_trace()%0A
dcc07355786f94da36d938239c5c60d5302e4d42
test for identity link
testapp/tests/test_renderer_infer.py
testapp/tests/test_renderer_infer.py
#!/usr/bin/env python
# encoding: utf-8
from django.test import TestCase
from collection_json import Collection
from testapp.models import Person


class DictionaryTest(TestCase):
    """tests when the response contains a dictionary"""
    def test_no_serializer_view(self):
        with self.assertRaises(TypeError):
            self.client.get("/infer/noserializer")


class PersonTest(TestCase):
    def setUp(self):
        self.num_people = 10
        for i in range(self.num_people):
            p = Person.objects.create(name="person{}".format(i),
                                      address="address{}".format(i))
            p.save()
        response = self.client.get("/infer/person")
        content = response.content.decode("utf-8")
        self.collection = Collection.from_json(content)

    def test_db_setup(self):
        """asserts that the database was properly initialized"""
        self.assertEqual(self.num_people, len(Person.objects.all()))

    def test_collection_items(self):
        """asserts that the right number of items was parsed"""
        self.assertEqual(self.num_people, len(self.collection.items))

    def test_collection_names(self):
        """tests that the given attribute was parsed correctly"""
        for i, item in enumerate(self.collection.items):
            expected = "person{}".format(i)
            self.assertEqual(item.name.value, expected)

    def test_collection_address(self):
        """tests that the given attribute was parsed correctly"""
        for i, item in enumerate(self.collection.items):
            expected = "address{}".format(i)
            self.assertEqual(item.address.value, expected)


class ListTest(TestCase):
    """tests when the response contains a list"""
    urls = "testapp.urls"
Python
0
@@ -141,16 +141,114 @@ Person%0A%0A +try:%0A from urlparse import urlparse%0Aexcept ImportError:%0A from urllib.parse import urlparse%0A%0A %0Aclass D @@ -739,16 +739,51 @@ .save()%0A + self.url = %22/infer/person%22%0A @@ -809,31 +809,24 @@ ent.get( -%22/infer/person%22 +self.url )%0A @@ -1791,16 +1791,222 @@ ected)%0A%0A + def test_collection_identity_link(self):%0A %22%22%22tests that the href for the collection is correct%22%22%22%0A actual = urlparse(self.collection.href).path%0A self.assertEqual(actual, self.url)%0A%0A %0Aclass L
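Decoded, the %0A-escaped hunks above add a urlparse import fallback, store the request URL in setUp, and introduce one new assertion. A sketch reconstructed from the hunks (not the verbatim patch):

try:
    from urlparse import urlparse        # Python 2
except ImportError:
    from urllib.parse import urlparse    # Python 3

class PersonTest(TestCase):
    # setUp() now stores self.url = "/infer/person" and reuses it
    def test_collection_identity_link(self):
        """tests that the href for the collection is correct"""
        actual = urlparse(self.collection.href).path
        self.assertEqual(actual, self.url)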
943ecc39af2b152bc8d5fed55bdafe5332a33d75
remove xfail (#4458)
testing/kfctl/endpoint_ready_test.py
testing/kfctl/endpoint_ready_test.py
import datetime
import json
import logging
import os
import subprocess
import tempfile
import uuid
from retrying import retry

import pytest

from kubeflow.testing import util
from testing import deploy_utils
from testing import gcp_util

# TODO(https://github.com/kubeflow/kfctl/issues/42):
# Test is failing pretty consistently.
@pytest.mark.xfail
# There's really no good reason to run test_endpoint during presubmits.
# We shouldn't need it to feel confident that kfctl is working.
@pytest.mark.skipif(os.getenv("JOB_TYPE") == "presubmit",
                    reason="test endpoint doesn't run in presubmits")
def test_endpoint_is_ready(record_xml_attribute, project, app_path, app_name,
                           use_basic_auth):
  """Test that Kubeflow was successfully deployed.

  Args:
    project: The gcp project that we deployed kubeflow
    app_name: The name of the kubeflow deployment
  """
  util.set_pytest_junit(record_xml_attribute, "test_endpoint_is_ready")

  url = "https://{}.endpoints.{}.cloud.goog".format(app_name, project)
  if use_basic_auth:
    with open(os.path.join(app_path, "login.json"), "r") as f:
      login = json.load(f)
      # Let it fail if login info cannot be found.
      username = login["KUBEFLOW_USERNAME"]
      password = login["KUBEFLOW_PASSWORD"]
      if not gcp_util.basic_auth_is_ready(url, username, password):
        raise Exception("Basic auth endpoint is not ready")
  else:
    # Owned by project kubeflow-ci-deployment.
    os.environ["CLIENT_ID"] = "29647740582-7meo6c7a9a76jvg54j0g2lv8lrsb4l8g.apps.googleusercontent.com"
    if not gcp_util.iap_is_ready(url):
      raise Exception("IAP endpoint is not ready")

if __name__ == "__main__":
  logging.basicConfig(level=logging.INFO,
                      format=('%(levelname)s|%(asctime)s'
                              '|%(pathname)s|%(lineno)d| %(message)s'),
                      datefmt='%Y-%m-%dT%H:%M:%S',
                      )
  logging.getLogger().setLevel(logging.INFO)
  pytest.main()
Python
0.000009
@@ -236,119 +236,8 @@ il%0A%0A -# TODO(https://github.com/kubeflow/kfctl/issues/42):%0A# Test is failing pretty consistently.%0A@pytest.mark.xfail%0A # Th
291681041f434a981a54371bb7f9f1fa9637afb7
improve comment collapse
polls/admin.py
polls/admin.py
from django.contrib import admin

from polls.models import Choice, Question


class ChoiceInline(admin.TabularInline):
    model = Choice
    extra = 3


class QuestionAdmin(admin.ModelAdmin):
    fieldsets = [
        (None, {'fields': ['question_text']}),
        ('Date information', {'fields': ['pub_date'],
        #'classes': ['collapse']}),
    ]
    inlines = [ChoiceInline]
    list_display = ('question_text', 'pub_date', 'was_published_recently')
    list_filter = ['pub_date','question_text']
    search_fields = ['question_text']

admin.site.register(Question, QuestionAdmin)
Python
0.000002
@@ -340,16 +340,26 @@ llapse'%5D +%0A %09 %7D),%0A
21ecd9a319c5e0dceed36fcf9cabdc864f735c2c
Write test for nearley include
tests/test_nearley/test_nearley.py
tests/test_nearley/test_nearley.py
from __future__ import absolute_import

import unittest
import logging
import os
import sys
logging.basicConfig(level=logging.INFO)

from lark.tools.nearley import create_code_for_nearley_grammar

NEARLEY_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), 'nearley'))
BUILTIN_PATH = os.path.join(NEARLEY_PATH, 'builtin')

class TestNearley(unittest.TestCase):
    def test_css(self):
        css_example_grammar = """
# http://www.w3.org/TR/css3-color/#colorunits

@builtin "whitespace.ne"
@builtin "number.ne"
@builtin "postprocessors.ne"

csscolor -> "#" hexdigit hexdigit hexdigit hexdigit hexdigit hexdigit {%
    function(d) {
        return {
            "r": parseInt(d[1]+d[2], 16),
            "g": parseInt(d[3]+d[4], 16),
            "b": parseInt(d[5]+d[6], 16),
        }
    }
%}
    | "#" hexdigit hexdigit hexdigit {%
    function(d) {
        return {
            "r": parseInt(d[1]+d[1], 16),
            "g": parseInt(d[2]+d[2], 16),
            "b": parseInt(d[3]+d[3], 16),
        }
    }
%}
    | "rgb" _ "(" _ colnum _ "," _ colnum _ "," _ colnum _ ")" {% $({"r": 4, "g": 8, "b": 12}) %}
    | "hsl" _ "(" _ colnum _ "," _ colnum _ "," _ colnum _ ")" {% $({"h": 4, "s": 8, "l": 12}) %}
    | "rgba" _ "(" _ colnum _ "," _ colnum _ "," _ colnum _ "," _ decimal _ ")" {% $({"r": 4, "g": 8, "b": 12, "a": 16}) %}
    | "hsla" _ "(" _ colnum _ "," _ colnum _ "," _ colnum _ "," _ decimal _ ")" {% $({"h": 4, "s": 8, "l": 12, "a": 16}) %}

hexdigit -> [a-fA-F0-9]

colnum -> unsigned_int {% id %} | percentage {% function(d) {return Math.floor(d[0]*255); } %}
        """
        code = create_code_for_nearley_grammar(css_example_grammar, 'csscolor', BUILTIN_PATH, './')
        d = {}
        exec (code, d)
        parse = d['parse']

        c = parse('#a199ff')
        assert c['r'] == 161
        assert c['g'] == 153
        assert c['b'] == 255

        c = parse('rgb(255, 70%, 3)')
        assert c['r'] == 255
        assert c['g'] == 178
        assert c['b'] == 3


if __name__ == '__main__':
    unittest.main()
Python
0
@@ -2280,16 +2280,380 @@ %5D == 3%0A%0A + def test_include(self):%0A fn = os.path.join(NEARLEY_PATH, 'test/grammars/folder-test.ne')%0A with open(fn) as f:%0A grammar = f.read()%0A%0A code = create_code_for_nearley_grammar(grammar, 'main', BUILTIN_PATH, os.path.dirname(fn))%0A d = %7B%7D%0A exec (code, d)%0A parse = d%5B'parse'%5D%0A%0A parse('a')%0A parse('b')%0A%0A %0Aif __na
69770ecc4715788837f5f769e0d2f1e6690a153f
Allow test_core to run as a test program
tests/test_splauncher/test_core.py
tests/test_splauncher/test_core.py
from __future__ import print_function


__author__ = "John Kirkham <kirkhamj@janelia.hhmi.org>"
__date__ = "$May 18, 2015 22:08:21 EDT$"


import os
import shutil
import tempfile
import time
import unittest

from splauncher.core import main


class TestCore(unittest.TestCase):
    def setUp(self):
        self.cwd = os.getcwd()
        self.tempdir = ""

        self.tempdir = tempfile.mkdtemp()
        os.chdir(self.tempdir)

        print("tempdir = \"%s\"" % self.tempdir)

    def tearDown(self):
        os.chdir(self.cwd)
        shutil.rmtree(self.tempdir)

        self.tempdir = ""
        self.cwd = ""

    def test_main_0(self):
        main("",
             "python", "-c",
             "from __future__ import print_function;" +
             "import sys;" +
             "print(\"output\", file=sys.stdout)"
        )

        while len(os.listdir(self.tempdir)) < 2:
            time.sleep(1)
        time.sleep(1)

        filenames = []
        for each_filename in os.listdir(self.tempdir):
            filenames.append(os.path.join(self.tempdir, each_filename))
        filenames.sort()

        assert ".err" in filenames[0]
        assert ".out" in filenames[1]

        with open(filenames[0], "r") as f:
            s = f.read().strip()
            print("File \"%s\" contains \"%s\"." % (f.name, s))
            assert s == ""

        with open(filenames[1], "r") as f:
            s = f.read().strip()
            print("File \"%s\" contains \"%s\"." % (f.name, s))
            assert s == "output"

    def test_main_1(self):
        main("",
             "python", "-c",
             "from __future__ import print_function;" +
             "import sys;" +
             "print(\"error\", file=sys.stderr)"
        )

        while len(os.listdir(self.tempdir)) < 2:
            time.sleep(1)
        time.sleep(1)

        filenames = []
        for each_filename in os.listdir(self.tempdir):
            filenames.append(os.path.join(self.tempdir, each_filename))
        filenames.sort()

        assert ".err" in filenames[0]
        assert ".out" in filenames[1]

        with open(filenames[0], "r") as f:
            s = f.read().strip()
            print("File \"%s\" contains \"%s\"." % (f.name, s))
            assert s == "error"

        with open(filenames[1], "r") as f:
            s = f.read().strip()
            print("File \"%s\" contains \"%s\"." % (f.name, s))
            assert s == ""
Python
0.000002
@@ -2424,28 +2424,102 @@ %0A assert s == %22%22%0A +%0A%0Aif __name__ == '__main__':%0A import sys%0A sys.exit(unittest.main())%0A
534a21e8d664a4216af14db95415dafa0508b3b9
Remove test
tests/integration/client/standard.py
tests/integration/client/standard.py
# -*- coding: utf-8 -*-

# Import python libs
from __future__ import absolute_import
import os

# Import Salt Testing libs
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')

# Import salt libs
import integration
import salt.utils


class StdTest(integration.ModuleCase):
    '''
    Test standard client calls
    '''
    def test_cli(self):
        '''
        Test cli function
        '''
        cmd_iter = self.client.cmd_cli(
                'minion',
                'test.ping',
                )
        for ret in cmd_iter:
            self.assertTrue(ret['minion'])

        # make sure that the iter waits for long running jobs too
        cmd_iter = self.client.cmd_cli(
                'minion',
                'test.sleep',
                [6]
                )
        num_ret = 0
        for ret in cmd_iter:
            num_ret += 1
            self.assertTrue(ret['minion'])
        assert num_ret > 0

        # ping a minion that doesn't exist, to make sure that it doesn't hang forever
        # create fake minion
        key_file = os.path.join(self.master_opts['pki_dir'], 'minions', 'footest')
        # touch the file
        with salt.utils.fopen(key_file, 'a'):
            pass
        # ping that minion and ensure it times out
        try:
            cmd_iter = self.client.cmd_cli(
                    'footest',
                    'test.ping',
                    )
            num_ret = 0
            for ret in cmd_iter:
                num_ret += 1
                self.assertTrue(ret['minion'])
            assert num_ret == 0
        finally:
            os.unlink(key_file)

    def test_iter(self):
        '''
        test cmd_iter
        '''
        cmd_iter = self.client.cmd_iter(
                'minion',
                'test.ping',
                )
        for ret in cmd_iter:
            self.assertTrue(ret['minion'])

    def test_iter_no_block(self):
        '''
        test cmd_iter_no_block
        '''
        cmd_iter = self.client.cmd_iter_no_block(
                'minion',
                'test.ping',
                )
        for ret in cmd_iter:
            if ret is None:
                continue
            self.assertTrue(ret['minion'])

    def test_full_returns(self):
        '''
        test cmd_iter
        '''
        ret = self.client.cmd_full_return(
                'minion',
                'test.ping',
                )
        self.assertIn('minion', ret)
        self.assertEqual({'ret': True, 'success': True}, ret['minion'])

        ret = self.client.cmd_full_return(
                'minion',
                'test.pong',
                )
        self.assertIn('minion', ret)

        if self.master_opts['transport'] == 'zeromq':
            self.assertEqual(
                {
                    'out': 'nested',
                    'ret': '\'test.pong\' is not available.',
                    'success': False
                },
                ret['minion']
            )
        elif self.master_opts['transport'] == 'raet':
            self.assertEqual(
                {'success': False, 'ret': '\'test.pong\' is not available.'},
                ret['minion']
            )

    def test_disconnected_return(self):
        '''
        Test return/messaging on a disconnected minion
        '''
        test_ret = {'ret': 'Minion did not return. [No response]', 'out': 'no_return'}

        # Create a minion key, but do not start the "fake" minion. This mimics
        # a disconnected minion.
        key_file = os.path.join(self.master_opts['pki_dir'], 'minions', 'disconnected')
        with salt.utils.fopen(key_file, 'a'):
            pass

        # ping disconnected minion and ensure it times out and returns with correct message
        try:
            cmd_iter = self.client.cmd_cli(
                'disconnected',
                'test.ping',
                show_timeout=True
            )
            num_ret = 0
            for ret in cmd_iter:
                num_ret += 1
                self.assertEqual(ret['disconnected']['ret'], test_ret['ret'])
                self.assertEqual(ret['disconnected']['out'], test_ret['out'])

            # Ensure that we entered the loop above
            self.assertEqual(num_ret, 1)

        finally:
            os.unlink(key_file)

if __name__ == '__main__':
    from integration import run_tests
    run_tests(StdTest)
Python
0
@@ -2545,670 +2545,8 @@ %5D)%0A%0A - ret = self.client.cmd_full_return(%0A 'minion',%0A 'test.pong',%0A )%0A self.assertIn('minion', ret)%0A%0A if self.master_opts%5B'transport'%5D == 'zeromq':%0A self.assertEqual(%0A %7B%0A 'out': 'nested',%0A 'ret': '%5C'test.pong%5C' is not available.',%0A 'success': False%0A %7D,%0A ret%5B'minion'%5D%0A )%0A elif self.master_opts%5B'transport'%5D == 'raet':%0A self.assertEqual(%0A %7B'success': False, 'ret': '%5C'test.pong%5C' is not available.'%7D,%0A ret%5B'minion'%5D%0A )%0A%0A
3a968153d71f9d0876cdd058482463353eddbd74
fix remaining regexs to account for mismatched results in output files
tests/regression_tests/regression.py
tests/regression_tests/regression.py
import re
import os
import shutil
import numpy
import pickle

from pygbe.main import main as pygbe


ITER_REGEX = re.compile('Converged after (\d*) iterations')
N_REGEX = re.compile('Total elements : (\d*)')
ESOLV_REGEX = re.compile('Esolv = (\-*\d*\.\d*)\ kcal\/mol')
ESURF_REGEX = re.compile('[^: ]Esurf = (\-*\d*\.\d*)\ kcal\/mol')
ECOUL_REGEX = re.compile('Ecoul = (\-*\d*\.\d*)\ kcal\/mol')
TIME_REGEX = re.compile('Time = (\-*\d*\.\d*)\ s')


def picklesave(test_outputs):
    with open('tests','w') as f:
        pickle.dump(test_outputs, f)


def pickleload():
    with open('tests', 'r') as f:
        test_outputs = pickle.load(f)

    return test_outputs


def scanOutput(filename):
    with open(filename, 'r') as f:
        txt = f.read()

    N= re.search(N_REGEX, txt)
    if N:
        N = int(N.group(1))
    iterations = re.search(ITER_REGEX, txt)
    if iterations:
        iterations = int(iterations.group(1))
    Esolv = re.search(ESOLV_REGEX, txt)
    if Esolv:
        Esolv = float(Esolv.group(1))
    Esurf = re.search(ESURF_REGEX, txt)
    if Esurf:
        Esurf = float(Esurf.group(1))
    Ecoul = re.search(ECOUL_REGEX, txt)
    if Ecoul:
        Ecoul = float(Ecoul.group(1))
    Time = re.search(TIME_REGEX, txt)
    if Time:
        Time = float(Time.group(1))


    return N, iterations, Esolv, Esurf, Ecoul, Time


def run_regression(mesh, test_name, problem_folder, param, delete_output=True):
    """
    Runs regression tests over a series of mesh sizes

    Inputs:
    ------
        mesh: array of mesh suffixes
        problem_folder: str name of folder containing meshes, etc...
        param: str name of param file

    Returns:
    -------
        N: len(mesh) array of elements of problem
        iterations: # of iterations to converge
        Esolv: array of solvation energy
        Esurf: array of surface energy
        Ecoul: array of coulomb energy
        Time: time to solution (wall-time)
    """
    print 'Runs for molecule + set phi/dphi surface'
    N = numpy.zeros(len(mesh))
    iterations = numpy.zeros(len(mesh))
    Esolv = numpy.zeros(len(mesh))
    Esurf = numpy.zeros(len(mesh))
    Ecoul = numpy.zeros(len(mesh))
    Time = numpy.zeros(len(mesh))
    for i in range(len(mesh)):
        print 'Start run for mesh '+mesh[i]
        outfile = pygbe(['',
                         '-p', '{}'.format(param),
                         '-c', '{}_{}.config'.format(test_name, mesh[i]),
                         '-o', 'output_{}_{}'.format(test_name, mesh[i]),
                         '-g', '../../pygbe/',
                         '{}'.format(problem_folder),], return_output_fname=True)

        print 'Scan output file'
        outfolder = os.path.join('{}'.format(problem_folder),
                                 'output_{}_{}'.format(test_name, mesh[i]))
        outfile = os.path.join(outfolder, outfile)
        N[i],iterations[i],Esolv[i],Esurf[i],Ecoul[i],Time[i] = scanOutput(outfile)

        if delete_output:
            shutil.rmtree(outfolder)

    return(N, iterations, Esolv, Esurf, Ecoul, Time)
Python
0.00004
@@ -227,16 +227,21 @@ ompile(' +%5B%5E: %5D Esolv = @@ -267,16 +267,16 @@ %5C/mol')%0A - ESURF_RE @@ -359,16 +359,21 @@ ompile(' +%5B%5E: %5D Ecoul = @@ -1365,24 +1365,24 @@ group(1))%0A%0A%0A - return N @@ -1423,16 +1423,1260 @@ Time%0A%0A%0A +def report_results(error, N, iterations, Einter, analytical, total_time):%0A %22%22%22%0A Prints out information for the regression tests.%0A%0A Inputs:%0A -------%0A error: list of float%0A L2 Norm of error against analytical solution%0A N: list of int%0A Number of elements in test%0A iterations: list of int%0A Number of iterations to converge%0A Einter: list of float%0A Interaction energy%0A analytical: list of float%0A Interaction energy (analytical solution)%0A total_time: list of float%0A Total wall time of run i%0A %22%22%22%0A%0A flag = 0%0A for i in range(len(error)-1):%0A rate = error%5Bi%5D/error%5Bi+1%5D%0A if abs(rate-4)%3E0.6:%0A flag = 1%0A print('Bad convergence for mesh %7B%7D to %7B%7D, with rate %7B%7D'.%0A format(i, i+1, rate))%0A%0A if flag==0:%0A print('Passed convergence test!')%0A%0A print('%5CnNumber of elements : %7B%7D'.format(N))%0A print('Number of iteration: %7B%7D'.format(iterations))%0A print('Interaction energy : %7B%7D'.format(Einter))%0A print('Analytical solution: %7B%7D kcal/mol'.format(analytical))%0A print('Error : %7B%7D'.format(error))%0A print('Total time : %7B%7D'.format(total_time))%0A%0A%0A %0Adef run
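Decoded, the first three hunks above prepend a negated character class to two of the compiled patterns, mirroring the guard that ESURF_REGEX already carries, so the scanner skips mismatched colon- or space-prefixed duplicates in the output files. A sketch of the post-commit lines, reconstructed from the %0A-escaped hunks:

# Post-commit patterns (sketch): the leading [^: ] rejects occurrences that
# are preceded by ':' or ' ' and would otherwise shadow the real results.
ESOLV_REGEX = re.compile('[^: ]Esolv = (\-*\d*\.\d*)\ kcal\/mol')
ECOUL_REGEX = re.compile('[^: ]Ecoul = (\-*\d*\.\d*)\ kcal\/mol')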
05e496de4f6ebbb9e77c6cb1796cc1050a41a181
Adjust whitespace for pep8
pratchett/__init__.py
pratchett/__init__.py
HEADER = ("X-Clacks-Overhead", "GNU Terry Pratchett") class GNUTerryPratchett(object): def __init__(self, app): self.app = app def __call__(self, environ, start_response): def clacker(status, headers, *args, **kwargs): if HEADER not in headers: headers.append(HEADER) return start_response(status, headers, *args, **kwargs) return self.app(environ, clacker) def make_filter(global_conf): return GNUTerryPratchett
Python
0.9998
@@ -428,16 +428,17 @@ acker)%0A%0A +%0A def make
ddb4ed6701808ed5c4e928d042b84e0c84490e58
Bump version 0.0.4
memsource/__init__.py
memsource/__init__.py
__author__ = 'Gengo'
__version__ = '0.0.3'
__license__ = 'MIT'
Python
0
@@ -37,9 +37,9 @@ 0.0. -3 +4 '%0A__
949c7b55e295b4d87f2d7a1bb98242cb055129d1
Solve No.140 in Python with problems
140.py
140.py
class Solution:
    """
    @param a, b, n: 32bit integers
    @return: An integer
    """
    def fastPower(self, a, b, n):
        ans = 1
        while b > 0:
            if b % 2==1:
                ans = ans * a % n
            a = a * a % n
            b = b / 2
        return ans % n
        # WA
Python
0.005724
@@ -300,8 +300,42 @@ # WA + because of lintcode %0A # AC
86f33d7c88c728bb5ce0c885543dd54d942e2962
Fix strings broken by 1393650
tests/steps/snapshot.py
tests/steps/snapshot.py
# -*- coding: UTF-8 -*-

from __future__ import unicode_literals

from behave import step
from dogtail.rawinput import typeText, pressKey
from time import sleep
from utils import get_showing_node_name


@step('Add Snapshot named "{name}"')
def add_snapshot(context, name):
    wait = 0
    while len(context.app.findChildren(lambda x: x.roleName == 'push button' and x.showing and not x.name)) == 0:
        sleep(0.25)
        wait += 1
        if wait == 20:
            raise Exception("Timeout: Node %s wasn't found showing" %name)
    context.app.findChildren(lambda x: x.roleName == 'push button' and x.showing and not x.name)[0].click()

    wait = 0
    while len(context.app.findChildren(lambda x: x.roleName == 'toggle button' and x.showing \
                                       and x.sensitive and x.name == 'Men')) == 0:
        sleep(1)
        wait += 1
        if wait == 5:
            raise Exception("Timeout: Node %s wasn't found showing" %name)
    sleep(1)
    context.app.findChildren(lambda x: x.roleName == 'toggle button' and x.showing \
                             and x.sensitive and x.name == 'Men')[-1].click()

    renames = context.app.findChildren(lambda x: x.name == 'Rename' and x.showing)
    if not renames:
        context.app.findChildren(lambda x: x.roleName == 'toggle button' and x.showing and x.sensitive \
                                 and x.name == 'Men')[-1].click()
        renames = context.app.findChildren(lambda x: x.name == 'Rename' and x.showing)
    renames[0].click()
    sleep(0.5)
    typeText(name)
    context.app.findChildren(lambda x: x.showing and x.name == 'Done')[0].click()


@step('Create snapshot "{snap_name}" from machine "{vm_name}"')
def create_snapshot(context, snap_name, vm_name):
    context.execute_steps(u"""
        * Select "%s" box
        * Press "Properties"
        * Press "Snapshots"
        * Add Snapshot named "%s"
        * Press "Back"
    """ %(vm_name, snap_name))


@step('Delete machines "{vm_name}" snapshot "{snap_name}"')
def delete_snapshot(context, vm_name, snap_name):
    context.execute_steps(u"""
        * Select "%s" box
        * Press "Properties"
        * Press "Snapshots"
    """ % vm_name)

    name = context.app.findChildren(lambda x: x.name == snap_name and x.showing)[0]
    name.parent.child('Men').click()
    delete = context.app.findChildren(lambda x: x.name == "Delete" and x.showing)[0]
    delete.click()
    context.app.findChildren(lambda x: x.name == 'Undo' and x.showing)[0].grabFocus()
    pressKey('Tab')
    pressKey('Enter')
    sleep(2)
    get_showing_node_name('Back', context.app).click()
    sleep(0.5)


@step('Revert machine "{vm_name}" to state "{snap_name}"')
def revert_snapshot(context, vm_name, snap_name):
    context.execute_steps(u"""
        * Select "%s" box
        * Press "Properties"
        * Press "Snapshots"
    """ % vm_name)

    name = context.app.findChildren(lambda x: x.name == snap_name and x.showing)[0]
    name.parent.child('Men').click()
    revert = context.app.findChildren(lambda x: x.name == "Revert to this state" and x.showing)[0]
    revert.click()
    get_showing_node_name('Back', context.app).click()
    sleep(0.5)
Python
0.004521
@@ -855,16 +855,17 @@ == 'Men +u ')) == 0 @@ -858,32 +858,32 @@ 'Menu')) == 0:%0A - sleep(1) @@ -1192,32 +1192,33 @@ d x.name == 'Men +u ')%5B-1%5D.click()%0A%0A @@ -1529,16 +1529,17 @@ == 'Men +u ')%5B-1%5D.c @@ -2443,32 +2443,33 @@ arent.child('Men +u ').click()%0A d @@ -3104,32 +3104,32 @@ d x.showing)%5B0%5D%0A - name.parent. @@ -3138,16 +3138,17 @@ ild('Men +u ').click
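Decoded, each %0A-escaped hunk above appends a trailing "u" so the dogtail lookups match the renamed 'Menu' node. A sketch of the post-commit form, reconstructed from the hunks (not the verbatim patch):

# Post-commit lookups (sketch): every x.name == 'Men' / child('Men')
# comparison becomes 'Menu', e.g.
context.app.findChildren(lambda x: x.roleName == 'toggle button' and x.showing
                         and x.sensitive and x.name == 'Menu')[-1].click()
name.parent.child('Menu').click()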
9221d42cda7ba7a44d1462de75c0c53412998fb4
Remove unused code.
mysite/missions/tar/view_helpers.py
mysite/missions/tar/view_helpers.py
# This file is part of OpenHatch.
# Copyright (C) 2010 Jack Grigg
# Copyright (C) 2010 John Stumpo
# Copyright (C) 2010, 2011 OpenHatch, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from mysite.missions.base.view_helpers import *


class IncorrectTarFile(Exception):
    pass


class TarMission(object):
    WRAPPER_DIR_NAME = 'myproject-0.1'
    FILES = {
        'hello.c': '''#include <stdio.h>

int main(void)
{
  printf("Hello World\\n");
  return 0;
}
''',
        'Makefile': 'hello : hello.o\n'
    }

    @classmethod
    def check_tarfile(cls, tardata):
        """
        Validate that tardata is gzipped and contains the correct files in a wrapper directory.
        """
        try:
            tfile = tarfile.open(fileobj=StringIO(tardata), mode='r:gz')
        except tarfile.ReadError:
            raise IncorrectTarFile, 'Archive is not a valid gzipped tarball'

        # Check the filename list.
        filenames_wanted = [cls.WRAPPER_DIR_NAME] + \
            [os.path.join(cls.WRAPPER_DIR_NAME, filename)
             for filename in cls.FILES.keys()]
        for member in tfile.getmembers():
            if '/' not in member.name:
                if member.name in cls.FILES.keys():
                    raise IncorrectTarFile, 'No wrapper directory is present'
                elif member.isdir() and member.name != cls.WRAPPER_DIR_NAME:
                    raise IncorrectTarFile, 'Wrapper directory name is incorrect: "%s"' % member.name
            if member.name not in filenames_wanted:
                msg = 'An unexpected entry "%s" is present' % member.name
                if '/._' in member.name:
                    # This is an Apple Double file.
                    msg += '. You can read about how to remove it <a href="/wiki/Tar_hints_for_Mac_OS_X_users">on our wiki</a>.'
                raise IncorrectTarFile, msg
            filenames_wanted.remove(member.name)
            if member.name == cls.WRAPPER_DIR_NAME:
                if not member.isdir():
                    raise IncorrectTarFile, '"%s" should be a directory but is not' % member.name
            else:
                if not member.isfile():
                    raise IncorrectTarFile, 'Entry "%s" is not a file' % member.name
                if tfile.extractfile(member).read() != cls.FILES[member.name.split('/')[-1]]:
                    raise IncorrectTarFile, 'File "%s" has incorrect contents' % member.name
        if len(filenames_wanted) != 0:
            raise IncorrectTarFile, 'Archive does not contain all expected files (missing %s)' % (
                ', '.join('"%s"' % f for f in filenames_wanted))


class UntarMission(object):
    TARBALL_DIR_NAME = 'ghello-0.4'
    TARBALL_NAME = TARBALL_DIR_NAME + '.tar.gz'
    FILE_WE_WANT = TARBALL_DIR_NAME + '/ghello.c'

    @classmethod
    def synthesize_tarball(cls):
        tdata = StringIO()
        tfile = tarfile.open(fileobj=tdata, mode='w:gz')
        tfile.add(os.path.join(get_mission_data_path('tar'), cls.TARBALL_DIR_NAME), cls.TARBALL_DIR_NAME)
        tfile.close()
        return tdata.getvalue()

    @classmethod
    def get_contents_we_want(cls):
        '''Get the data for the file we want from the tarball.'''
        return open(os.path.join(get_mission_data_path('tar'), cls.FILE_WE_WANT)).read()
Python
0
@@ -1123,2128 +1123,8 @@ %7D%0A%0A - @classmethod%0A def check_tarfile(cls, tardata):%0A %22%22%22%0A Validate that tardata is gzipped and contains the correct files in a wrapper directory.%0A %22%22%22%0A try:%0A tfile = tarfile.open(fileobj=StringIO(tardata), mode='r:gz')%0A except tarfile.ReadError:%0A raise IncorrectTarFile, 'Archive is not a valid gzipped tarball'%0A%0A # Check the filename list.%0A filenames_wanted = %5Bcls.WRAPPER_DIR_NAME%5D + %5C%0A %5Bos.path.join(cls.WRAPPER_DIR_NAME, filename)%0A for filename in cls.FILES.keys()%5D%0A for member in tfile.getmembers():%0A if '/' not in member.name:%0A if member.name in cls.FILES.keys():%0A raise IncorrectTarFile, 'No wrapper directory is present'%0A elif member.isdir() and member.name != cls.WRAPPER_DIR_NAME:%0A raise IncorrectTarFile, 'Wrapper directory name is incorrect: %22%25s%22' %25 member.name%0A if member.name not in filenames_wanted:%0A msg = 'An unexpected entry %22%25s%22 is present' %25 member.name%0A if '/._' in member.name:%0A # This is an Apple Double file.%0A msg += '. You can read about how to remove it %3Ca href=%22/wiki/Tar_hints_for_Mac_OS_X_users%22%3Eon our wiki%3C/a%3E.'%0A raise IncorrectTarFile, msg%0A filenames_wanted.remove(member.name)%0A if member.name == cls.WRAPPER_DIR_NAME:%0A if not member.isdir():%0A raise IncorrectTarFile, '%22%25s%22 should be a directory but is not' %25 member.name%0A else:%0A if not member.isfile():%0A raise IncorrectTarFile, 'Entry %22%25s%22 is not a file' %25 member.name%0A if tfile.extractfile(member).read() != cls.FILES%5Bmember.name.split('/')%5B-1%5D%5D:%0A raise IncorrectTarFile, 'File %22%25s%22 has incorrect contents' %25 member.name%0A if len(filenames_wanted) != 0:%0A raise IncorrectTarFile, 'Archive does not contain all expected files (missing %25s)' %25 (%0A ', '.join('%22%25s%22' %25 f for f in filenames_wanted))%0A%0A %0Acla
abe44ff58d61376db95c57d87dadc77a3cee8735
version bump
mesos/cli/__init__.py
mesos/cli/__init__.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

__version__ = '0.1.3'
Python
0.000001
@@ -802,7 +802,7 @@ 0.1. -3 +4 '%0A
3c529c7c4f666488e3eadb6a4dd06b881e0ebb3e
Fix wrong assert function name
tests/unit/common/db/test_types.py
tests/unit/common/db/test_types.py
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Tests for custom sqlalchemy types"""

import mock
import sqlalchemy as sa

from rally.common.db import sa_types
from tests.unit import test


class JsonEncodedTest(test.TestCase):
    def test_impl(self):
        self.assertEqual(sa.Text, sa_types.JSONEncodedDict.impl)
        self.assertEqual(sa.Text, sa_types.JSONEncodedList.impl)
        self.assertEqual(sa.Text, sa_types.MutableJSONEncodedDict.impl)
        self.assertEqual(sa.Text, sa_types.MutableJSONEncodedList.impl)

    def test_process_bind_param(self):
        t = sa_types.JSONEncodedDict()
        self.assertEqual("{\"a\": 1}", t.process_bind_param({"a": 1}, None))

    def test_process_bind_param_none(self):
        t = sa_types.JSONEncodedDict()
        self.assertIsNone(t.process_bind_param(None, None))

    def test_process_result_value(self):
        t = sa_types.JSONEncodedDict()
        self.assertEqual({"a": 1}, t.process_result_value("{\"a\": 1}", None))
        t = sa_types.JSONEncodedList()
        self.assertEqual([[2, 1], [1, 2]], t.process_result_value(
            "[[2, 1], [1, 2]]", None))
        with mock.patch("json.loads") as mock_json_loads:
            t.process_result_value("[[2, 1], [1, 2]]", None)
            mock_json_loads.asser_called_once_with([(2, 1), (1, 2)])

    def test_process_result_value_none(self):
        t = sa_types.JSONEncodedDict()
        self.assertIsNone(t.process_result_value(None, None))
        t = sa_types.JSONEncodedList()
        self.assertIsNone(t.process_result_value(None, None))


class MutableDictTest(test.TestCase):
    def test_creation(self):
        sample = {"a": 1, "b": 2}
        d = sa_types.MutableDict(sample)
        self.assertEqual(sample, d)

    def test_coerce_dict(self):
        sample = {"a": 1, "b": 2}
        md = sa_types.MutableDict.coerce("test", sample)
        self.assertEqual(sample, md)
        self.assertIsInstance(md, sa_types.MutableDict)

    def test_coerce_mutable_dict(self):
        sample = {"a": 1, "b": 2}
        sample_md = sa_types.MutableDict(sample)
        md = sa_types.MutableDict.coerce("test", sample_md)
        self.assertEqual(sample, md)
        self.assertIs(sample_md, md)

    def test_coerce_unsupported(self):
        self.assertRaises(ValueError, sa_types.MutableDict.coerce, "test", [])

    @mock.patch.object(sa_types.MutableDict, "changed")
    def test_changed_on_setitem(self, mock_mutable_dict_changed):
        sample = {"a": 1, "b": 2}
        d = sa_types.MutableDict(sample)
        d["b"] = 3
        self.assertEqual({"a": 1, "b": 3}, d)
        self.assertEqual(1, mock_mutable_dict_changed.call_count)

    @mock.patch.object(sa_types.MutableDict, "changed")
    def test_changed_on_delitem(self, mock_mutable_dict_changed):
        sample = {"a": 1, "b": 2}
        d = sa_types.MutableDict(sample)
        del d["b"]
        self.assertEqual({"a": 1}, d)
        self.assertEqual(1, mock_mutable_dict_changed.call_count)


class MutableListTest(test.TestCase):
    def test_creation(self):
        sample = [1, 2, 3]
        d = sa_types.MutableList(sample)
        self.assertEqual(sample, d)

    def test_coerce_list(self):
        sample = [1, 2, 3]
        md = sa_types.MutableList.coerce("test", sample)
        self.assertEqual(sample, md)
        self.assertIsInstance(md, sa_types.MutableList)

    def test_coerce_mutable_list(self):
        sample = [1, 2, 3]
        sample_md = sa_types.MutableList(sample)
        md = sa_types.MutableList.coerce("test", sample_md)
        self.assertEqual(sample, md)
        self.assertIs(sample_md, md)

    def test_coerce_unsupported(self):
        self.assertRaises(ValueError, sa_types.MutableList.coerce, "test", {})

    @mock.patch.object(sa_types.MutableList, "changed")
    def test_changed_on_append(self, mock_mutable_list_changed):
        sample = [1, 2, 3]
        lst = sa_types.MutableList(sample)
        lst.append(4)
        self.assertEqual([1, 2, 3, 4], lst)
        self.assertEqual(1, mock_mutable_list_changed.call_count)

    @mock.patch.object(sa_types.MutableList, "changed")
    def test_changed_on_setitem(self, mock_mutable_list_changed):
        sample = [1, 2, 3]
        lst = sa_types.MutableList(sample)
        lst[2] = 4
        self.assertEqual([1, 2, 4], lst)
        self.assertEqual(1, mock_mutable_list_changed.call_count)

    @mock.patch.object(sa_types.MutableList, "changed")
    def test_changed_on_delitem(self, mock_mutable_list_changed):
        sample = [1, 2, 3]
        lst = sa_types.MutableList(sample)
        del lst[2]
        self.assertEqual([1, 2], lst)
        self.assertEqual(1, mock_mutable_list_changed.call_count)


class TimeStampTestCase(test.TestCase):
    def test_process_bind_param(self):
        self.assertIsNone(sa_types.TimeStamp().process_bind_param(
            None, dialect=None))

        self.assertEqual(
            1498561749348996,
            sa_types.TimeStamp().process_bind_param(1498561749.348996,
                                                    dialect=None))

    def test_process_result_value(self):
        self.assertIsNone(sa_types.TimeStamp().process_result_value(
            None, dialect=None))

        self.assertEqual(
            1498561749.348996,
            sa_types.TimeStamp().process_result_value(1498561749348996,
                                                      dialect=None))
Python
0.000061
@@ -1830,16 +1830,17 @@ ds.asser +t _called_ @@ -1853,24 +1853,26 @@ ith( +%22%5B %5B -( 2, 1 -), ( +%5D, %5B 1, 2 -) %5D +%5D%22 )%0A%0A
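Decoded, the %0A-escaped hunk above completes the misspelled mock assertion and passes the raw JSON string that json.loads actually receives. A sketch of the post-commit line, reconstructed from the hunk:

# Post-commit assertion (sketch): "asser" gains its missing "t" and the
# expected argument becomes the serialized string, not the parsed tuples.
mock_json_loads.assert_called_once_with("[[2, 1], [1, 2]]")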
91fe55c6d6c8e49dcc18cef44bc24d6aa8a1d85b
Add more futher dependancies
clubs/migrations/0050_change_city_fields.py
clubs/migrations/0050_change_city_fields.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        # We manusally added these dependencies because all of them
        # store city code instead of the full city name. So they have
        # to be applied before this one.
        ('hpc', '0003_add_hpc_clubs'),
        ('researchhub', '0002_add_club'),
        ('events', '0013_add_2nd_hpc'),
        ('bulb', '0017_add_2017_bulb_clubs'),
        ('studentguide', '0012_add_studentguide_clubs'),
        ('clubs', '0049_logo_path'),
    ]

    operations = [
        migrations.AlterField(
            model_name='club',
            name='city',
            field=models.CharField(max_length=20, verbose_name='\u0627\u0644\u0645\u062f\u064a\u0646\u0629', choices=[('\u0627\u0644\u0631\u064a\u0627\u0636', '\u0627\u0644\u0631\u064a\u0627\u0636'), ('\u062c\u062f\u0629', '\u062c\u062f\u0629'), ('\u0627\u0644\u0623\u062d\u0633\u0627\u0621', '\u0627\u0644\u0623\u062d\u0633\u0627\u0621')]),
        ),
        migrations.AlterField(
            model_name='college',
            name='city',
            field=models.CharField(max_length=20, verbose_name='\u0627\u0644\u0645\u062f\u064a\u0646\u0629', choices=[('\u0627\u0644\u0631\u064a\u0627\u0636', '\u0627\u0644\u0631\u064a\u0627\u0636'), ('\u062c\u062f\u0629', '\u062c\u062f\u0629'), ('\u0627\u0644\u0623\u062d\u0633\u0627\u0621', '\u0627\u0644\u0623\u062d\u0633\u0627\u0621')]),
        ),
        migrations.AlterField(
            model_name='team',
            name='city',
            field=models.CharField(default=b'', max_length=20, verbose_name='\u0627\u0644\u0645\u062f\u064a\u0646\u0629', blank=True, choices=[('\u0627\u0644\u0631\u064a\u0627\u0636', '\u0627\u0644\u0631\u064a\u0627\u0636'), ('\u062c\u062f\u0629', '\u062c\u062f\u0629'), ('\u0627\u0644\u0623\u062d\u0633\u0627\u0621', '\u0627\u0644\u0623\u062d\u0633\u0627\u0621')]),
        ),
    ]
Python
0.000002
@@ -487,30 +487,25 @@ '00 -17_add_2017_bulb_clubs +27_readathon_team '),%0A
68eeda85605fa84d7bea69dfeab97b3b1278b4d4
fix typo
examples/wordcloud_cn.py
examples/wordcloud_cn.py
# - * - coding: utf - 8 -*-
"""
create wordcloud with chinese
=============================

Wordcloud is a very good tools, but if you want to create
Chinese wordcloud only wordcloud is not enough. The file shows
how to use wordcloud with Chinese. First, you need a Chinese
word segmentation library jieba, jieba is now the most elegant
the most popular Chinese word segmentation tool in python.
You can use 'PIP install jieba'. To install it. As you can see,
at the same time using wordcloud with jieba very convenient
"""

import jieba
jieba.enable_parallel(4)  # Setting up parallel processes :4 ,but unable to run on Windows
from os import path
from scipy.misc import imread
import matplotlib.pyplot as plt
import os
# jieba.load_userdict("txt\userdict.txt")
# add userdict by load_userdict()
from wordcloud import WordCloud, ImageColorGenerator

# get data directory (using getcwd() is needed to support running example in generated IPython notebook)
d = path.dirname(__file__) if "__file__" in locals() else os.getcwd()

stopwords_path = d + '/wc_cn/stopwords_cn_en.txt'
# Chinese fonts must be set
font_path = d + '/fonts/SourceHanSerif/SourceHanSerifK-Light.otf'

# the path to save worldcloud
imgname1 = d + '/wc_cn/LuXun.jpg'
imgname2 = d + '/wc_cn/LuXun_colored.jpg'
# read the mask / color image taken from
back_coloring = imread(path.join(d, d + '/wc_cn/LuXun_color.jpg'))

# Read the whole text.
text = open(path.join(d, d + '/wc_cn/CalltoArms.txt')).read()

# if you want use wordCloud,you need it
# add userdict by add_word()
userdict_list = ['阿Q', '孔乙己', '单四嫂子']


# The function for processing text with Jieba
def jieba_processing_txt(text):
    for word in userdict_list:
        jieba.add_word(word)

    mywordlist = []
    seg_list = jieba.cut(text, cut_all=False)
    liststr = "/ ".join(seg_list)

    with open(stopwords_path, encoding='utf-8') as f_stop:
        f_stop_text = f_stop.read()
        f_stop_seg_list = f_stop_text.splitlines()

    for myword in liststr.split('/'):
        if not (myword.strip() in f_stop_seg_list) and len(myword.strip()) > 1:
            mywordlist.append(myword)
    return ' '.join(mywordlist)


wc = WordCloud(font_path=font_path, background_color="white", max_words=2000, mask=back_coloring,
               max_font_size=100, random_state=42, width=1000, height=860, margin=2,)

wc.generate(jieba_processing_txt(text))

# create coloring from image
image_colors_default = ImageColorGenerator(back_coloring)

plt.figure()
# recolor wordcloud and show
plt.imshow(wc, interpolation="bilinear")
plt.axis("off")
plt.show()

# save wordcloud
wc.to_file(path.join(d, imgname1))

# create coloring from image
image_colors_byImg = ImageColorGenerator(back_coloring)

# show
# we could also give color_func=image_colors directly in the constructor
plt.imshow(wc.recolor(color_func=image_colors_byImg), interpolation="bilinear")
plt.axis("off")
plt.figure()
plt.imshow(back_coloring, interpolation="bilinear")
plt.axis("off")
plt.show()

# save wordcloud
wc.to_file(path.join(d, imgname2))
Python
0.999991
@@ -115,17 +115,16 @@ ood tool -s , but if
63667e0d492c16e0c3bc4a398044a60df695cc61
Add more side effects
tests/unit/states/ssh_auth_test.py
tests/unit/states/ssh_auth_test.py
# -*- coding: utf-8 -*-
'''
    :codeauthor: :email:`Jayesh Kariya <jayeshk@saltstack.com>`
'''
# Import Python libs
from __future__ import absolute_import

# Import Salt Testing Libs
from salttesting import skipIf, TestCase
from salttesting.mock import (
    NO_MOCK,
    NO_MOCK_REASON,
    MagicMock,
    patch
)
from salttesting.helpers import ensure_in_syspath

ensure_in_syspath('../../')

# Import Salt Libs
from salt.states import ssh_auth

ssh_auth.__salt__ = {}
ssh_auth.__opts__ = {}


@skipIf(NO_MOCK, NO_MOCK_REASON)
class SshAuthTestCase(TestCase):
    '''
    Test cases for salt.states.ssh_auth
    '''
    # 'present' function tests: 1

    def test_present(self):
        '''
        Test to verifies that the specified SSH key
        is present for the specified user.
        '''
        name = 'sshkeys'
        user = 'root'
        source = 'salt://ssh_keys/id_rsa.pub'

        ret = {'name': name,
               'changes': {},
               'result': True,
               'comment': ''}

        mock = MagicMock(return_value='exists')
        mock_data = MagicMock(side_effect=['replace', 'new'])
        with patch.dict(ssh_auth.__salt__, {'ssh.check_key': mock,
                                            'ssh.set_auth_key': mock_data}):
            with patch.dict(ssh_auth.__opts__, {'test': True}):
                comt = ('The authorized host key sshkeys is already '
                        'present for user root')
                ret.update({'comment': comt})
                self.assertDictEqual(ssh_auth.present(name, user, source),
                                     ret)

            with patch.dict(ssh_auth.__opts__, {'test': False}):
                comt = ('The authorized host key sshkeys '
                        'for user root was updated')
                ret.update({'comment': comt, 'changes': {name: 'Updated'}})
                self.assertDictEqual(ssh_auth.present(name, user, source),
                                     ret)

                comt = ('The authorized host key sshkeys '
                        'for user root was added')
                ret.update({'comment': comt, 'changes': {name: 'New'}})
                self.assertDictEqual(ssh_auth.present(name, user, source),
                                     ret)

    # 'absent' function tests: 1

    def test_absent(self):
        '''
        Test to verifies that the specified SSH key is absent.
        '''
        name = 'sshkeys'
        user = 'root'
        source = 'salt://ssh_keys/id_rsa.pub'

        ret = {'name': name,
               'changes': {},
               'result': None,
               'comment': ''}

        mock = MagicMock(side_effect=['User authorized keys file not present',
                                      'Key removed'])
        mock_up = MagicMock(side_effect=['update', 'updated'])
        with patch.dict(ssh_auth.__salt__, {'ssh.rm_auth_key': mock,
                                            'ssh.check_key': mock_up}):
            with patch.dict(ssh_auth.__opts__, {'test': True}):
                comt = ('Key sshkeys for user root is set for removal')
                ret.update({'comment': comt})
                self.assertDictEqual(ssh_auth.absent(name, user, source), ret)

                comt = ('Key is already absent')
                ret.update({'comment': comt, 'result': True})
                self.assertDictEqual(ssh_auth.absent(name, user, source), ret)

            with patch.dict(ssh_auth.__opts__, {'test': False}):
                comt = ('User authorized keys file not present')
                ret.update({'comment': comt, 'result': False})
                self.assertDictEqual(ssh_auth.absent(name, user, source), ret)

                comt = ('Key removed')
                ret.update({'comment': comt, 'result': True,
                            'changes': {name: 'Removed'}})
                self.assertDictEqual(ssh_auth.absent(name, user, source), ret)


if __name__ == '__main__':
    from integration import run_tests
    run_tests(SshAuthTestCase, needs_daemon=False)
Python
0
@@ -2613,16 +2613,174 @@ esent',%0A + 'User authorized keys file not present',%0A 'User authorized keys file not present',%0A
ca31cc946de68bc7eb9a5b03601134a70e4b1564
Add SET to blacklisted keywords (#475)
explorer/app_settings.py
explorer/app_settings.py
from django.conf import settings
from pydoc import locate

# The 'correct' configuration for Explorer looks like:

# EXPLORER_CONNECTIONS = {
#     'Original Database': 'my_important_database_readonly_connection',
#     'Client Database 2': 'other_database_connection'
# }
# EXPLORER_DEFAULT_CONNECTION = 'my_important_database_readonly_connection'

EXPLORER_CONNECTIONS = getattr(settings, 'EXPLORER_CONNECTIONS', {})
EXPLORER_DEFAULT_CONNECTION = getattr(
    settings, 'EXPLORER_DEFAULT_CONNECTION', None
)

# Change the behavior of explorer
EXPLORER_SQL_BLACKLIST = getattr(
    settings,
    'EXPLORER_SQL_BLACKLIST',
    (
        'ALTER',
        'RENAME ',
        'DROP',
        'TRUNCATE',
        'INSERT INTO',
        'UPDATE',
        'REPLACE',
        'DELETE',
        'CREATE TABLE',
        'GRANT',
        'OWNER TO'
    )
)
EXPLORER_SQL_WHITELIST = getattr(
    settings,
    'EXPLORER_SQL_WHITELIST',
    (
        'CREATED',
        'UPDATED',
        'DELETED',
        'REGEXP_REPLACE'
    )
)

EXPLORER_DEFAULT_ROWS = getattr(settings, 'EXPLORER_DEFAULT_ROWS', 1000)

EXPLORER_SCHEMA_EXCLUDE_TABLE_PREFIXES = getattr(
    settings,
    'EXPLORER_SCHEMA_EXCLUDE_TABLE_PREFIXES',
    (
        'auth_',
        'contenttypes_',
        'sessions_',
        'admin_'
    )
)
EXPLORER_SCHEMA_INCLUDE_TABLE_PREFIXES = getattr(
    settings, 'EXPLORER_SCHEMA_INCLUDE_TABLE_PREFIXES', None
)
EXPLORER_SCHEMA_INCLUDE_VIEWS = getattr(
    settings, 'EXPLORER_SCHEMA_INCLUDE_VIEWS', False
)

EXPLORER_TRANSFORMS = getattr(settings, 'EXPLORER_TRANSFORMS', [])
EXPLORER_PERMISSION_VIEW = getattr(
    settings, 'EXPLORER_PERMISSION_VIEW', lambda r: r.user.is_staff
)
EXPLORER_PERMISSION_CHANGE = getattr(
    settings, 'EXPLORER_PERMISSION_CHANGE', lambda r: r.user.is_staff
)
EXPLORER_RECENT_QUERY_COUNT = getattr(
    settings, 'EXPLORER_RECENT_QUERY_COUNT', 10
)
EXPLORER_ASYNC_SCHEMA = getattr(settings, 'EXPLORER_ASYNC_SCHEMA', False)

DEFAULT_EXPORTERS = [
    ('csv', 'explorer.exporters.CSVExporter'),
    ('json', 'explorer.exporters.JSONExporter'),
]
try:
    import xlsxwriter  # noqa
    DEFAULT_EXPORTERS.insert(
        1,
        ('excel', 'explorer.exporters.ExcelExporter'),
    )
except ImportError:
    pass
EXPLORER_DATA_EXPORTERS = getattr(
    settings, 'EXPLORER_DATA_EXPORTERS', DEFAULT_EXPORTERS
)
CSV_DELIMETER = getattr(settings, "EXPLORER_CSV_DELIMETER", ",")

# API access
EXPLORER_TOKEN = getattr(settings, 'EXPLORER_TOKEN', 'CHANGEME')

# These are callable to aid testability by dodging the settings cache.
# There is surely a better pattern for this, but this'll hold for now.
EXPLORER_GET_USER_QUERY_VIEWS = lambda: getattr(  # noqa
    settings, 'EXPLORER_USER_QUERY_VIEWS', {}
)
EXPLORER_TOKEN_AUTH_ENABLED = lambda: getattr(  # noqa
    settings, 'EXPLORER_TOKEN_AUTH_ENABLED', False
)
EXPLORER_NO_PERMISSION_VIEW = lambda: locate(# noqa
    getattr(
        settings,
        'EXPLORER_NO_PERMISSION_VIEW',
        'explorer.views.auth.safe_login_view_wrapper',
    ),
)

# Async task related. Note that the EMAIL_HOST settings must be set up for
# email to work.
ENABLE_TASKS = getattr(settings, "EXPLORER_TASKS_ENABLED", False)
S3_ACCESS_KEY = getattr(settings, "EXPLORER_S3_ACCESS_KEY", None)
S3_SECRET_KEY = getattr(settings, "EXPLORER_S3_SECRET_KEY", None)
S3_BUCKET = getattr(settings, "EXPLORER_S3_BUCKET", None)
FROM_EMAIL = getattr(
    settings, 'EXPLORER_FROM_EMAIL', 'django-sql-explorer@example.com'
)
UNSAFE_RENDERING = getattr(settings, "EXPLORER_UNSAFE_RENDERING", False)
Python
0
@@ -823,16 +823,31 @@ WNER TO' +,%0A 'SET' %0A )%0A)
d10bb3695ee93ffd8b91d4d82adaf484de9e9bf1
Rename NeuronNetwork to NeuralNetwork
ANN.py
ANN.py
from random import uniform


class Neuron:
    def __init__(self, parents=[]):
        self.parents = [{
            'neuron': parent,
            'weight': uniform(-1, 1),
            'slope': uniform(-1, 1),
        } for parent in parents]

    def calculate(self, increment=0):
        self.output = sum([parent['neuron'].output * (parent['weight'] + increment * parent['slope'])
                           for parent in self.parents]) > 0

    def mutate(self, increment):
        for parent in self.parents:
            parent['weight'] += increment * parent['slope']
            parent['slope'] = uniform(-1, 1)


class NeuronNetwork:
    def __init__(self, inputs, outputs, hidden, rows):
        self.bias = Neuron()
        self.neurons = []
        for row in xrange(rows):
            self.neurons.append([])
            if row == 0:
                for input_ in xrange(inputs):
                    self.neurons[row].append(Neuron(parents=[]))
            elif row == rows - 1:
                for output in xrange(outputs):
                    self.neurons[row].append(Neuron(parents=self.neurons[row - 1] + [self.bias]))
            else:
                for column in xrange(hidden):
                    self.neurons[row].append(Neuron(parents=self.neurons[row - 1] + [self.bias]))
        self.bias.output = True

    def calculate(self, inputs, increment=0):
        for i, neuron_row in enumerate(self.neurons):
            for j, neuron in enumerate(neuron_row):
                if i == 0:
                    neuron.output = inputs[j]
                else:
                    neuron.calculate(increment=increment)
        return [neuron.output for neuron in self.neurons[-1]]

    def mutate(self, increment):
        for neuron_row in self.neurons:
            for neuron in neuron_row:
                neuron.mutate(increment=increment)
Python
0.99959
@@ -597,18 +597,18 @@ ass Neur -on +al Network:
74935550f886edfefa26298a98874e4c2dd2ab53
Fold a line
extenteten/util.py
extenteten/util.py
import functools

import numpy
import tensorflow as tf


def static_shape(tensor):
    return tf.convert_to_tensor(tensor).get_shape().as_list()


def static_shapes(*tensors):
    return _map_to_list(static_shape, tensors)


def static_rank(tensor):
    return len(static_shape(tf.convert_to_tensor(tensor)))


def static_ranks(*tensors):
    return _map_to_list(static_rank, tensors)


def _map_to_list(func, xs):
    return list(map(func, xs))


def dtypes(*tensors):
    return [tensor.dtype for tensor in tensors]


def func_scope(name=None, initializer=None):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            with tf.variable_scope(name or func.__name__, initializer=initializer):
                return func(*args, **kwargs)
        return wrapper
    return decorator


def on_device(device_name):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            with tf.device(device_name):
                return func(*args, **kwargs)
        return wrapper
    return decorator


def dimension_indices(tensor, start=0):
    return [*range(static_rank(tensor))][start:]


@func_scope()
def dtype_min(dtype):
    return tf.constant(_numpy_min(dtype.as_numpy_dtype))


def _numpy_min(dtype):
    return numpy.finfo(dtype).min


@func_scope()
def dtype_epsilon(dtype):
    return tf.constant(_numpy_epsilon(dtype.as_numpy_dtype))


def _numpy_epsilon(dtype):
    return numpy.finfo(dtype).eps


def flatten(x):
    return tf.reshape(x, [-1])


def rename(x, name):
    return tf.identity(x, name)
Python
0.999989
@@ -708,16 +708,51 @@ _name__, +%0A initial
952d70244f885dc194d83d5bb598fa9ebcdfceb2
Add no store command-line option to trends util script
app/utils/insert/trendsCountryAndTowns.py
app/utils/insert/trendsCountryAndTowns.py
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Utility to get trend data and add to the database.

Expects a single country name and uses the country and child town WOEIDs
to get trend data.

Run file directly (not as a module) and with `--help` flag in order to see
usage instructions.
"""
import time

# Allow imports to be done when executing this file directly.
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(
    os.path.dirname(__file__), os.path.pardir, os.path.pardir)
))

from lib import places, trends
from lib.query.place import countryReport
from lib.config import AppConf

appConf = AppConf()


def listCountries():
    print u'See available countries below...\n'
    countryReport.showTownCountByCountry(byName=True)
    print u'Enter a country name from the above an argument.'
    print u'Or, use `--default` flag to get the configured country, which ' \
        u'is currently `{}`.'.format(appConf.get('TrendCron', 'countryName'))


def main(args):
    """
    Provide help for the user or runs as a produce to get data from
    the Twitter API for the selected country.

    The max time is set in the app configuration file. If the duration of
    the current iteration was less than the required max then we sleep for
    the remaining number of seconds to make the iteration's total time
    close to 12 seconds. If the duration was more, or the max was
    configured to zero, no waiting is applied.
    """
    if not args or set(args) & set(('-h', '--help')):
        print u'Usage: ./app/utils/trendsCountryAndTowns.py'\
            ' [-d|--default|COUNTRYNAME] [-s|--show] [-f|--fast] [-h|--help]'
    elif set(args) & set(('-s', '--show')):
        listCountries()
    else:
        print u'Starting job for trends by country and towns.'

        if set(args) & set(('-d', '--default')):
            # Use configured country name.
            countryName = appConf.get('TrendCron', 'countryName')
        else:
            # Set country name string from arguments list, ignoring flags.
            words = [word for word in args if not word.startswith('-')]
            countryName = ' '.join(words)
        assert countryName, ('Country name input is missing.')

        if set(args) & set(('-f', '--fast')):
            # User can override the waiting with a --fast flag, which
            # means queries will be done quick succession, at least within
            # each 15 min rate-limited window.
            minSeconds = 0
        else:
            minSeconds = appConf.getint('TrendCron', 'minSeconds')

        woeidIDs = places.countryAndTowns(countryName)
        for woeid in woeidIDs:
            start = time.time()
            trends.insertTrendsForWoeid(woeid)
            duration = time.time() - start
            print u" took {}s".format(int(duration))
            diff = minSeconds - duration
            if diff > 0:
                time.sleep(diff)


main(sys.argv[1:])
Python
0
@@ -1632,16 +1632,49 @@ %7C--fast%5D +' %5C%0A ' %5B-n%7C--no-store%5D %5B-h%7C--h @@ -2646,16 +2646,77 @@ tryName) +%0A delete = bool(set(args) & set(('-n', '--no-store'))) %0A%0A @@ -2817,16 +2817,31 @@ id(woeid +, delete=delete )%0A
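Decoded, the %0A-escaped hunks above document a new [-n|--no-store] flag in the usage line, derive a boolean from it, and thread it into the insert call. A sketch of the post-commit lines, reconstructed from the hunks (not the verbatim patch):

# Post-commit lines (sketch), inside main(args):
delete = bool(set(args) & set(('-n', '--no-store')))
# ...and later, inside the WOEID loop:
trends.insertTrendsForWoeid(woeid, delete=delete)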
d6a58142810c0729f15220a9933ace66881d8403
Fix the test
tests/test_callbacks.py
tests/test_callbacks.py
import pytest
import random

from aiotg import Bot, Chat, InlineQuery
from aiotg import MESSAGE_TYPES

from testfixtures import LogCapture

API_TOKEN = "test_token"
bot = Bot(API_TOKEN)


def custom_msg(msg):
    template = {
        "message_id": 0,
        "from": {"first_name": "John"},
        "chat": {"id": 0, "type": "private"}
    }
    template.update(msg)
    return template


def text_msg(text):
    return custom_msg({"text": text})


def inline_query(query):
    return {
        "from": {"first_name": "John"},
        "offset": "",
        "query": query,
        "id": "9999"
    }


def callback_query(data):
    return {
        "from": {"first_name": "John"},
        "data": data,
        "id": "9999",
        "message": custom_msg({})
    }


def test_command():
    called_with = None

    @bot.command(r"/echo (.+)")
    def echo(chat, match):
        nonlocal called_with
        called_with = match.group(1)
        # Let's check sender repr as well
        assert repr(chat.sender) == "John"

    bot._process_message(text_msg("/echo foo"))
    assert called_with == "foo"


def test_default():
    called_with = None

    @bot.default
    def default(chat, message):
        nonlocal called_with
        called_with = message["text"]

    bot._process_message(text_msg("foo bar"))
    assert called_with == "foo bar"


def test_inline():
    called_with = None

    @bot.inline
    def inline(query):
        nonlocal called_with
        called_with = query.query

    bot._process_inline_query(inline_query("foo bar"))
    assert called_with == "foo bar"


def test_callback_default():
    bot._process_callback_query(callback_query("foo"))


def test_callback():
    called_with = None

    @bot.callback
    def callback(chat, cq):
        nonlocal called_with
        called_with = cq.data

    bot._process_callback_query(callback_query("foo"))
    assert called_with == "foo"


def test_updates():
    update = {
        "update_id": 0,
        "message": text_msg("foo bar")
    }
    updates = {
        "result": [update],
        "ok": True
    }
    called_with = None

    @bot.default
    def default(chat, message):
        nonlocal called_with
        called_with = message["text"]

    bot._process_updates(updates)
    assert called_with == "foo bar"


def test_updates_failed():
    updates = {
        "ok": False,
        "description": "Opps"
    }

    with LogCapture() as l:
        bot._process_updates(updates)
        l.check(('aiotg', 'ERROR', 'getUpdates error: Opps'))


@pytest.mark.parametrize("mt", MESSAGE_TYPES)
def test_handle(mt):
    called_with = None

    @bot.handle(mt)
    def handle(chat, media):
        nonlocal called_with
        called_with = media

    value = random.random()
    bot._process_message(custom_msg({ mt: value }))
    assert called_with == value


class MockBot:
    def __init__(self):
        self.calls = {}

    def api_call(self, method, **params):
        self.calls[method] = params

    def send_message(self, chat_id, text, **kwargs):
        return self.api_call(
            "sendMessage",
            chat_id=chat_id,
            text=text,
            **kwargs
        )


def test_chat_methods():
    bot = MockBot()
    chat_id = 42
    chat = Chat(bot, chat_id)

    chat.send_text("hello")
    assert "sendMessage" in bot.calls
    assert bot.calls["sendMessage"]["text"] == "hello"

    # Just test a single wrapper, the rest are same
    chat.send_photo()
    assert "sendPhoto" in bot.calls
    assert isinstance(bot.calls["sendPhoto"]["chat_id"], str)


def test_chat_reply():
    bot = MockBot()
    msg = text_msg("Reply!")
    chat = Chat.from_message(bot, msg)

    chat.reply("Hi " + repr(chat.sender))
    assert "sendMessage" in bot.calls
    assert bot.calls["sendMessage"]["text"] == "Hi John"


def test_inline_answer():
    bot = MockBot()
    src = inline_query("Answer!")
    iq = InlineQuery(bot, src)

    results = [{
        "type": "article",
        "id": "000",
        "title": "test",
        "message_text": "Foo bar"
    }]
    iq.answer(results)
    assert "answerInlineQuery" in bot.calls
    assert isinstance(bot.calls["answerInlineQuery"]["results"], str)


def test_edit_message():
    bot = MockBot()
    chat_id = 42
    chat = Chat(bot, chat_id)

    message_id = chat.send_text("hello")["result"]["message_id"]
    assert "sendMessage" in bot.calls
    assert bot.calls["sendMessage"]["text"] == "hello"

    chat.edit_text(message_id, "bye")
    assert "editMessageText" in bot.calls
    assert bot.calls["editMessageText"]["text"] == "bye"
    assert bot.calls["editMessageText"]["chat_id"] == chat_id
    assert bot.calls["editMessageText"]["message_id"] == message_id
Python
0.999999
@@ -3152,16 +3152,236 @@ )%0A%0A + def edit_message_text(self, chat_id, message_id, text, **kwargs):%0A return self.api_call(%0A %22editMessageText%22,%0A message_id=message_id,%0A text=text,%0A **kwargs%0A )%0A%0A %0Adef tes @@ -4436,32 +4436,54 @@ hat_id = 42%0A +message_id = 1337%0A chat = Chat(bot, @@ -4495,167 +4495,8 @@ _id) -%0A%0A message_id = chat.send_text(%22hello%22)%5B%22result%22%5D%5B%22message_id%22%5D%0A assert %22sendMessage%22 in bot.calls%0A assert bot.calls%5B%22sendMessage%22%5D%5B%22text%22%5D == %22hello%22 %0A%0A @@ -4634,70 +4634,8 @@ ye%22%0A - assert bot.calls%5B%22editMessageText%22%5D%5B%22chat_id%22%5D == chat_id%0A
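Decoded, the first %0A-escaped hunk above gives MockBot an editMessageText stub; note it does not forward chat_id, which is why the later hunks drop the chat_id assertion and the send_text round trip from test_edit_message. A sketch reconstructed from the hunk:

    def edit_message_text(self, chat_id, message_id, text, **kwargs):
        # chat_id is accepted for API parity but not forwarded to api_call.
        return self.api_call(
            "editMessageText",
            message_id=message_id,
            text=text,
            **kwargs
        )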
14d1cbae9323e3ff7d80480b39a96b76cada94b0
Add a clear_requested signal
pyqode/core/frontend/widgets/menu_recents.py
pyqode/core/frontend/widgets/menu_recents.py
""" Provides a menu that display the list of recent files and a RecentFilesManager which use your application's QSettings to store the list of recent files. """ import os from pyqode.qt import QtCore, QtWidgets class RecentFilesManager: """ Manages a list of recent files. The list of files is stored in your application QSettings. """ #: Maximum number of files kept in the list. max_recent_files = 15 def __init__(self, organisation, application): self._settings = QtCore.QSettings(organisation, application) def clear(self): """ Clears recent files in QSettings """ self._settings.setValue('recentFiles', []) def get_recent_files(self): """ Gets the list of recent files. (files that do not exists anymore are automatically filtered) """ ret_val = [] files = self._settings.value('recentFiles', []) # empty list if files is None: files = [] # single file if isinstance(files, str): files = [files] # filter files, remove files that do not exist anymore for file in files: if os.path.exists(file): ret_val.append(file) return ret_val def open_file(self, file): """ Adds a file to the list (and move it to the top of the list if the file already exists) """ files = self.get_recent_files() try: files.remove(file) except ValueError: pass files.insert(0, file) # discard old files del files[self.max_recent_files:] self._settings.setValue('recentFiles', files) class MenuRecentFiles(QtWidgets.QMenu): """ Menu that manage the list of recent files. To use the menu, simply pass connect to the open_requested signal. """ #: Signal emitted when the user clicked on a recent file action. #: The parameter is the path of the file to open. open_requested = QtCore.Signal(str) def __init__(self, parent, recent_files_manager=None, title='Recent files'): """ :param organisation: name of your organisation as used for your own QSettings :param application: name of your application as used for your own QSettings :param parent: parent object """ super().__init__(title, parent) #: Recent files manager self.manager = recent_files_manager #: List of recent files actions self.recent_files_actions = [] self.update_actions() def update_actions(self): """ Updates the list of actions. """ self.clear() self.recent_files_actions[:] = [] for file in self.manager.get_recent_files(): action = QtWidgets.QAction(self) action.setText(os.path.split(file)[1]) action.setData(file) action.triggered.connect(self._on_action_triggered) self.addAction(action) self.recent_files_actions.append(action) self.addSeparator() action_clear = QtWidgets.QAction('Clear list', self) action_clear.triggered.connect(self.clear_recent_files) self.addAction(action_clear) def clear_recent_files(self): """ Clear recent files and menu. """ self.manager.clear() self.update_actions() def _on_action_triggered(self): """ Emits open_requested when a recent file action has been triggered. """ action = self.sender() assert isinstance(action, QtWidgets.QAction) path = action.data() self.open_requested.emit(path) self.update_actions()
Python
0.000001
@@ -2035,16 +2035,54 @@ nal(str) +%0A clear_requested = QtCore.Signal() %0A%0A de @@ -3495,32 +3495,68 @@ update_actions() +%0A self.clear_requested.emit() %0A%0A def _on_ac
fdd8e33b58f8ffba50dff86931a47daf396903e8
Revert tweak to TokenPermissions.has_permission()
netbox/netbox/api/authentication.py
netbox/netbox/api/authentication.py
from django.conf import settings
from rest_framework import authentication, exceptions
from rest_framework.permissions import BasePermission, DjangoObjectPermissions, SAFE_METHODS

from users.models import Token


class TokenAuthentication(authentication.TokenAuthentication):
    """
    A custom authentication scheme which enforces Token expiration times.
    """
    model = Token

    def authenticate_credentials(self, key):
        model = self.get_model()
        try:
            token = model.objects.prefetch_related('user').get(key=key)
        except model.DoesNotExist:
            raise exceptions.AuthenticationFailed("Invalid token")

        # Enforce the Token's expiration time, if one has been set.
        if token.is_expired:
            raise exceptions.AuthenticationFailed("Token expired")

        if not token.user.is_active:
            raise exceptions.AuthenticationFailed("User inactive")

        return token.user, token


class TokenPermissions(DjangoObjectPermissions):
    """
    Custom permissions handler which extends the built-in DjangoModelPermissions to validate a
    Token's write ability for unsafe requests (POST/PUT/PATCH/DELETE).
    """
    # Override the stock perm_map to enforce view permissions
    perms_map = {
        'GET': ['%(app_label)s.view_%(model_name)s'],
        'OPTIONS': [],
        'HEAD': ['%(app_label)s.view_%(model_name)s'],
        'POST': ['%(app_label)s.add_%(model_name)s'],
        'PUT': ['%(app_label)s.change_%(model_name)s'],
        'PATCH': ['%(app_label)s.change_%(model_name)s'],
        'DELETE': ['%(app_label)s.delete_%(model_name)s'],
    }

    def __init__(self):

        # LOGIN_REQUIRED determines whether read-only access is provided to anonymous users.
        self.authenticated_users_only = settings.LOGIN_REQUIRED

        super().__init__()

    def _verify_write_permission(self, request):

        # If token authentication is in use, verify that the token allows write operations (for unsafe methods).
        if request.method in SAFE_METHODS or request.auth.write_enabled:
            return True

    def has_permission(self, request, view):

        # User must be authenticated
        if not request.user.is_authenticated:
            return False

        # Enforce Token write ability
        if isinstance(request.auth, Token) and not self._verify_write_permission(request):
            return False

        return super().has_permission(request, view)

    def has_object_permission(self, request, view, obj):

        # Enforce Token write ability
        if isinstance(request.auth, Token) and not self._verify_write_permission(request):
            return False

        return super().has_object_permission(request, view, obj)


class IsAuthenticatedOrLoginNotRequired(BasePermission):
    """
    Returns True if the user is authenticated or LOGIN_REQUIRED is False.
    """
    def has_permission(self, request, view):
        if not settings.LOGIN_REQUIRED:
            return True
        return request.user.is_authenticated
Python
0
@@ -2149,117 +2149,8 @@ ):%0A%0A - # User must be authenticated%0A if not request.user.is_authenticated:%0A return False%0A%0A
f2cc74d79abf42c0f199c48ef9110bce6cec45b4
Update alcatel_sros_ssh.py
netmiko/alcatel/alcatel_sros_ssh.py
netmiko/alcatel/alcatel_sros_ssh.py
'''
Alcatel-Lucent SROS support
'''
from netmiko.ssh_connection import SSHConnection


class AlcatelSrosSSH(SSHConnection):
    '''
    SROS support
    '''
    def session_preparation(self):
        self.disable_paging(command="\environment no more\n")

    def enable(self):
        pass
Python
0
@@ -222,17 +222,16 @@ ommand=%22 -%5C environm
91ff2ed96dc3ba197f71be935ac23796d40ef5dc
Update TFRT dependency to use revision http://github.com/tensorflow/runtime/commit/812a20bfa97f7b56eb3340c2f75358db58483974.
third_party/tf_runtime/workspace.bzl
third_party/tf_runtime/workspace.bzl
"""Provides the repository macro to import TFRT.""" load("//third_party:repo.bzl", "tf_http_archive") def repo(): """Imports TFRT.""" # Attention: tools parse and update these lines. TFRT_COMMIT = "c442a283246c2060d139d4cadb0f8ff59ee7e7da" TFRT_SHA256 = "649107aabf7a242678448c44d4a51d5355904222de7d454a376ad511c803cf0f" tf_http_archive( name = "tf_runtime", sha256 = TFRT_SHA256, strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT), urls = [ "http://mirror.tensorflow.org/github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT), "https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT), ], )
Python
0.000002
@@ -210,132 +210,132 @@ = %22 -c442a283246c2060d139d4cadb0f8ff59ee7e7da%22%0A TFRT_SHA256 = %22649107aabf7a242678448c44d4a51d5355904222de7d454a376ad511c803cf0 +812a20bfa97f7b56eb3340c2f75358db58483974%22%0A TFRT_SHA256 = %228235d34c674a842fb08f5fc7f7b6136a1af1dbb20a2ec7213dd99848b884878 f%22%0A%0A
90597ba2e70ce73a7531a27f851bdbb27424d04f
Update nbtest.py
newB_in_progress/test/arj/nbtest.py
newB_in_progress/test/arj/nbtest.py
#.*.F means function
#see line 28 adding to dict not working

import re

operators = ['+','-']
conditionals = ['if','when'] #then replaces the : as in if x=3 :

with open('txt.nub','r+') as f:
    filecontent = f.read()+' <EOF> '

values={} #global so as to be accessible from funcs


def parseF(textlist):
    data=textlist
    incr_var = 0
    while incr_var < len(data) :
        forwardIndex = incr_var + 1
        backwardIndex = incr_var - 1
        if forwardIndex >= len(data): #preventing out of bounds in array
            forwardIndex = incr_var
        cur_token = data[incr_var]
        next_token = data[forwardIndex]
        previous_token = data[backwardIndex]
        index=data.index(cur_token)
        if cur_token == 'ASSIGN':
            values[previous_token]=next_token #adding to dict not working
        elif cur_token == 'output':
            if next_token in values :
                print(values[next_token]) #printing the var by fetching the value
            else :
                print(next_token.replace('STRING','').replace('NUM','').strip())
        incr_var+=1


def tokenF(load):
    data = load #takes in a list
    t_var=''
    incr_var=0
    #num = ['0','1','2','3','4','5','6','7','8','9'] not needed checked in isdigit()
    while incr_var < len(data):
        cur_char = data[incr_var]
        index=data.index(cur_char) #get index of current char
        pattern=r"'(.)*'" #regex for string
        match= re.search(pattern,cur_char) #cur_char is not only one char but can also be 20 for example
        if cur_char in conditionals :
            data[index] = 'COND '
        # #cur_char.isdigit()==True: # or unicode.isNumeric()
        elif cur_char in operators:
            data[index] = 'OPER '+data[index]
        elif cur_char == '=' and data[incr_var+1] != '=' and data[incr_var-1] != '=':
            data[index] = 'ASSIGN'
        elif cur_char == '=' and data[incr_var+1] == '=':
            data[index] = 'EQUAL'
            data.remove('=')
        elif match is not None:
            data[index] = 'STRING '+data[index]
        incr_var+=1
    return data
    #print(values)


def splitF(feed):
    raw = feed
    rawChar = ['(',')','+','-','*','/','&','%','=',' ','\n',';','/*','*/','==']
    formattedChar = [' ( ',' ) ',' + ',' - ',' * ',' / ',' & ',' % ',' = ',' ',' NEWLINE ',' ; ',' /* ',' */ ','dequal'] #replace with space
    incr_var = 0
    while incr_var < len(rawChar):
        raw =''+raw.replace(rawChar[incr_var],formattedChar[incr_var])
        incr_var +=1
    #print(raw)
    return raw.split()


print(splitF(filecontent)) #debug
print(tokenF(splitF(filecontent)))
print(values)
print(' ')
print(parseF(tokenF(splitF(filecontent)))) #real
Python
0.000001
@@ -53,16 +53,43 @@ working +%0A#resolved just wrong debug %0A%0Aimport
49ad9b8162a9113f3c4c69818553de2cb6bf66df
Update TFRT dependency to use revision http://github.com/tensorflow/runtime/commit/da541333433f74881d8f44947369756d40d5e7fe.
third_party/tf_runtime/workspace.bzl
third_party/tf_runtime/workspace.bzl
"""Provides the repository macro to import TFRT.""" load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls") def repo(): """Imports TFRT.""" # Attention: tools parse and update these lines. TFRT_COMMIT = "bdb99de6e7e5fcd5a7e55895bb1c658ea0336136" TFRT_SHA256 = "a251c274cf0bbd805e221677cf4988c27156af54655b906eab11d9e3ee37d0b5" tf_http_archive( name = "tf_runtime", sha256 = TFRT_SHA256, strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT), urls = tf_mirror_urls("https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT)), # A patch file can be provided for atomic commits to both TF and TFRT. # The job that bumps the TFRT_COMMIT also resets patch_file to 'None'. patch_file = None, )
Python
0.000001
@@ -228,133 +228,133 @@ = %22 -bdb99de6e7e5fcd5a7e55895bb1c658ea0336136%22%0A TFRT_SHA256 = %22a251c274cf0bbd805e221677cf4988c27156af54655b906eab11d9e3ee37d0b5 +da541333433f74881d8f44947369756d40d5e7fe%22%0A TFRT_SHA256 = %22df492c902908141405e88af81c4bb72580e3a5615bd91448b7c44a2c0d29009a %22%0A%0A
be0e9cf9a195f44a033bb8b3aeb13febf3cea9cf
Remove check in token credential (#14134)
src/azure-cli/azure/cli/command_modules/storage/oauth_token_util.py
src/azure-cli/azure/cli/command_modules/storage/oauth_token_util.py
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------

import threading


class TokenUpdater(object):
    """
    This class updates a given token_credential periodically using the provided callback function.
    It shows one way of making sure the credential does not become expired.
    """
    def __init__(self, token_credential, cli_ctx):
        self.token_credential = token_credential
        self.cli_ctx = cli_ctx
        # the timer needs to be protected, as later on it is possible that one thread is setting a new timer and
        # another thread is trying to cancel the timer
        self.lock = threading.Lock()
        self.timer_callback()

    def timer_callback(self):
        # call to get a new token and set a timer
        from azure.cli.core._profile import Profile
        from datetime import datetime
        # should give back token that is valid for at least 5 mins
        token = Profile(cli_ctx=self.cli_ctx).get_raw_token(
            resource="https://storage.azure.com",
            subscription=self.cli_ctx.data['subscription_id'])[0][2]
        try:
            self.token_credential.token = token['accessToken']
            seconds_left = (datetime.strptime(token['expiresOn'], "%Y-%m-%d %H:%M:%S.%f") - datetime.now()).seconds
        except KeyError:  # needed to deal with differing unserialized MSI token payload
            self.token_credential.token = token['access_token']
            seconds_left = (datetime.fromtimestamp(int(token['expires_on'])) - datetime.now()).seconds
        if seconds_left < 180:
            # acquired token expires in less than 3 mins
            raise Exception("Acquired a token expiring in less than 3 minutes")

        with self.lock:
            self.timer = threading.Timer(seconds_left - 180, self.timer_callback)
            self.timer.daemon = True
            self.timer.start()

    def cancel(self):
        # the timer needs to be canceled once the command has finished executing
        # if not the timer will keep going
        with self.lock:
            self.timer.cancel()
Python
0
@@ -356,16 +356,80 @@ reading%0A +from knack.log import get_logger%0A%0Alogger = get_logger(__name__)%0A %0A%0Aclass @@ -1486,24 +1486,64 @@ cessToken'%5D%0A + expire = token%5B'expiresOn'%5D%0A @@ -1576,34 +1576,22 @@ trptime( -token%5B' expire -sOn'%5D , %22%25Y-%25m @@ -1799,32 +1799,25 @@ -seconds_left +expire = -( datetime @@ -1856,16 +1856,51 @@ s_on'%5D)) +%0A seconds_left = (expire - datet @@ -1910,32 +1910,33 @@ .now()).seconds%0A +%0A if secon @@ -1962,19 +1962,33 @@ -# a +logger.warning(%22A cquired @@ -1997,114 +1997,71 @@ ken +will expire -s in less than 3 mins%0A raise Exception(%22Acquired a token expiring in less than 3 minutes%22 + on %25s. Current time is %25s.%22, expire, datetime.now() )%0A%0A
a8a0e24d9ee90676601a52c564eadb7ff264d5cd
Update TFRT dependency to use revision http://github.com/tensorflow/runtime/commit/356740e3a2bf884abd27b2ca362fe8108a7cd257.
third_party/tf_runtime/workspace.bzl
third_party/tf_runtime/workspace.bzl
"""Provides the repository macro to import TFRT.""" load("//third_party:repo.bzl", "tf_http_archive") def repo(): """Imports TFRT.""" # Attention: tools parse and update these lines. TFRT_COMMIT = "b570a1921c9e55ac53c8972bd2bfd37cd0eb510d" TFRT_SHA256 = "01295fc2a90aa2d665890adbe8701e2ae2372028d3b8266cba38ceddccb42af6" tf_http_archive( name = "tf_runtime", sha256 = TFRT_SHA256, strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT), urls = [ "http://mirror.tensorflow.org/github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT), "https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT), ], )
Python
0.000003
@@ -210,133 +210,133 @@ = %22 -b570a1921c9e55ac53c8972bd2bfd37cd0eb510d%22%0A TFRT_SHA256 = %2201295fc2a90aa2d665890adbe8701e2ae2372028d3b8266cba38ceddccb42af6 +356740e3a2bf884abd27b2ca362fe8108a7cd257%22%0A TFRT_SHA256 = %22c5c806b5f5acb345eca8db4bc49053df60d0b368193f5b78346cf6acdc4bc3e8 %22%0A%0A
ce977d24d49b7e03b6db5b5590e8fc0ddf8e9127
fix the deploy order in the daemon. closes #862
fabfile/daemons.py
fabfile/daemons.py
#!/usr/bin/env python

from time import sleep, time

from fabric.api import execute, task, env

import app_config
import sys
import traceback


def safe_execute(*args, **kwargs):
    """
    Wrap execute() so that all exceptions are caught and logged.
    """
    try:
        execute(*args, **kwargs)
    except:
        print "ERROR [timestamp: %d]: Here's the traceback" % time()
        ex_type, ex, tb = sys.exc_info()
        traceback.print_tb(tb)
        del tb


@task
def deploy():
    """
    Harvest data and deploy slides indefinitely
    """
    while True:
        start = time()

        safe_execute('ap.update')
        safe_execute('data.load_updates', 'data/update.json')
        safe_execute('liveblog.update')
        safe_execute('deploy_slides')
        safe_execute('deploy_big_boards')
        safe_execute('deploy_bop')

        duration = int(time() - start)
        wait = app_config.DEPLOY_INTERVAL - duration

        print "== Deploying slides ran in %ds, waiting %ds ==" % (duration, wait)

        if wait < 0:
            print "WARN: Deploying slides took %d seconds longer than %d" % (abs(wait), app_config.DEPLOY_INTERVAL)
            wait = 0

        sleep(wait)
Python
0.000201
@@ -750,22 +750,19 @@ 'deploy_ -slides +bop ')%0A @@ -827,19 +827,22 @@ 'deploy_ -bop +slides ')%0A%0A
7e95e0b8adb4315c8f8a0c5aa8c6ccc588cbee18
Update TFRT dependency to use revision http://github.com/tensorflow/runtime/commit/0d8bae2de531db2e4e4efd3a4e168b39795458b9.
third_party/tf_runtime/workspace.bzl
third_party/tf_runtime/workspace.bzl
"""Provides the repository macro to import TFRT.""" load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls") def repo(): """Imports TFRT.""" # Attention: tools parse and update these lines. TFRT_COMMIT = "4b2fe81ea82e4c33783b5b62973fbe84dbc6f484" TFRT_SHA256 = "f0e6e0fd3e5245d993cd4146d8245e130e724d0070401a25f730b02c7296d1c4" tf_http_archive( name = "tf_runtime", sha256 = TFRT_SHA256, strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT), urls = tf_mirror_urls("https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT)), # A patch file can be provided for atomic commits to both TF and TFRT. # The job that bumps the TFRT_COMMIT also resets patch_file to 'None'. patch_file = None, )
Python
0.000001
@@ -228,133 +228,133 @@ = %22 -4b2fe81ea82e4c33783b5b62973fbe84dbc6f484%22%0A TFRT_SHA256 = %22f0e6e0fd3e5245d993cd4146d8245e130e724d0070401a25f730b02c7296d1c4 +0d8bae2de531db2e4e4efd3a4e168b39795458b9%22%0A TFRT_SHA256 = %22fa7cd1e72eec99562bf916e071222df2e72e90c67dcb14137ffbef07a4fcac5f %22%0A%0A
e2d066811a5e943600c170aba0cf797c104d1588
Update TFRT dependency to use revision http://github.com/tensorflow/runtime/commit/5f6e52142a3592d0cfa058dbfd140cad49ed451a.
third_party/tf_runtime/workspace.bzl
third_party/tf_runtime/workspace.bzl
"""Provides the repository macro to import TFRT.""" load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls") def repo(): """Imports TFRT.""" # Attention: tools parse and update these lines. TFRT_COMMIT = "736eeebfb56c6d0de138f4a29286140d8c26d927" TFRT_SHA256 = "b584ee5ce5ecaadf289b0997987dfb5eec6cf3623f30b83028923cad20914e61" tf_http_archive( name = "tf_runtime", sha256 = TFRT_SHA256, strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT), urls = tf_mirror_urls("https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT)), # A patch file can be provided for atomic commits to both TF and TFRT. # The job that bumps the TFRT_COMMIT also resets patch_file to 'None'. patch_file = None, )
Python
0
@@ -228,133 +228,133 @@ = %22 -736eeebfb56c6d0de138f4a29286140d8c26d927%22%0A TFRT_SHA256 = %22b584ee5ce5ecaadf289b0997987dfb5eec6cf3623f30b83028923cad20914e61 +5f6e52142a3592d0cfa058dbfd140cad49ed451a%22%0A TFRT_SHA256 = %228e1efbd7df0fdeb5186b178d7c8b90c33ba80cef54999e988097bd1ff0f4e8fe %22%0A%0A
9f47b0cf3d7e26339aaf51dce912db30cc50c587
Debug values
promgen/prometheus.py
promgen/prometheus.py
import collections
import json
import logging
import subprocess
import tempfile

import requests
from django.conf import settings
from django.template.loader import render_to_string

from promgen import models

logger = logging.getLogger(__name__)


def check_rules(rules):
    with tempfile.NamedTemporaryFile(mode='w', encoding='utf8') as fp:
        logger.debug('Rendering to %s', fp.name)
        fp.write(render_rules(rules))
        fp.flush()
        subprocess.check_call([
            settings.PROMGEN['rule_writer']['promtool_path'],
            'check-rules',
            fp.name
        ])


def render_rules(rules=None):
    if rules is None:
        rules = models.Rule.objects.all()
    return render_to_string('promgen/prometheus.rule', {'rules': rules})


def render_config(service=None, project=None):
    data = []
    for exporter in models.Exporter.objects.all():
        if not exporter.project.farm:
            continue
        if service and exporter.project.service.name != service.name:
            continue
        if project and exporter.project.name != project.name:
            continue

        labels = {
            'project': exporter.project.name,
            'service': exporter.project.service.name,
            'farm': exporter.project.farm.name,
            'job': exporter.job,
        }
        if exporter.path:
            labels['__metrics_path__'] = exporter.path

        hosts = []
        for host in models.Host.objects.filter(farm=exporter.project.farm):
            hosts.append('{}:{}'.format(host.name, exporter.port))

        data.append({
            'labels': labels,
            'targets': hosts,
        })
    return json.dumps(data, indent=2, sort_keys=True)


def write_config():
    with open(settings.PROMGEN['config_writer']['path'], 'w+', encoding='utf8') as fp:
        fp.write(render_config())
    for target in settings.PROMGEN['config_writer'].get('notify', []):
        try:
            requests.post(target).raise_for_status()
        except Exception as e:
            logger.error('%s while notifying %s', e, target)


def write_rules():
    with open(settings.PROMGEN['rule_writer']['rule_path'], 'w+', encoding='utf8') as fp:
        fp.write(render_rules())
    for target in settings.PROMGEN['rule_writer'].get('notify', []):
        try:
            requests.post(target).raise_for_status()
        except Exception as e:
            logger.error('%s while notifying %s', e, target)


def reload_prometheus():
    target = '{}/-/reload'.format(settings.PROMGEN['prometheus']['url'])
    try:
        requests.post(target).raise_for_status()
    except Exception as e:
        logger.error('%s while notifying %s', e, target)


def import_config(config):
    counters = collections.defaultdict(int)
    for entry in config:
        service, created = models.Service.objects.get_or_create(
            name=entry['labels']['service'],
        )
        if created:
            counters['Service'] += 1

        farm, created = models.Farm.objects.get_or_create(
            name=entry['labels']['farm'],
            defaults={'source': 'pmc'}
        )
        if created:
            counters['Farm'] += 1

        project, created = models.Project.objects.get_or_create(
            name=entry['labels']['project'],
            service=service,
            defaults={'farm': farm}
        )
        if created:
            counters['Project'] += 1
        if not project.farm:
            project.farm = farm
            project.save()

        for target in entry['targets']:
            target, port = target.split(':')
            host, created = models.Host.objects.get_or_create(
                name=target,
                farm_id=farm.id,
            )
            if created:
                counters['Host'] += 1

            exporter, created = models.Exporter.objects.get_or_create(
                job=entry['labels']['job'],
                port=port,
                project=project,
                path=entry['labels'].get('__metrics_path__', '')
            )
            if created:
                counters['Exporter'] += 1
    return dict(counters)
Python
0.000001
@@ -1734,24 +1734,78 @@ e_config():%0A + print(settings.PROMGEN)%0A print('write config')%0A with ope @@ -1909,24 +1909,55 @@ r_config())%0A + print('send notification')%0A for targ
a70e0abdf409d770ddbb9faf3cc66c26fc03b076
fix fbproject tests following new pystan version
tests/test_fbprophet.py
tests/test_fbprophet.py
import unittest

import numpy as np
import pandas as pd
from fbprophet import Prophet


class TestFbProphet(unittest.TestCase):

    def test_fit(self):
        train = pd.DataFrame({
            'ds': np.array(['2012-05-18', '2012-05-20']),
            'y': np.array([38.23, 21.25])
        })

        forecaster = Prophet(mcmc_samples=1)
        forecaster.fit(train)
Python
0
@@ -361,10 +361,44 @@ it(train +, control=%7B'adapt_engaged': False%7D )%0A
7461c7b6b729c38194ebb5e88b33e7bcc73b4c9c
Update TFRT dependency to use revision http://github.com/tensorflow/runtime/commit/53604b1779bdbea70bed75fe1695b503e06be323.
third_party/tf_runtime/workspace.bzl
third_party/tf_runtime/workspace.bzl
"""Provides the repository macro to import TFRT.""" load("//third_party:repo.bzl", "tf_http_archive") def repo(): """Imports TFRT.""" # Attention: tools parse and update these lines. TFRT_COMMIT = "1c915c952cea8e5d290d241b3a0178856a9ec35b" TFRT_SHA256 = "97f8ad0010b924f8489ca04e8e5aa5aea4a69013293e6575137176a6a8d80168" tf_http_archive( name = "tf_runtime", sha256 = TFRT_SHA256, strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT), urls = [ "http://mirror.tensorflow.org/github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT), "https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT), ], # A patch file can be provided for atomic commits to both TF and TFRT. # The job that bumps the TFRT_COMMIT also resets patch_file to 'None'. patch_file = None, )
Python
0.000002
@@ -210,133 +210,133 @@ = %22 -1c915c952cea8e5d290d241b3a0178856a9ec35b%22%0A TFRT_SHA256 = %2297f8ad0010b924f8489ca04e8e5aa5aea4a69013293e6575137176a6a8d80168 +53604b1779bdbea70bed75fe1695b503e06be323%22%0A TFRT_SHA256 = %22b2ce14585f2707ec56b013323fde0ff10ddecdf608854dcf332c46244e0dbd20 %22%0A%0A